/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef __MHI_H
#define __MHI_H

#include <linux/msm_ep_pcie.h>
#include <linux/types.h>
#include <linux/ipc_logging.h>
#include <linux/dma-mapping.h>

/**
 * MHI control data structures allocated by the host, including
 * channel context array, event context array, command context and rings.
 */

/* Channel context state */
enum mhi_dev_ch_ctx_state {
	MHI_DEV_CH_STATE_DISABLED,
	MHI_DEV_CH_STATE_ENABLED,
	MHI_DEV_CH_STATE_RUNNING,
	MHI_DEV_CH_STATE_SUSPENDED,
	MHI_DEV_CH_STATE_STOP,
	MHI_DEV_CH_STATE_ERROR,
	MHI_DEV_CH_STATE_RESERVED,
	MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF
};

/* Channel type */
enum mhi_dev_ch_ctx_type {
	MHI_DEV_CH_TYPE_NONE,
	MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL,
	MHI_DEV_CH_TYPE_INBOUND_CHANNEL,
	MHI_DEV_CH_RESERVED
};

/* Channel context type */
struct mhi_dev_ch_ctx {
	enum mhi_dev_ch_ctx_state	ch_state;
	enum mhi_dev_ch_ctx_type	ch_type;
	uint32_t			err_indx;
	uint64_t			rbase;
	uint64_t			rlen;
	uint64_t			rp;
	uint64_t			wp;
} __packed;

enum mhi_dev_ring_element_type_id {
	MHI_DEV_RING_EL_INVALID = 0,
	MHI_DEV_RING_EL_NOOP = 1,
	MHI_DEV_RING_EL_TRANSFER = 2,
	MHI_DEV_RING_EL_RESET = 16,
	MHI_DEV_RING_EL_STOP = 17,
	MHI_DEV_RING_EL_START = 18,
	MHI_DEV_RING_EL_MHI_STATE_CHG = 32,
	MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33,
	MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34,
	MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64,
	MHI_DEV_RING_EL_UNDEF
};

enum mhi_dev_ring_state {
	RING_STATE_UINT = 0,
	RING_STATE_IDLE,
	RING_STATE_PENDING,
};

enum mhi_dev_ring_type {
	RING_TYPE_CMD = 0,
	RING_TYPE_ER,
	RING_TYPE_CH,
	RING_TYPE_INVAL
};

/* Event context interrupt moderation */
enum mhi_dev_evt_ctx_int_mod_timer {
	MHI_DEV_EVT_INT_MODERATION_DISABLED
};

/* Event ring type */
enum mhi_dev_evt_ctx_event_ring_type {
	MHI_DEV_EVT_TYPE_DEFAULT,
	MHI_DEV_EVT_TYPE_VALID,
	MHI_DEV_EVT_RESERVED
};

/* Event ring context type */
struct mhi_dev_ev_ctx {
	uint32_t				res1:16;
	enum mhi_dev_evt_ctx_int_mod_timer	intmodt:16;
	enum mhi_dev_evt_ctx_event_ring_type	ertype;
	uint32_t				msivec;
	uint64_t				rbase;
	uint64_t				rlen;
	uint64_t				rp;
	uint64_t				wp;
} __packed;

/* Command context */
struct mhi_dev_cmd_ctx {
	uint32_t				res1;
	uint32_t				res2;
	uint32_t				res3;
	uint64_t				rbase;
	uint64_t				rlen;
	uint64_t				rp;
	uint64_t				wp;
} __packed;

/* generic context */
struct mhi_dev_gen_ctx {
	uint32_t				res1;
	uint32_t				res2;
	uint32_t				res3;
	uint64_t				rbase;
	uint64_t				rlen;
	uint64_t				rp;
	uint64_t				wp;
} __packed;

/* Transfer ring element */
struct mhi_dev_transfer_ring_element {
	uint64_t				data_buf_ptr;
	uint32_t				len:16;
	uint32_t				res1:16;
	uint32_t				chain:1;
	uint32_t				res2:7;
	uint32_t				ieob:1;
	uint32_t				ieot:1;
	uint32_t				bei:1;
	uint32_t				res3:5;
	enum mhi_dev_ring_element_type_id	type:8;
	uint32_t				res4:8;
} __packed;

/* Command ring element */
/* Command ring No op command */
struct mhi_dev_cmd_ring_op {
	uint64_t				res1;
	uint32_t				res2;
	uint32_t				res3:16;
	enum mhi_dev_ring_element_type_id	type:8;
	uint32_t				chid:8;
} __packed;

/* Command ring reset channel command */
struct mhi_dev_cmd_ring_reset_channel_cmd {
	uint64_t				res1;
	uint32_t				res2;
	uint32_t				res3:16;
	enum mhi_dev_ring_element_type_id	type:8;
	uint32_t				chid:8;
} __packed;

/* Command ring stop channel command */
struct mhi_dev_cmd_ring_stop_channel_cmd {
	uint64_t				res1;
	uint32_t				res2;
	uint32_t				res3:16;
	enum mhi_dev_ring_element_type_id	type:8;
	uint32_t				chid:8;
} __packed;

/* Command ring start channel command */
struct mhi_dev_cmd_ring_start_channel_cmd {
	uint64_t				res1;
	uint32_t				seqnum;
	uint32_t				reliable:1;
	uint32_t				res2:15;
	enum mhi_dev_ring_element_type_id	type:8;
	uint32_t				chid:8;
} __packed;

enum mhi_dev_cmd_completion_code {
	MHI_CMD_COMPL_CODE_INVALID = 0,
	MHI_CMD_COMPL_CODE_SUCCESS = 1,
	MHI_CMD_COMPL_CODE_EOT = 2,
	MHI_CMD_COMPL_CODE_OVERFLOW = 3,
	MHI_CMD_COMPL_CODE_EOB = 4,
	MHI_CMD_COMPL_CODE_UNDEFINED = 16,
	MHI_CMD_COMPL_CODE_RING_EL = 17,
	MHI_CMD_COMPL_CODE_RES
};

/* Event ring elements */
/* Transfer completion event */
struct mhi_dev_event_ring_transfer_completion {
	uint64_t				ptr;
	uint32_t				len:16;
	uint32_t				res1:8;
	enum mhi_dev_cmd_completion_code	code:8;
	uint32_t				res2:16;
	enum mhi_dev_ring_element_type_id	type:8;
	uint32_t				chid:8;
} __packed;

/* Command completion event */
struct mhi_dev_event_ring_cmd_completion {
	uint64_t				ptr;
	uint32_t				res1:24;
	enum mhi_dev_cmd_completion_code	code:8;
	uint32_t				res2:16;
	enum mhi_dev_ring_element_type_id	type:8;
	uint32_t				res3:8;
} __packed;

enum mhi_dev_state {
	MHI_DEV_RESET_STATE = 0,
	MHI_DEV_READY_STATE,
	MHI_DEV_M0_STATE,
	MHI_DEV_M1_STATE,
	MHI_DEV_M2_STATE,
	MHI_DEV_M3_STATE,
	MHI_DEV_MAX_STATE,
	MHI_DEV_SYSERR_STATE = 0xff
};

/* MHI state change event */
struct mhi_dev_event_ring_state_change {
	uint64_t				ptr;
	uint32_t				res1:24;
	enum mhi_dev_state			mhistate:8;
	uint32_t				res2:16;
	enum mhi_dev_ring_element_type_id	type:8;
	uint32_t				res3:8;
} __packed;

enum mhi_dev_execenv {
	MHI_DEV_SBL_EE = 1,
	MHI_DEV_AMSS_EE = 2,
	MHI_DEV_UNRESERVED
};

/* EE state change event */
struct mhi_dev_event_ring_ee_state_change {
	uint64_t				ptr;
	uint32_t				res1:24;
	enum mhi_dev_execenv			execenv:8;
	uint32_t				res2:16;
	enum mhi_dev_ring_element_type_id	type:8;
	uint32_t				res3:8;
} __packed;

/* Generic cmd to parse common details like type and channel id */
struct mhi_dev_ring_generic {
	uint64_t				ptr;
	uint32_t				res1:24;
	enum mhi_dev_state			mhistate:8;
	uint32_t				res2:16;
	enum mhi_dev_ring_element_type_id	type:8;
	uint32_t				chid:8;
} __packed;

struct mhi_config {
	uint32_t	mhi_reg_len;
	uint32_t	version;
	uint32_t	event_rings;
	uint32_t	channels;
	uint32_t	chdb_offset;
	uint32_t	erdb_offset;
};

#define NUM_CHANNELS			128
#define HW_CHANNEL_BASE			100
#define HW_CHANNEL_END			107
#define MHI_ENV_VALUE			2
#define MHI_MASK_ROWS_CH_EV_DB		4
#define TRB_MAX_DATA_SIZE		4096

/* Possible ring element types */
union mhi_dev_ring_element_type {
	struct mhi_dev_cmd_ring_op			cmd_no_op;
	struct mhi_dev_cmd_ring_reset_channel_cmd	cmd_reset;
	struct mhi_dev_cmd_ring_stop_channel_cmd	cmd_stop;
	struct mhi_dev_cmd_ring_start_channel_cmd	cmd_start;
	struct mhi_dev_transfer_ring_element		tre;
	struct mhi_dev_event_ring_transfer_completion	evt_tr_comp;
	struct mhi_dev_event_ring_cmd_completion	evt_cmd_comp;
	struct mhi_dev_event_ring_state_change		evt_state_change;
	struct mhi_dev_event_ring_ee_state_change	evt_ee_state;
	struct mhi_dev_ring_generic			generic;
};

/* Ring context types */
union mhi_dev_ring_ctx {
	struct mhi_dev_cmd_ctx		cmd;
	struct mhi_dev_ev_ctx		ev;
	struct mhi_dev_ch_ctx		ch;
	struct mhi_dev_gen_ctx		generic;
};

/* MHI host Control and data address region */
struct mhi_host_addr {
	uint32_t	ctrl_base_lsb;
	uint32_t	ctrl_base_msb;
	uint32_t	ctrl_limit_lsb;
	uint32_t	ctrl_limit_msb;
	uint32_t	data_base_lsb;
	uint32_t	data_base_msb;
	uint32_t	data_limit_lsb;
	uint32_t	data_limit_msb;
};
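
/*
 * Illustrative sketch, not part of this header: the host publishes each
 * 64-bit control/data base and limit as a pair of 32-bit LSB/MSB values,
 * so the device side would typically reassemble them as:
 *
 *	uint64_t ctrl_base = ((uint64_t)host_addr.ctrl_base_msb << 32) |
 *				host_addr.ctrl_base_lsb;
 */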

/* MHI physical and virtual address region */
struct mhi_meminfo {
	struct device	*dev;
	uintptr_t	pa_aligned;
	uintptr_t	pa_unaligned;
	uintptr_t	va_aligned;
	uintptr_t	va_unaligned;
	uintptr_t	size;
};

struct mhi_addr {
	uint64_t	host_pa;
	uintptr_t	device_pa;
	uintptr_t	device_va;
	uint32_t	size;
};

struct mhi_interrupt_state {
	uint32_t	mask;
	uint32_t	status;
};

enum mhi_dev_channel_state {
	MHI_DEV_CH_UNINT,
	MHI_DEV_CH_STARTED,
	MHI_DEV_CH_PENDING_START,
	MHI_DEV_CH_PENDING_STOP,
	MHI_DEV_CH_STOPPED,
	MHI_DEV_CH_CLOSED,
};

enum mhi_dev_ch_operation {
	MHI_DEV_OPEN_CH,
	MHI_DEV_CLOSE_CH,
	MHI_DEV_READ_CH,
	MHI_DEV_READ_WR,
	MHI_DEV_POLL,
};

struct mhi_dev_channel;

struct mhi_dev_ring {
	struct list_head			list;
	struct mhi_dev				*mhi_dev;

	uint32_t				id;
	uint32_t				rd_offset;
	uint32_t				wr_offset;
	uint32_t				ring_size;

	enum mhi_dev_ring_type			type;
	enum mhi_dev_ring_state			state;

	/* device virtual address of the local cache of the host ring elements */
	union mhi_dev_ring_element_type		*ring_cache;
	/* Physical address of the cached ring copy on the device side */
	dma_addr_t				ring_cache_dma_handle;
	/* Physical address of the host where we will write/read to/from */
	struct mhi_addr				ring_shadow;
	/* Ring context (cmd/event/channel) including its rbase, rp and wp */
	union mhi_dev_ring_ctx			*ring_ctx;
	/* ring_ctx_shadow -> tracking ring_ctx in the host */
	union mhi_dev_ring_ctx			*ring_ctx_shadow;
	void (*ring_cb)(struct mhi_dev *dev,
			union mhi_dev_ring_element_type *el,
			void *ctx);
};

/*
 * mhi_dev_ring_inc_index() - Advance the cached read offset of @ring by one
 * element, wrapping back to the start when the end of the ring is reached.
 * Note that the rd_offset argument is currently unused.
 */
static inline void mhi_dev_ring_inc_index(struct mhi_dev_ring *ring,
						uint32_t rd_offset)
{
	ring->rd_offset++;
	if (ring->rd_offset == ring->ring_size)
		ring->rd_offset = 0;
}

/* trace information planned to be used for read/write */
#define TRACE_DATA_MAX				128
#define MHI_DEV_DATA_MAX			512

#define MHI_DEV_MMIO_RANGE			0xc80

enum cb_reason {
	MHI_DEV_TRE_AVAILABLE = 0,
};

struct mhi_dev_client_cb_reason {
	uint32_t		ch_id;
	enum cb_reason		reason;
};

struct mhi_dev_client {
	struct list_head		list;
	struct mhi_dev_channel		*channel;
	void (*event_trigger)(struct mhi_dev_client_cb_reason *cb);

	/* mhi_dev calls are fully synchronous -- only one call may be
	 * active per client at a time for now.
	 */
	struct mutex			write_lock;
	wait_queue_head_t		wait;

	/* trace logs */
	spinlock_t			tr_lock;
	unsigned			tr_head;
	unsigned			tr_tail;
	struct mhi_dev_trace		*tr_log;

	/* client buffers */
	struct mhi_dev_iov		*iov;
	uint32_t			nr_iov;
};

struct mhi_dev_channel {
	struct list_head		list;
	struct list_head		clients;
	/* synchronization for changing channel state,
	 * adding/removing clients, mhi_dev callbacks, etc
	 */
	spinlock_t			lock;

	struct mhi_dev_ring		*ring;

	enum mhi_dev_channel_state	state;
	uint32_t			ch_id;
	enum mhi_dev_ch_ctx_type	ch_type;
	struct mutex			ch_lock;
	/* client which the current inbound/outbound message is for */
	struct mhi_dev_client		*active_client;

	/* current TRE being processed */
	uint64_t			tre_loc;
	/* current TRE size */
	uint32_t			tre_size;
	/* tre bytes left to read/write */
	uint32_t			tre_bytes_left;
	/* td size being read/written from/to so far */
	uint32_t			td_size;
	bool				wr_request_active;
	bool				skip_td;
};

/* Core structure for the MHI device */
struct mhi_dev {
	struct platform_device		*pdev;
	struct device			*dev;
	/* MHI MMIO related members */
	phys_addr_t			mmio_base_pa_addr;
	void				*mmio_base_addr;
	phys_addr_t			ipa_uc_mbox_crdb;
	phys_addr_t			ipa_uc_mbox_erdb;

	uint32_t			*mmio_backup;
	struct mhi_config		cfg;
	bool				mmio_initialized;

	/* Host control base information */
	struct mhi_host_addr		host_addr;
	struct mhi_addr			ctrl_base;
	struct mhi_addr			data_base;
	struct mhi_addr			ch_ctx_shadow;
	struct mhi_dev_ch_ctx		*ch_ctx_cache;
	dma_addr_t			ch_ctx_cache_dma_handle;
	struct mhi_addr			ev_ctx_shadow;
	struct mhi_dev_ch_ctx		*ev_ctx_cache;
	dma_addr_t			ev_ctx_cache_dma_handle;

	struct mhi_addr			cmd_ctx_shadow;
	struct mhi_dev_ch_ctx		*cmd_ctx_cache;
	dma_addr_t			cmd_ctx_cache_dma_handle;
	struct mhi_dev_ring		*ring;
	struct mhi_dev_channel		*ch;

	int				ctrl_int;
	int				cmd_int;
	/* CHDB and EVDB device interrupt state */
	struct mhi_interrupt_state	chdb[4];
	struct mhi_interrupt_state	evdb[4];

	/* Scheduler work */
	struct work_struct		chdb_ctrl_work;
	struct mutex			mhi_lock;
	struct mutex			mhi_event_lock;

	/* process a ring element */
	struct workqueue_struct		*pending_ring_wq;
	struct work_struct		pending_work;

	struct list_head		event_ring_list;
	struct list_head		process_ring_list;

	uint32_t			cmd_ring_idx;
	uint32_t			ev_ring_start;
	uint32_t			ch_ring_start;

	/* IPA Handles */
	u32				ipa_clnt_hndl[4];
	struct workqueue_struct		*ring_init_wq;
	struct work_struct		ring_init_cb_work;

	/* EP PCIe registration */
	struct ep_pcie_register_event	event_reg;
	u32                             ifc_id;
	struct ep_pcie_hw               *phandle;

	atomic_t			write_active;
	atomic_t			is_suspended;
	struct mutex			mhi_write_test;
	u32				mhi_ep_msi_num;
	u32				mhi_version;
	void				*dma_cache;
	void				*read_handle;
	void				*write_handle;
	/* Physical scratch buffer for writing control data to the host */
	dma_addr_t			cache_dma_handle;
	/*
	 * Physical scratch buffer address used when reading data from the
	 * host in mhi_read()
	 */
	dma_addr_t			read_dma_handle;
	/*
	 * Physical scratch buffer address used when writing data from the
	 * device to the host region in mhi_write()
	 */
	dma_addr_t			write_dma_handle;
};

enum mhi_msg_level {
	MHI_MSG_VERBOSE = 0x0,
	MHI_MSG_INFO = 0x1,
	MHI_MSG_DBG = 0x2,
	MHI_MSG_WARNING = 0x3,
	MHI_MSG_ERROR = 0x4,
	MHI_MSG_CRITICAL = 0x5,
	MHI_MSG_reserved = 0x80000000
};

extern enum mhi_msg_level mhi_msg_lvl;
extern enum mhi_msg_level mhi_ipc_msg_lvl;
extern void *mhi_ipc_log;

#define mhi_log(_msg_lvl, _msg, ...) do { \
	if (_msg_lvl >= mhi_msg_lvl) { \
		pr_err("[%s] "_msg, __func__, ##__VA_ARGS__); \
	} \
	if (mhi_ipc_log && (_msg_lvl >= mhi_ipc_msg_lvl)) { \
		ipc_log_string(mhi_ipc_log,                     \
			"[%s] " _msg, __func__, ##__VA_ARGS__);     \
	} \
} while (0)
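
/*
 * Illustrative use of mhi_log() (the channel id and return code below are
 * hypothetical):
 *
 *	mhi_log(MHI_MSG_ERROR, "channel %d start failed, rc = %d\n",
 *			ch_id, rc);
 *
 * Messages at or above mhi_msg_lvl are printed to the kernel log, and
 * messages at or above mhi_ipc_msg_lvl are also recorded in the IPC log.
 */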

/* SW channel client list */
enum mhi_client_channel {
	MHI_CLIENT_LOOPBACK_OUT = 0,
	MHI_CLIENT_LOOPBACK_IN = 1,
	MHI_CLIENT_SAHARA_OUT = 2,
	MHI_CLIENT_SAHARA_IN = 3,
	MHI_CLIENT_DIAG_OUT = 4,
	MHI_CLIENT_DIAG_IN = 5,
	MHI_CLIENT_SSR_OUT = 6,
	MHI_CLIENT_SSR_IN = 7,
	MHI_CLIENT_QDSS_OUT = 8,
	MHI_CLIENT_QDSS_IN = 9,
	MHI_CLIENT_EFS_OUT = 10,
	MHI_CLIENT_EFS_IN = 11,
	MHI_CLIENT_MBIM_OUT = 12,
	MHI_CLIENT_MBIM_IN = 13,
	MHI_CLIENT_QMI_OUT = 14,
	MHI_CLIENT_QMI_IN = 15,
	MHI_CLIENT_IP_CTRL_0_OUT = 16,
	MHI_CLIENT_IP_CTRL_0_IN = 17,
	MHI_CLIENT_IP_CTRL_1_OUT = 18,
	MHI_CLIENT_IP_CTRL_1_IN = 19,
	MHI_CLIENT_DCI_OUT = 20,
	MHI_CLIENT_DCI_IN = 21,
	MHI_CLIENT_IP_CTRL_3_OUT = 22,
	MHI_CLIENT_IP_CTRL_3_IN = 23,
	MHI_CLIENT_IP_CTRL_4_OUT = 24,
	MHI_CLIENT_IP_CTRL_4_IN = 25,
	MHI_CLIENT_IP_CTRL_5_OUT = 26,
	MHI_CLIENT_IP_CTRL_5_IN = 27,
	MHI_CLIENT_IP_CTRL_6_OUT = 28,
	MHI_CLIENT_IP_CTRL_6_IN = 29,
	MHI_CLIENT_IP_CTRL_7_OUT = 30,
	MHI_CLIENT_IP_CTRL_7_IN = 31,
	MHI_CLIENT_DUN_OUT = 32,
	MHI_CLIENT_DUN_IN = 33,
	MHI_CLIENT_IP_SW_0_OUT = 34,
	MHI_CLIENT_IP_SW_0_IN = 35,
	MHI_CLIENT_IP_SW_1_OUT = 36,
	MHI_CLIENT_IP_SW_1_IN = 37,
	MHI_CLIENT_IP_SW_2_OUT = 38,
	MHI_CLIENT_IP_SW_2_IN = 39,
	MHI_CLIENT_IP_SW_3_OUT = 40,
	MHI_CLIENT_IP_SW_3_IN = 41,
	MHI_CLIENT_CSVT_OUT = 42,
	MHI_CLIENT_CSVT_IN = 43,
	MHI_CLIENT_SMCT_OUT = 44,
	MHI_CLIENT_SMCT_IN = 45,
	MHI_MAX_SOFTWARE_CHANNELS = 46,
	MHI_CLIENT_TEST_OUT = 60,
	MHI_CLIENT_TEST_IN = 61,
	MHI_CLIENT_RESERVED_1_LOWER = 62,
	MHI_CLIENT_RESERVED_1_UPPER = 99,
	MHI_CLIENT_IP_HW_0_OUT = 100,
	MHI_CLIENT_IP_HW_0_IN = 101,
	MHI_CLIENT_RESERVED_2_LOWER = 102,
	MHI_CLIENT_RESERVED_2_UPPER = 127,
	MHI_MAX_CHANNELS = 102,
};

struct mhi_dev_iov {
	void		*addr;
	uint32_t	buf_size;
};

/**
 * mhi_dev_open_channel() - Opens a channel for a given client; must be done
 *		prior to read/write.
 * @chan_id:	Software channel ID for the assigned client.
 * @handle_client: Client handle returned to the caller.
 * @event_trigger: Callback invoked to notify the client of channel events.
 */
int mhi_dev_open_channel(uint32_t chan_id,
		struct mhi_dev_client **handle_client,
		void (*event_trigger)(struct mhi_dev_client_cb_reason *cb));
/**
 * mhi_dev_close_channel() - Channel close for a given client.
 */
int mhi_dev_close_channel(struct mhi_dev_client *handle_client);

/**
 * mhi_dev_read_channel() - Channel read for a given client.
 * @handle_client:	Client handle issued during mhi_dev_open_channel.
 * @buf: Pointer to the buffer used by the MHI core to copy the data received
 *	 from the host.
 * @buf_size: Size of the buffer.
 * @chain: Set to indicate whether the received data is part of a chained
 *	   packet.
 */
int mhi_dev_read_channel(struct mhi_dev_client *handle_client,
				void *buf, uint32_t buf_size, uint32_t *chain);

/**
 * mhi_dev_write_channel() - Channel write for a given software client.
 * @handle_client:	Client Handle issued during mhi_dev_open_channel
 * @buf: Pointer to the buffer used by the MHI core to copy the data from the
 *	 device to the host.
 * @buf_size: Size of the buffer.
 */
int mhi_dev_write_channel(struct mhi_dev_client *handle_client, void *buf,
							uint32_t buf_size);

/**
 * mhi_dev_channel_isempty() - Checks whether there are any pending TREs to
 *		process on the channel.
 * @handle:	Client handle issued during mhi_dev_open_channel.
 */
int mhi_dev_channel_isempty(struct mhi_dev_client *handle);
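
/*
 * Illustrative client usage sketch; the function names below are
 * hypothetical and a positive return from mhi_dev_read_channel() is assumed
 * to be the number of bytes read. A client opens a software channel, reads
 * one buffer sent by the host and closes the channel.
 *
 *	static void example_event_cb(struct mhi_dev_client_cb_reason *reason)
 *	{
 *		if (reason->reason == MHI_DEV_TRE_AVAILABLE)
 *			pr_debug("TRE available on channel %u\n",
 *					reason->ch_id);
 *	}
 *
 *	static int example_read_once(void)
 *	{
 *		struct mhi_dev_client *client;
 *		char buf[MHI_DEV_DATA_MAX];
 *		uint32_t chain = 0;
 *		int rc, bytes;
 *
 *		rc = mhi_dev_open_channel(MHI_CLIENT_LOOPBACK_OUT, &client,
 *						example_event_cb);
 *		if (rc)
 *			return rc;
 *
 *		bytes = mhi_dev_read_channel(client, buf, sizeof(buf), &chain);
 *		if (bytes > 0)
 *			pr_debug("read %d bytes, chained: %u\n", bytes, chain);
 *
 *		mhi_dev_close_channel(client);
 *		return bytes < 0 ? bytes : 0;
 *	}
 */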

struct mhi_dev_trace {
	unsigned timestamp;
	uint32_t data[TRACE_DATA_MAX];
};

/* MHI Ring related functions */

/**
 * mhi_ring_init() - Initializes the ring to the default uninitialized
 *		state. Once a start command is received, the respective ring
 *		is then prepared by fetching the context and updating the
 *		offset.
 * @ring:	Ring for the respective context - Channel/Event/Command.
 * @type:	Command/Event or Channel transfer ring.
 * @id:		Index to the ring id. For the command ring it is usually 1.
 *		Event rings may vary from 1 to 128; channels from 1 to 256.
 */
void mhi_ring_init(struct mhi_dev_ring *ring,
			enum mhi_dev_ring_type type, int id);

/**
 * mhi_ring_start() - Fetches the respective ring's context from the host
 *		and updates the write offset.
 * @ring:	Ring for the respective context - Channel/Event/Command.
 * @ctx:	Ring context of type mhi_dev_ring_ctx.
 * @mhi:	MHI device structure.
 */
int mhi_ring_start(struct mhi_dev_ring *ring,
			union mhi_dev_ring_ctx *ctx, struct mhi_dev *mhi);

/**
 * mhi_dev_cache_ring() - Cache the data for the corresponding ring locally.
 * @ring:	Ring for the respective context - Channel/Event/Command.
 * @wr_offset:	Cache the TREs up to the write offset value.
 */
int mhi_dev_cache_ring(struct mhi_dev_ring *ring, uint32_t wr_offset);

/**
 * mhi_dev_update_wr_offset() - Check for any updates in the write offset.
 * @ring:	Ring for the respective context - Channel/Event/Command.
 */
int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring);

/**
 * mhi_dev_process_ring() - Update the Write pointer, fetch the ring elements
 *			    and invoke the client's callback.
 * @ring:	Ring for the respective context - Channel/Event/Command.
 */
int mhi_dev_process_ring(struct mhi_dev_ring *ring);

/**
 * mhi_dev_process_ring_element() - Fetch the ring element at the given
 *			    offset and invoke the client's callback.
 * @ring:	Ring for the respective context - Channel/Event/Command.
 * @offset:	Offset index into the respective ring's cache element.
 */
int mhi_dev_process_ring_element(struct mhi_dev_ring *ring, uint32_t offset);

/**
 * mhi_dev_add_element() - Copy the element to the respective transfer ring's
 *			read pointer and increment the index.
 * @ring:	Ring for the respective context - Channel/Event/Command.
 * @element:	Transfer ring element to be copied to the host memory.
 */
int mhi_dev_add_element(struct mhi_dev_ring *ring,
				union mhi_dev_ring_element_type *element);

/**
 * mhi_transfer_device_to_host() - memcpy equivalent API to transfer data
 *		from device to the host.
 * @dst_pa:	Physical destination address.
 * @src:	Source virtual address.
 * @len:	Number of bytes to be transferred.
 * @mhi:	MHI dev structure.
 */
int mhi_transfer_device_to_host(uint64_t dst_pa, void *src, uint32_t len,
				struct mhi_dev *mhi);

/**
 * mhi_transfer_host_to_device() - memcpy equivalent API to transfer data
 *		from host to the device.
 * @device:	Destination virtual address on the device.
 * @src_pa:	Source physical address on the host.
 * @len:	Number of bytes to be transferred.
 * @mhi:	MHI dev structure.
 */
int mhi_transfer_host_to_device(void *device, uint64_t src_pa, uint32_t len,
				struct mhi_dev *mhi);
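
/*
 * Illustrative sketch (the tre and local_buf variables are hypothetical):
 * when servicing a transfer ring element, channel code might pull the host
 * data a TRE points at into a local device buffer with:
 *
 *	rc = mhi_transfer_host_to_device(local_buf, tre->data_buf_ptr,
 *						tre->len, mhi);
 */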

/**
 * mhi_dev_write_to_host() - memcpy equivalent API to transfer data
 *		from device to host.
 * @host:	Host and device address details.
 * @buf:	Data buffer that needs to be written to the host.
 * @size:	Data buffer size.
 * @mhi:	MHI device structure.
 */
void mhi_dev_write_to_host(struct mhi_addr *host, void *buf, size_t size,
				struct mhi_dev *mhi);

/**
 * mhi_dev_read_from_host() - memcpy equivalent API to transfer data
 *		from host to device.
 * @dst:	Host and device address details.
 * @buf:	Physical (DMA) address of the device buffer the host data is
 *		read into.
 * @size:	Data buffer size.
 */
void mhi_dev_read_from_host(struct mhi_addr *dst, dma_addr_t buf, size_t size);

/**
 * mhi_ring_set_cb() - Registers the callback invoked for each ring element
 *		processed on the given ring.
 * @ring:	Ring for the respective context - Channel/Event/Command.
 * @ring_cb:	Callback invoked with the MHI device, the ring element and a
 *		context pointer.
 */
void mhi_ring_set_cb(struct mhi_dev_ring *ring,
			void (*ring_cb)(struct mhi_dev *dev,
			union mhi_dev_ring_element_type *el, void *ctx));
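
/*
 * Illustrative sketch (the callback name is hypothetical): a ring owner
 * registers a per-element callback before ring processing starts.
 *
 *	static void example_ring_cb(struct mhi_dev *dev,
 *			union mhi_dev_ring_element_type *el, void *ctx)
 *	{
 *		pr_debug("ring element type %d\n", el->generic.type);
 *	}
 *
 *	mhi_ring_set_cb(ring, example_ring_cb);
 */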

/**
 * mhi_ring_set_state() - Sets internal state of the ring for tracking whether
 *		a ring is being processed, idle or uninitialized.
 * @ring:	Ring for the respective context - Channel/Event/Command.
 * @state:	state of type mhi_dev_ring_state.
 */
void mhi_ring_set_state(struct mhi_dev_ring *ring,
			enum mhi_dev_ring_state state);

/**
 * mhi_ring_get_state() - Obtains the internal state of the ring.
 * @ring:	Ring for the respective context - Channel/Event/Command.
 */
enum mhi_dev_ring_state mhi_ring_get_state(struct mhi_dev_ring *ring);

/* MMIO related functions */

/**
 * mhi_dev_mmio_read() - Generic MHI MMIO register read API.
 * @dev:	MHI device structure.
 * @offset:	MHI address offset from base.
 * @reg_value:	Pointer to which the read register value is stored.
 */
int mhi_dev_mmio_read(struct mhi_dev *dev, uint32_t offset,
			uint32_t *reg_value);

/**
 * mhi_dev_mmio_write() - Generic MHI MMIO register write API.
 * @dev:	MHI device structure.
 * @offset:	MHI address offset from base.
 * @val:	Value to be written to the register offset.
 */
int mhi_dev_mmio_write(struct mhi_dev *dev, uint32_t offset,
				uint32_t val);

/**
 * mhi_dev_mmio_masked_write() - Generic MHI MMIO register write masked API.
 * @dev:	MHI device structure.
 * @offset:	MHI address offset from base.
 * @mask:	Register field mask.
 * @shift:	Register field mask shift value.
 * @val:	Value to be written to the register offset.
 */
int mhi_dev_mmio_masked_write(struct mhi_dev *dev, uint32_t offset,
						uint32_t mask, uint32_t shift,
						uint32_t val);
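
/*
 * Illustrative masked write (the offset and field macros below are
 * hypothetical placeholders for values defined by the MMIO register layout):
 *
 *	rc = mhi_dev_mmio_masked_write(dev, MHI_REG_OFFSET, MHI_FIELD_MASK,
 *					MHI_FIELD_SHIFT, value);
 *
 * Only the field selected by the mask/shift pair is updated; the remaining
 * bits at the offset are expected to be preserved.
 */
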
/**
 * mhi_dev_mmio_masked_read() - Generic MHI MMIO register read masked API.
 * @dev:	MHI device structure.
 * @offset:	MHI address offset from base.
 * @mask:	Register field mask.
 * @shift:	Register field mask shift value.
 * @reg_val:	Pointer to which the read register value is stored.
 */
int mhi_dev_mmio_masked_read(struct mhi_dev *dev, uint32_t offset,
						uint32_t mask, uint32_t shift,
						uint32_t *reg_val);
/**
 * mhi_dev_mmio_enable_ctrl_interrupt() - Enable Control interrupt.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_enable_ctrl_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_disable_ctrl_interrupt() - Disable Control interrupt.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_disable_ctrl_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_read_ctrl_status_interrupt() - Read Control interrupt status.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_read_ctrl_status_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_enable_cmdb_interrupt() - Enable Command doorbell interrupt.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_enable_cmdb_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_disable_cmdb_interrupt() - Disable Command doorbell interrupt.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_disable_cmdb_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_read_cmdb_status_interrupt() - Read Command doorbell status.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_read_cmdb_status_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_enable_chdb_a7() - Enable Channel doorbell for a given
 *		channel id.
 * @dev:	MHI device structure.
 * @chdb_id:	Channel id number.
 */
int mhi_dev_mmio_enable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id);
/**
 * mhi_dev_mmio_disable_chdb_a7() - Disable Channel doorbell for a given
 *		channel id.
 * @dev:	MHI device structure.
 * @chdb_id:	Channel id number.
 */
int mhi_dev_mmio_disable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id);

/**
 * mhi_dev_mmio_enable_erdb_a7() - Enable Event ring doorbell for a given
 *		event ring id.
 * @dev:	MHI device structure.
 * @erdb_id:	Event ring id number.
 */
int mhi_dev_mmio_enable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id);

/**
 * mhi_dev_mmio_disable_erdb_a7() - Disable Event ring doorbell for a given
 *		event ring id.
 * @dev:	MHI device structure.
 * @erdb_id:	Event ring id number.
 */
int mhi_dev_mmio_disable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id);

/**
 * mhi_dev_mmio_enable_chdb_interrupts() - Enable all Channel doorbell
 *		interrupts.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_enable_chdb_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_mask_chdb_interrupts() - Mask all Channel doorbell
 *		interrupts.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_mask_chdb_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_read_chdb_status_interrupts() - Read all Channel doorbell
 *		interrupt status registers.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_read_chdb_status_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_enable_erdb_interrupts() - Enable all Event doorbell
 *		interrupts.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_enable_erdb_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_mask_erdb_interrupts() - Mask all Event doorbell
 *		interrupts.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_mask_erdb_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_read_erdb_status_interrupts() - Read all Event doorbell
 *		interrupt status registers.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_read_erdb_status_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_clear_interrupts() - Clear all doorbell interrupts.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_clear_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_get_chc_base() - Fetch the Channel ring context base address.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_get_chc_base(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_get_erc_base() - Fetch the Event ring context base address.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_get_erc_base(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_get_crc_base() - Fetch the Command ring context base address.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_get_crc_base(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_get_ch_db() - Fetch the Write offset of the Channel ring ID.
 * @ring:	Channel ring structure.
 * @wr_offset:	Pointer to which the write offset is stored.
 */
int mhi_dev_mmio_get_ch_db(struct mhi_dev_ring *ring, uint64_t *wr_offset);

/**
 * mhi_dev_mmio_get_erc_db() - Fetch the Write offset of the Event ring ID.
 * @ring:	Event ring structure.
 * @wr_offset:	Pointer to which the write offset is stored.
 */
int mhi_dev_mmio_get_erc_db(struct mhi_dev_ring *ring, uint64_t *wr_offset);

/**
 * mhi_dev_mmio_get_cmd_db() - Fetch the Write offset of the Command ring ID.
 * @ring:	Command ring structure.
 * @wr_offset:	Pointer to which the write offset is stored.
 */
int mhi_dev_mmio_get_cmd_db(struct mhi_dev_ring *ring, uint64_t *wr_offset);

/**
 * mhi_dev_mmio_set_env() - Write the Execution Environment.
 * @dev:	MHI device structure.
 * @value:	Value of the Execution Environment to be written.
 */
int mhi_dev_mmio_set_env(struct mhi_dev *dev, uint32_t value);

/**
 * mhi_dev_mmio_reset() - Reset the MMIO registers; done as part of
 *		initialization.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_reset(struct mhi_dev *dev);

/**
 * mhi_dev_get_mhi_addr() - Fetches the Data and Control region from the Host.
 * @dev:	MHI device structure.
 */
int mhi_dev_get_mhi_addr(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_get_mhi_state() - Fetches the MHI state such as M0/M1/M2/M3.
 * @dev:	MHI device structure.
 * @state:	Pointer of type mhi_dev_state the current state is stored to.
 */
int mhi_dev_mmio_get_mhi_state(struct mhi_dev *dev, enum mhi_dev_state *state);

/**
 * mhi_dev_mmio_init() - Initializes the MMIO and reads the number of event
 *		rings, the supported number of channels, and the Channel and
 *		Event doorbell offsets from the host.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_init(struct mhi_dev *dev);

/**
 * mhi_dev_update_ner() - Update the number of event rings (NER) programmed by
 *		the host.
 * @dev:	MHI device structure.
 */
int mhi_dev_update_ner(struct mhi_dev *dev);

/**
 * mhi_dev_restore_mmio() - Restores the MMIO when the MHI device comes out
 *		of M3.
 * @dev:	MHI device structure.
 */
int mhi_dev_restore_mmio(struct mhi_dev *dev);

/**
 * mhi_dev_backup_mmio() - Backs up the MMIO before an MHI transition to M3.
 * @dev:	MHI device structure.
 */
int mhi_dev_backup_mmio(struct mhi_dev *dev);

/**
 * mhi_dev_dump_mmio() - Memory dump of the MMIO region for debug.
 * @dev:	MHI device structure.
 */
int mhi_dev_dump_mmio(struct mhi_dev *dev);

/**
 * mhi_dev_config_outbound_iatu() - Configure Outbound Address translation
 *		unit between device and host to map the Data and Control
 *		information.
 * @dev:	MHI device structure.
 */
int mhi_dev_config_outbound_iatu(struct mhi_dev *mhi);

/**
 * mhi_dev_send_state_change_event() - Send state change event to the host
 *		such as M0/M1/M2/M3.
 * @dev:	MHI device structure.
 * @state:	MHI state of type mhi_dev_state
 */
int mhi_dev_send_state_change_event(struct mhi_dev *mhi,
					enum mhi_dev_state state);
/**
 * mhi_dev_send_ee_event() - Send Execution environment state change
 *		event to the host.
 * @dev:	MHI device structure.
 * @state:	MHI state of type mhi_dev_execenv
 */
int mhi_dev_send_ee_event(struct mhi_dev *mhi,
					enum mhi_dev_execenv exec_env);
/**
 * mhi_dev_syserr() - System error when unexpected events are received.
 * @dev:	MHI device structure.
 */
int mhi_dev_syserr(struct mhi_dev *mhi);

/**
 * mhi_dev_suspend() - MHI device suspend: stop channel processing at the
 *		transfer ring boundary and update the channel state to
 *		suspended.
 * @dev:	MHI device structure.
 */
int mhi_dev_suspend(struct mhi_dev *mhi);

/**
 * mhi_dev_resume() - MHI device resume to update the channel state to running.
 * @dev:	MHI device structure.
 */
int mhi_dev_resume(struct mhi_dev *mhi);

/**
 * mhi_dev_trigger_hw_acc_wakeup() - Notify the state machine that there is
 *		HW-accelerated data to be sent and prevent MHI suspend.
 * @dev:	MHI device structure.
 */
int mhi_dev_trigger_hw_acc_wakeup(struct mhi_dev *mhi);

/**
 * mhi_pcie_config_db_routing() - Configure Doorbell for Event and Channel
 *		context with IPA when performing an MHI resume.
 * @dev:	MHI device structure.
 */
int mhi_pcie_config_db_routing(struct mhi_dev *mhi);

/**
 * mhi_uci_init() - Initializes the User control interface (UCI) which
 *		exposes device nodes for the supported MHI software
 *		channels.
 */
int mhi_uci_init(void);

void mhi_dev_notify_a7_event(struct mhi_dev *mhi);

#endif /* __MHI_H */