/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 *  A2 service component
 */

#include <net/ip.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/clk.h>
#include <linux/wakelock.h>
#include <mach/sps.h>
#include <mach/msm_smsm.h>
#include <mach/socinfo.h>
#include <mach/ipa.h>
#include "ipa_i.h"

#define A2_NUM_PIPES				6
#define A2_SUMMING_THRESHOLD			4096
#define BUFFER_SIZE				2048
#define NUM_BUFFERS				32
#define BAM_CH_LOCAL_OPEN			0x1
#define BAM_CH_REMOTE_OPEN			0x2
#define BAM_CH_IN_RESET				0x4
#define BAM_MUX_HDR_MAGIC_NO			0x33fc
#define BAM_MUX_HDR_CMD_DATA			0
#define BAM_MUX_HDR_CMD_OPEN			1
#define BAM_MUX_HDR_CMD_CLOSE			2
#define BAM_MUX_HDR_CMD_STATUS			3
#define BAM_MUX_HDR_CMD_OPEN_NO_A2_PC		4
#define LOW_WATERMARK				2
#define HIGH_WATERMARK				4
#define A2_MUX_COMPLETION_TIMEOUT		(60*HZ)
#define ENABLE_DISCONNECT_ACK			0x1
#define A2_MUX_PADDING_LENGTH(len)		(4 - ((len) & 0x3))
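
/*
 * Example of the padding arithmetic: for len == 13 (13 & 0x3 == 1),
 * A2_MUX_PADDING_LENGTH yields 3, rounding the packet up to the next
 * 4-byte boundary. Callers only apply the macro when (len & 0x3) is
 * non-zero, so its degenerate result of 4 for an already-aligned
 * length is never used.
 */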

struct bam_ch_info {
	u32			status;
	a2_mux_notify_cb	notify_cb;
	void			*user_data;
	spinlock_t		lock;
	int			num_tx_pkts;
	int			use_wm;
	u32			v4_hdr_hdl;
	u32			v6_hdr_hdl;
};
struct tx_pkt_info {
	struct sk_buff		*skb;
	char			is_cmd;
	u32			len;
	struct list_head	list_node;
	unsigned		ts_sec;
	unsigned long		ts_nsec;
};
struct bam_mux_hdr {
	u16			magic_num;
	u8			reserved;
	u8			cmd;
	u8			pad_len;
	u8			ch_id;
	u16			pkt_len;
};
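
/*
 * Wire layout of the MUX header (8 bytes; the default struct packing
 * is naturally aligned here):
 *
 *   offset 0: magic_num (u16, network byte order, BAM_MUX_HDR_MAGIC_NO)
 *   offset 2: reserved  (u8)
 *   offset 3: cmd       (u8, one of BAM_MUX_HDR_CMD_*)
 *   offset 4: pad_len   (u8, bytes of trailing padding)
 *   offset 5: ch_id     (u8, logical channel ID)
 *   offset 6: pkt_len   (u16, network byte order, payload length)
 */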

struct a2_mux_context_type {
	u32 tethered_prod;
	u32 tethered_cons;
	u32 embedded_prod;
	u32 embedded_cons;
	int a2_mux_apps_pc_enabled;
	struct work_struct kickoff_ul_wakeup;
	struct work_struct kickoff_ul_power_down;
	struct work_struct kickoff_ul_request_resource;
	struct	bam_ch_info bam_ch[A2_MUX_NUM_CHANNELS];
	struct list_head bam_tx_pool;
	spinlock_t bam_tx_pool_spinlock;
	struct workqueue_struct *a2_mux_tx_workqueue;
	struct workqueue_struct *a2_mux_rx_workqueue;
	int a2_mux_initialized;
	bool bam_is_connected;
	bool bam_connect_in_progress;
	int a2_mux_send_power_vote_on_init_once;
	int a2_mux_sw_bridge_is_connected;
	bool a2_mux_dl_wakeup;
	u32 a2_device_handle;
	struct mutex wakeup_lock;
	struct completion ul_wakeup_ack_completion;
	struct completion bam_connection_completion;
	struct completion request_resource_completion;
	struct completion dl_wakeup_completion;
	rwlock_t ul_wakeup_lock;
	int wait_for_ack;
	struct wake_lock bam_wakelock;
	int a2_pc_disabled;
	spinlock_t wakelock_reference_lock;
	int wakelock_reference_count;
	int a2_pc_disabled_wakelock_skipped;
	int disconnect_ack;
	struct mutex smsm_cb_lock;
	int bam_dmux_uplink_vote;
};
static struct a2_mux_context_type *a2_mux_ctx;

static void handle_a2_mux_cmd(struct sk_buff *rx_skb);

static bool bam_ch_is_open(int index)
{
	return a2_mux_ctx->bam_ch[index].status ==
		(BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN);
}

static bool bam_ch_is_local_open(int index)
{
	return a2_mux_ctx->bam_ch[index].status &
		BAM_CH_LOCAL_OPEN;
}

static bool bam_ch_is_remote_open(int index)
{
	return a2_mux_ctx->bam_ch[index].status &
		BAM_CH_REMOTE_OPEN;
}

static bool bam_ch_is_in_reset(int index)
{
	return a2_mux_ctx->bam_ch[index].status &
		BAM_CH_IN_RESET;
}

static void set_tx_timestamp(struct tx_pkt_info *pkt)
{
	unsigned long long t_now;

	t_now = sched_clock();
	pkt->ts_nsec = do_div(t_now, 1000000000U);
	pkt->ts_sec = (unsigned)t_now;
}

static void verify_tx_queue_is_empty(const char *func)
{
	unsigned long flags;
	struct tx_pkt_info *info;
	int reported = 0;

	spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
	list_for_each_entry(info, &a2_mux_ctx->bam_tx_pool, list_node) {
		if (!reported) {
			IPADBG("%s: tx pool not empty\n", func);
			reported = 1;
		}
		IPADBG("%s: node=%p ts=%u.%09lu\n", __func__,
			&info->list_node, info->ts_sec, info->ts_nsec);
	}
	spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
}

static void grab_wakelock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&a2_mux_ctx->wakelock_reference_lock, flags);
	IPADBG("%s: ref count = %d\n",
		__func__,
		a2_mux_ctx->wakelock_reference_count);
	if (a2_mux_ctx->wakelock_reference_count == 0)
		wake_lock(&a2_mux_ctx->bam_wakelock);
	++a2_mux_ctx->wakelock_reference_count;
	spin_unlock_irqrestore(&a2_mux_ctx->wakelock_reference_lock, flags);
}

static void release_wakelock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&a2_mux_ctx->wakelock_reference_lock, flags);
	if (a2_mux_ctx->wakelock_reference_count == 0) {
		IPAERR("%s: bam_dmux wakelock not locked\n", __func__);
		dump_stack();
		spin_unlock_irqrestore(&a2_mux_ctx->wakelock_reference_lock,
				       flags);
		return;
	}
	IPADBG("%s: ref count = %d\n",
		__func__,
		a2_mux_ctx->wakelock_reference_count);
	--a2_mux_ctx->wakelock_reference_count;
	if (a2_mux_ctx->wakelock_reference_count == 0)
		wake_unlock(&a2_mux_ctx->bam_wakelock);
	spin_unlock_irqrestore(&a2_mux_ctx->wakelock_reference_lock, flags);
}

static void toggle_apps_ack(void)
{
	static unsigned int clear_bit; /* 0 = set the bit, else clear bit */

	IPADBG("%s: apps ack %d->%d\n", __func__,
			clear_bit & 0x1, ~clear_bit & 0x1);
	smsm_change_state(SMSM_APPS_STATE,
				clear_bit & SMSM_A2_POWER_CONTROL_ACK,
				~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
	IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_apps_acks);
	clear_bit = ~clear_bit;
}

static void power_vote(int vote)
{
	IPADBG("%s: curr=%d, vote=%d\n",
		__func__,
		a2_mux_ctx->bam_dmux_uplink_vote, vote);
	if (a2_mux_ctx->bam_dmux_uplink_vote == vote)
		IPADBG("%s: warning - duplicate power vote\n", __func__);
	a2_mux_ctx->bam_dmux_uplink_vote = vote;
	if (vote) {
		smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
		IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_on_reqs_out);
	} else {
		smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
		IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_off_reqs_out);
	}
}

static inline void ul_powerdown(void)
{
	IPADBG("%s: powerdown\n", __func__);
	verify_tx_queue_is_empty(__func__);
	if (a2_mux_ctx->a2_pc_disabled)
		release_wakelock();
	else {
		a2_mux_ctx->wait_for_ack = 1;
		INIT_COMPLETION(a2_mux_ctx->ul_wakeup_ack_completion);
		power_vote(0);
	}
}

static void ul_wakeup(void)
{
	int ret;

	mutex_lock(&a2_mux_ctx->wakeup_lock);
	if (a2_mux_ctx->bam_is_connected &&
				!a2_mux_ctx->bam_connect_in_progress) {
		IPADBG("%s Already awake\n", __func__);
		mutex_unlock(&a2_mux_ctx->wakeup_lock);
		return;
	}
	if (a2_mux_ctx->a2_pc_disabled) {
		/*
		 * don't grab the wakelock the first time because it is
		 * already grabbed when a2 powers on
		 */
		if (likely(a2_mux_ctx->a2_pc_disabled_wakelock_skipped))
			grab_wakelock();
		else
			a2_mux_ctx->a2_pc_disabled_wakelock_skipped = 1;
		mutex_unlock(&a2_mux_ctx->wakeup_lock);
		return;
	}
	/*
	 * Must wait for the previous power-down request to have been
	 * acked. Chances are the ack already came in, in which case this
	 * falls through instead of waiting.
	 */
	if (a2_mux_ctx->wait_for_ack) {
		IPADBG("%s waiting for previous ack\n", __func__);
		ret = wait_for_completion_timeout(
					&a2_mux_ctx->ul_wakeup_ack_completion,
					A2_MUX_COMPLETION_TIMEOUT);
		a2_mux_ctx->wait_for_ack = 0;
		if (unlikely(ret == 0)) {
			IPAERR("%s previous ack from modem timed out\n",
				__func__);
			goto bail;
		}
	}
	INIT_COMPLETION(a2_mux_ctx->ul_wakeup_ack_completion);
	power_vote(1);
	IPADBG("%s waiting for wakeup ack\n", __func__);
	ret = wait_for_completion_timeout(&a2_mux_ctx->ul_wakeup_ack_completion,
					A2_MUX_COMPLETION_TIMEOUT);
	if (unlikely(ret == 0)) {
		IPAERR("%s wakeup ack from modem timed out\n", __func__);
		goto bail;
	}
	INIT_COMPLETION(a2_mux_ctx->bam_connection_completion);
	if (!a2_mux_ctx->a2_mux_sw_bridge_is_connected) {
		ret = wait_for_completion_timeout(
			&a2_mux_ctx->bam_connection_completion,
			A2_MUX_COMPLETION_TIMEOUT);
		if (unlikely(ret == 0)) {
			IPAERR("%s modem power on timed out\n", __func__);
			goto bail;
		}
	}
	IPADBG("%s complete\n", __func__);
	mutex_unlock(&a2_mux_ctx->wakeup_lock);
	return;
bail:
	mutex_unlock(&a2_mux_ctx->wakeup_lock);
	BUG();
	return;
}
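
/*
 * Summary of the uplink wakeup handshake implemented in ul_wakeup():
 * the apps side asserts SMSM_A2_POWER_CONTROL via power_vote(1), waits
 * for the modem's SMSM_A2_POWER_CONTROL_ACK (ul_wakeup_ack_completion,
 * completed from a2_mux_smsm_ack_cb()), and finally waits for the SW
 * bridge to come up (bam_connection_completion, completed from
 * connect_to_bam() once the modem toggles SMSM_A2_POWER_CONTROL).
 */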

static void a2_mux_write_done(bool is_tethered, struct sk_buff *skb)
{
	struct tx_pkt_info *info;
	enum a2_mux_logical_channel_id lcid;
	unsigned long event_data;
	unsigned long flags;

	spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
	info = list_first_entry(&a2_mux_ctx->bam_tx_pool,
			struct tx_pkt_info, list_node);
	if (unlikely(info->skb != skb)) {
		struct tx_pkt_info *errant_pkt;

		IPAERR("tx_pool mismatch next=%p list_node=%p, ts=%u.%09lu\n",
				a2_mux_ctx->bam_tx_pool.next,
				&info->list_node,
				info->ts_sec, info->ts_nsec
				);

		list_for_each_entry(errant_pkt,
				    &a2_mux_ctx->bam_tx_pool, list_node) {
			IPAERR("%s: node=%p ts=%u.%09lu\n", __func__,
			&errant_pkt->list_node, errant_pkt->ts_sec,
			errant_pkt->ts_nsec);
			if (errant_pkt->skb == skb)
				info = errant_pkt;

		}
		spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
				       flags);
		BUG();
	}
	list_del(&info->list_node);
	spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
	if (info->is_cmd) {
		dev_kfree_skb_any(info->skb);
		kfree(info);
		return;
	}
	skb = info->skb;
	kfree(info);
	event_data = (unsigned long)(skb);
	if (is_tethered)
		lcid = A2_MUX_TETHERED_0;
	else {
		struct bam_mux_hdr *hdr = (struct bam_mux_hdr *)skb->data;
		lcid = (enum a2_mux_logical_channel_id) hdr->ch_id;
	}
	spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
	a2_mux_ctx->bam_ch[lcid].num_tx_pkts--;
	spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
	if (a2_mux_ctx->bam_ch[lcid].notify_cb)
		a2_mux_ctx->bam_ch[lcid].notify_cb(
			a2_mux_ctx->bam_ch[lcid].user_data, A2_MUX_WRITE_DONE,
							event_data);
	else
		dev_kfree_skb_any(skb);
}

static bool a2_mux_kickoff_ul_power_down(void)
{
	bool is_connected;

	write_lock(&a2_mux_ctx->ul_wakeup_lock);
	if (a2_mux_ctx->bam_connect_in_progress) {
		a2_mux_ctx->bam_is_connected = false;
		is_connected = true;
	} else {
		is_connected = a2_mux_ctx->bam_is_connected;
		a2_mux_ctx->bam_is_connected = false;
		if (is_connected) {
			a2_mux_ctx->bam_connect_in_progress = true;
			queue_work(a2_mux_ctx->a2_mux_tx_workqueue,
				&a2_mux_ctx->kickoff_ul_power_down);
		}
	}
	write_unlock(&a2_mux_ctx->ul_wakeup_lock);
	return is_connected;
}

static bool a2_mux_kickoff_ul_wakeup(void)
{
	bool is_connected;

	write_lock(&a2_mux_ctx->ul_wakeup_lock);
	if (a2_mux_ctx->bam_connect_in_progress) {
		a2_mux_ctx->bam_is_connected = true;
		is_connected = false;
	} else {
		is_connected = a2_mux_ctx->bam_is_connected;
		a2_mux_ctx->bam_is_connected = true;
		if (!is_connected) {
			a2_mux_ctx->bam_connect_in_progress = true;
			queue_work(a2_mux_ctx->a2_mux_tx_workqueue,
				&a2_mux_ctx->kickoff_ul_wakeup);
		}
	}
	write_unlock(&a2_mux_ctx->ul_wakeup_lock);
	return is_connected;
}

static void kickoff_ul_power_down_func(struct work_struct *work)
{
	bool is_connected;

	IPADBG("%s: UL active - forcing powerdown\n", __func__);
	ul_powerdown();
	write_lock(&a2_mux_ctx->ul_wakeup_lock);
	is_connected = a2_mux_ctx->bam_is_connected;
	a2_mux_ctx->bam_is_connected = false;
	a2_mux_ctx->bam_connect_in_progress = false;
	write_unlock(&a2_mux_ctx->ul_wakeup_lock);
	if (is_connected)
		a2_mux_kickoff_ul_wakeup();
	else
		ipa_rm_notify_completion(IPA_RM_RESOURCE_RELEASED,
						IPA_RM_RESOURCE_A2_CONS);
}

static void kickoff_ul_wakeup_func(struct work_struct *work)
{
	bool is_connected;
	int ret;

	ul_wakeup();
	write_lock(&a2_mux_ctx->ul_wakeup_lock);
	is_connected = a2_mux_ctx->bam_is_connected;
	a2_mux_ctx->bam_is_connected = true;
	a2_mux_ctx->bam_connect_in_progress = false;
	write_unlock(&a2_mux_ctx->ul_wakeup_lock);
	if (is_connected)
		ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
				IPA_RM_RESOURCE_A2_CONS);
	INIT_COMPLETION(a2_mux_ctx->dl_wakeup_completion);
	if (!a2_mux_ctx->a2_mux_dl_wakeup) {
		ret = wait_for_completion_timeout(
			&a2_mux_ctx->dl_wakeup_completion,
			A2_MUX_COMPLETION_TIMEOUT);
		if (unlikely(ret == 0)) {
			IPAERR("%s timeout waiting for A2 PROD granted\n",
				__func__);
			BUG();
			return;
		}
	}
	if (!is_connected)
		a2_mux_kickoff_ul_power_down();
}

static void kickoff_ul_request_resource_func(struct work_struct *work)
{
	int ret;

	INIT_COMPLETION(a2_mux_ctx->request_resource_completion);
	ret = ipa_rm_request_resource(IPA_RM_RESOURCE_A2_PROD);
	if (ret < 0 && ret != -EINPROGRESS) {
		IPAERR("%s: ipa_rm_request_resource failed %d\n", __func__,
		       ret);
		return;
	}
	if (ret == -EINPROGRESS) {
		ret = wait_for_completion_timeout(
			&a2_mux_ctx->request_resource_completion,
			A2_MUX_COMPLETION_TIMEOUT);
		if (unlikely(ret == 0)) {
			IPAERR("%s timeout waiting for A2 PROD granted\n",
				__func__);
			BUG();
			return;
		}
	}
	toggle_apps_ack();
	a2_mux_ctx->a2_mux_dl_wakeup = true;
	complete_all(&a2_mux_ctx->dl_wakeup_completion);
}

static void ipa_embedded_notify(void *priv,
				enum ipa_dp_evt_type evt,
				unsigned long data)
{
	switch (evt) {
	case IPA_RECEIVE:
		handle_a2_mux_cmd((struct sk_buff *)data);
		break;
	case IPA_WRITE_DONE:
		a2_mux_write_done(false, (struct sk_buff *)data);
		break;
	default:
		IPAERR("%s: Unknown event %d\n", __func__, evt);
		break;
	}
}

static void ipa_tethered_notify(void *priv,
				enum ipa_dp_evt_type evt,
				unsigned long data)
{
	IPADBG("%s: event = %d\n", __func__, evt);
	switch (evt) {
	case IPA_RECEIVE:
		if (a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].notify_cb)
			a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].notify_cb(
				a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].user_data,
				A2_MUX_RECEIVE,
				data);
		break;
	case IPA_WRITE_DONE:
		a2_mux_write_done(true, (struct sk_buff *)data);
		break;
	default:
		IPAERR("%s: Unknown event %d\n", __func__, evt);
		break;
	}
}

static int connect_to_bam(void)
{
	int ret;
	struct ipa_sys_connect_params connect_params;

	IPAERR("%s:\n", __func__);
	if (a2_mux_ctx->a2_mux_sw_bridge_is_connected) {
		IPAERR("%s: SW bridge is already UP\n",
				__func__);
		return -EFAULT;
	}
	if (sps_ctrl_bam_dma_clk(true))
		WARN_ON(1);
	memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
	connect_params.client = IPA_CLIENT_A2_TETHERED_CONS;
	connect_params.notify = ipa_tethered_notify;
	connect_params.desc_fifo_sz = 0x800;
	ret = ipa_bridge_setup(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED,
			&connect_params,
			&a2_mux_ctx->tethered_prod);
	if (ret) {
		IPAERR("%s: IPA bridge tethered UL failed to connect: %d\n",
				__func__, ret);
		goto bridge_tethered_ul_failed;
	}
	memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
	connect_params.ipa_ep_cfg.mode.mode = IPA_DMA;
	connect_params.ipa_ep_cfg.mode.dst = IPA_CLIENT_USB_CONS;
	connect_params.client = IPA_CLIENT_A2_TETHERED_PROD;
	connect_params.notify = ipa_tethered_notify;
	connect_params.desc_fifo_sz = 0x800;
	ret = ipa_bridge_setup(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED,
			&connect_params,
			&a2_mux_ctx->tethered_cons);
	if (ret) {
		IPAERR("%s: IPA bridge tethered DL failed to connect: %d\n",
				__func__, ret);
		goto bridge_tethered_dl_failed;
	}
	memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
	connect_params.ipa_ep_cfg.hdr.hdr_len = sizeof(struct bam_mux_hdr);
	connect_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
	connect_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 6;
	connect_params.client = IPA_CLIENT_A2_EMBEDDED_CONS;
	connect_params.notify = ipa_embedded_notify;
	connect_params.desc_fifo_sz = 0x800;
	ret = ipa_bridge_setup(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_EMBEDDED,
			&connect_params,
			&a2_mux_ctx->embedded_prod);
	if (ret) {
		IPAERR("%s: IPA bridge embedded UL failed to connect: %d\n",
				__func__, ret);
		goto bridge_embedded_ul_failed;
	}
	memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
	connect_params.ipa_ep_cfg.hdr.hdr_len = sizeof(struct bam_mux_hdr);
	connect_params.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
	connect_params.ipa_ep_cfg.hdr.hdr_ofst_metadata = 4;
	connect_params.client = IPA_CLIENT_A2_EMBEDDED_PROD;
	connect_params.notify = ipa_embedded_notify;
	connect_params.desc_fifo_sz = 0x800;
	ret = ipa_bridge_setup(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_EMBEDDED,
			&connect_params,
			&a2_mux_ctx->embedded_cons);
	if (ret) {
		IPAERR("%s: IPA bridge embedded DL failed to connect: %d\n",
		       __func__, ret);
		goto bridge_embedded_dl_failed;
	}
	a2_mux_ctx->a2_mux_sw_bridge_is_connected = 1;
	complete_all(&a2_mux_ctx->bam_connection_completion);
	return 0;

bridge_embedded_dl_failed:
	ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_EMBEDDED,
			a2_mux_ctx->embedded_prod);
bridge_embedded_ul_failed:
	ipa_bridge_teardown(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED,
			a2_mux_ctx->tethered_cons);
bridge_tethered_dl_failed:
	ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED,
			a2_mux_ctx->tethered_prod);
bridge_tethered_ul_failed:
	if (sps_ctrl_bam_dma_clk(false))
		WARN_ON(1);
	return ret;
}

static int disconnect_to_bam(void)
{
	int ret;

	IPAERR("%s\n", __func__);
	if (!a2_mux_ctx->a2_mux_sw_bridge_is_connected) {
		IPAERR("%s: SW bridge is already DOWN\n",
				__func__);
		return -EFAULT;
	}
	ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED,
			a2_mux_ctx->tethered_prod);
	if (ret) {
		IPAERR("%s: IPA bridge tethered UL failed to disconnect: %d\n",
				__func__, ret);
		return ret;
	}
	ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED,
			a2_mux_ctx->tethered_cons);
	if (ret) {
		IPAERR("%s: IPA bridge tethered DL failed to disconnect: %d\n",
				__func__, ret);
		return ret;
	}
	ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_EMBEDDED,
			a2_mux_ctx->embedded_prod);
	if (ret) {
		IPAERR("%s: IPA bridge embedded UL failed to disconnect: %d\n",
				__func__, ret);
		return ret;
	}
	ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_EMBEDDED,
			a2_mux_ctx->embedded_cons);
	if (ret) {
		IPAERR("%s: IPA bridge embedded DL failed to disconnect: %d\n",
				__func__, ret);
		return ret;
	}
	if (sps_ctrl_bam_dma_clk(false))
		WARN_ON(1);
	verify_tx_queue_is_empty(__func__);
	(void) ipa_rm_release_resource(IPA_RM_RESOURCE_A2_PROD);
	if (a2_mux_ctx->disconnect_ack)
		toggle_apps_ack();
	a2_mux_ctx->a2_mux_dl_wakeup = false;
	a2_mux_ctx->a2_mux_sw_bridge_is_connected = 0;
	complete_all(&a2_mux_ctx->bam_connection_completion);
	return 0;
}

static void a2_mux_smsm_cb(void *priv,
		u32 old_state,
		u32 new_state)
{
	static int last_processed_state;

	mutex_lock(&a2_mux_ctx->smsm_cb_lock);
	IPADBG("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
			new_state);
	if (last_processed_state == (new_state & SMSM_A2_POWER_CONTROL)) {
		IPADBG("%s: already processed this state\n", __func__);
		mutex_unlock(&a2_mux_ctx->smsm_cb_lock);
		return;
	}
	last_processed_state = new_state & SMSM_A2_POWER_CONTROL;
	if (new_state & SMSM_A2_POWER_CONTROL) {
		IPADBG("%s: MODEM PWR CTRL 1\n", __func__);
		IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_on_reqs_in);
		grab_wakelock();
		(void) connect_to_bam();
		queue_work(a2_mux_ctx->a2_mux_rx_workqueue,
			   &a2_mux_ctx->kickoff_ul_request_resource);
	} else if (!(new_state & SMSM_A2_POWER_CONTROL)) {
		IPADBG("%s: MODEM PWR CTRL 0\n", __func__);
		IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_off_reqs_in);
		(void) disconnect_to_bam();
		release_wakelock();
	} else {
		IPAERR("%s: unsupported state change\n", __func__);
	}
	mutex_unlock(&a2_mux_ctx->smsm_cb_lock);
}

static void a2_mux_smsm_ack_cb(void *priv, u32 old_state,
						u32 new_state)
{
	IPADBG("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
			new_state);
	IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_modem_acks);
	complete_all(&a2_mux_ctx->ul_wakeup_ack_completion);
}

static int a2_mux_pm_rm_request_resource(void)
{
	int result = 0;
	bool is_connected;

	is_connected = a2_mux_kickoff_ul_wakeup();
	if (!is_connected)
		result = -EINPROGRESS;
	return result;
}

static int a2_mux_pm_rm_release_resource(void)
{
	int result = 0;
	bool is_connected;

	is_connected = a2_mux_kickoff_ul_power_down();
	if (is_connected)
		result = -EINPROGRESS;
	return result;
}

static void a2_mux_pm_rm_notify_cb(void *user_data,
		enum ipa_rm_event event,
		unsigned long data)
{
	switch (event) {
	case IPA_RM_RESOURCE_GRANTED:
		IPADBG("%s: PROD GRANTED CB\n", __func__);
		complete_all(&a2_mux_ctx->request_resource_completion);
		break;
	case IPA_RM_RESOURCE_RELEASED:
		IPADBG("%s: PROD RELEASED CB\n", __func__);
		break;
	default:
		return;
	}
}

static int a2_mux_pm_initialize_rm(void)
{
	struct ipa_rm_create_params create_params;
	int result;

	memset(&create_params, 0, sizeof(create_params));
	create_params.name = IPA_RM_RESOURCE_A2_PROD;
	create_params.reg_params.notify_cb = &a2_mux_pm_rm_notify_cb;
	result = ipa_rm_create_resource(&create_params);
	if (result)
		goto bail;
	memset(&create_params, 0, sizeof(create_params));
	create_params.name = IPA_RM_RESOURCE_A2_CONS;
	create_params.release_resource = &a2_mux_pm_rm_release_resource;
	create_params.request_resource = &a2_mux_pm_rm_request_resource;
	result = ipa_rm_create_resource(&create_params);
bail:
	return result;
}

static void a2_mux_process_data(struct sk_buff *rx_skb)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	unsigned long event_data;

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;
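	/*
	 * Strip the 8-byte MUX header by adjusting the skb pointers in
	 * place (equivalent to an skb_pull() of the header followed by
	 * trimming the skb to the payload length carried in the header).
	 */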
	rx_skb->data = (unsigned char *)(rx_hdr + 1);
	rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
	rx_skb->len = rx_hdr->pkt_len;
	rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);
	event_data = (unsigned long)(rx_skb);
	spin_lock_irqsave(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock, flags);
	if (a2_mux_ctx->bam_ch[rx_hdr->ch_id].notify_cb)
		a2_mux_ctx->bam_ch[rx_hdr->ch_id].notify_cb(
			a2_mux_ctx->bam_ch[rx_hdr->ch_id].user_data,
			A2_MUX_RECEIVE,
			event_data);
	else
		dev_kfree_skb_any(rx_skb);
	spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock,
			       flags);
}

static void handle_a2_mux_cmd_open(struct bam_mux_hdr *rx_hdr)
{
	unsigned long flags;

	spin_lock_irqsave(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock, flags);
	a2_mux_ctx->bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
	a2_mux_ctx->bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
	spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock,
			       flags);
}

static void handle_a2_mux_cmd(struct sk_buff *rx_skb)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;
	IPADBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n",
			__func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
	rx_hdr->magic_num = ntohs(rx_hdr->magic_num);
	rx_hdr->pkt_len = ntohs(rx_hdr->pkt_len);
	IPADBG("%s: converted to host order magic_num=%d, pkt_len=%d\n",
	    __func__, rx_hdr->magic_num, rx_hdr->pkt_len);
	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
		IPAERR("bad hdr magic %x rvd %d cmd %d pad %d ch %d len %d\n",
		       rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		return;
	}
	if (rx_hdr->ch_id >= A2_MUX_NUM_CHANNELS) {
		IPAERR("bad LCID %d rsvd %d cmd %d pad %d ch %d len %d\n",
			rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		return;
	}
	switch (rx_hdr->cmd) {
	case BAM_MUX_HDR_CMD_DATA:
		a2_mux_process_data(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN:
		IPADBG("%s: opening cid %d PC enabled\n", __func__,
				rx_hdr->ch_id);
		handle_a2_mux_cmd_open(rx_hdr);
		if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
			IPADBG("%s: deactivating disconnect ack\n",
								__func__);
			a2_mux_ctx->disconnect_ack = 0;
		}
		dev_kfree_skb_any(rx_skb);
		if (a2_mux_ctx->a2_mux_send_power_vote_on_init_once) {
			kickoff_ul_wakeup_func(NULL);
			a2_mux_ctx->a2_mux_send_power_vote_on_init_once = 0;
		}
		break;
	case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
		IPADBG("%s: opening cid %d PC disabled\n", __func__,
				rx_hdr->ch_id);
		if (!a2_mux_ctx->a2_pc_disabled) {
			a2_mux_ctx->a2_pc_disabled = 1;
			ul_wakeup();
		}
		handle_a2_mux_cmd_open(rx_hdr);
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		IPADBG("%s: closing cid %d\n", __func__,
				rx_hdr->ch_id);
		spin_lock_irqsave(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock,
				  flags);
		a2_mux_ctx->bam_ch[rx_hdr->ch_id].status &=
			~BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(
			&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock, flags);
		dev_kfree_skb_any(rx_skb);
		break;
	default:
		IPAERR("bad hdr.magic %x rvd %d cmd %d pad %d ch %d len %d\n",
			rx_hdr->magic_num, rx_hdr->reserved,
			rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
			rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		return;
	}
}

/*
 * a2_mux_write_cmd() - queue a command packet for transmission
 *
 * Consumes @data on all paths, success or failure, so callers must not
 * free it themselves.
 */
static int a2_mux_write_cmd(void *data, u32 len)
{
	int rc;
	struct tx_pkt_info *pkt;
	unsigned long flags;

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		IPAERR("%s: mem alloc for tx_pkt_info failed\n", __func__);
		kfree(data);
		return -ENOMEM;
	}
	pkt->skb = __dev_alloc_skb(len, GFP_NOWAIT | __GFP_NOWARN);
	if (pkt->skb == NULL) {
		IPAERR("%s: unable to alloc skb\n", __func__);
		kfree(pkt);
		kfree(data);
		return -ENOMEM;
	}
	memcpy(skb_put(pkt->skb, len), data, len);
	kfree(data);
	pkt->len = len;
	pkt->is_cmd = 1;
	set_tx_timestamp(pkt);
	spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &a2_mux_ctx->bam_tx_pool);
	rc = ipa_tx_dp(IPA_CLIENT_A2_EMBEDDED_CONS, pkt->skb, NULL);
	if (rc) {
		IPAERR("%s ipa_tx_dp failed rc=%d\n",
			__func__, rc);
		list_del(&pkt->list_node);
		spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
				       flags);
		dev_kfree_skb_any(pkt->skb);
		kfree(pkt);
	} else {
		spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
				       flags);
	}
	return rc;
}

/**
 * a2_mux_get_tethered_client_handles() - provide the tethered
 *		pipe handles for post-setup configuration
 * @lcid: logical channel ID
 * @clnt_cons_handle: [out] consumer pipe handle
 * @clnt_prod_handle: [out] producer pipe handle
 *
 * Returns: 0 on success, negative on failure
 */
int a2_mux_get_tethered_client_handles(enum a2_mux_logical_channel_id lcid,
		unsigned int *clnt_cons_handle,
		unsigned int *clnt_prod_handle)
{
	if (!a2_mux_ctx->a2_mux_initialized || lcid != A2_MUX_TETHERED_0)
		return -ENODEV;
	if (!clnt_cons_handle || !clnt_prod_handle)
		return -EINVAL;
	*clnt_prod_handle = a2_mux_ctx->tethered_prod;
	*clnt_cons_handle = a2_mux_ctx->tethered_cons;
	return 0;
}
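
/*
 * Example (hypothetical caller; configure_tethered_pipes() is not part
 * of this driver):
 *
 *	unsigned int cons, prod;
 *
 *	if (!a2_mux_get_tethered_client_handles(A2_MUX_TETHERED_0,
 *						&cons, &prod))
 *		configure_tethered_pipes(cons, prod);
 */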

/**
 * a2_mux_write() - send a packet to A2,
 *		adding a MUX header according to the provided lcid
 * @id: logical channel ID
 * @skb: SKB to write
 *
 * Returns: 0 on success, negative on failure
 */
int a2_mux_write(enum a2_mux_logical_channel_id id, struct sk_buff *skb)
{
	int rc = 0;
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb = NULL;
	struct tx_pkt_info *pkt;
	bool is_connected;

	if (id >= A2_MUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!a2_mux_ctx->a2_mux_initialized)
		return -ENODEV;
	spin_lock_irqsave(&a2_mux_ctx->bam_ch[id].lock, flags);
	if (!bam_ch_is_open(id)) {
		spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags);
		IPAERR("%s: port not open: %d\n",
		       __func__,
		       a2_mux_ctx->bam_ch[id].status);
		return -ENODEV;
	}
	if (a2_mux_ctx->bam_ch[id].use_wm &&
	    (a2_mux_ctx->bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags);
		IPAERR("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags);
	read_lock(&a2_mux_ctx->ul_wakeup_lock);
	is_connected = a2_mux_ctx->bam_is_connected &&
					!a2_mux_ctx->bam_connect_in_progress;
	read_unlock(&a2_mux_ctx->ul_wakeup_lock);
	if (!is_connected)
		return -ENODEV;
	if (id != A2_MUX_TETHERED_0) {
		/*
		 * if the skb does not have enough tailroom for padding,
		 * copy it into a new expanded skb
		 */
		if ((skb->len & 0x3) &&
		    (skb_tailroom(skb) < A2_MUX_PADDING_LENGTH(skb->len))) {
			new_skb = skb_copy_expand(skb, skb_headroom(skb),
					A2_MUX_PADDING_LENGTH(skb->len),
					GFP_ATOMIC);
			if (new_skb == NULL) {
				IPAERR("%s: cannot allocate skb\n", __func__);
				rc = -ENOMEM;
				goto write_fail;
			}
			dev_kfree_skb_any(skb);
			skb = new_skb;
		}
		hdr = (struct bam_mux_hdr *)skb_push(
					skb, sizeof(struct bam_mux_hdr));
		/*
		 * the caller should allocate room for the hdr and padding;
		 * headroom for the hdr is reliably present, while tailroom
		 * for the padding is the tricky part (ensured above)
		 */
		hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
		hdr->cmd = BAM_MUX_HDR_CMD_DATA;
		hdr->reserved = 0;
		hdr->ch_id = id;
		hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
		if (skb->len & 0x3)
			skb_put(skb, A2_MUX_PADDING_LENGTH(skb->len));
		hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) +
					   hdr->pkt_len);
		IPADBG("data %p, tail %p skb len %d pkt len %d pad len %d\n",
		    skb->data, skb->tail, skb->len,
		    hdr->pkt_len, hdr->pad_len);
		hdr->magic_num = htons(hdr->magic_num);
		hdr->pkt_len = htons(hdr->pkt_len);
		IPADBG("convert to network order magic_num=%d, pkt_len=%d\n",
		    hdr->magic_num, hdr->pkt_len);
	}
	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		IPAERR("%s: mem alloc for tx_pkt_info failed\n", __func__);
		rc = -ENOMEM;
		goto write_fail2;
	}
	pkt->skb = skb;
	pkt->is_cmd = 0;
	set_tx_timestamp(pkt);
	spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &a2_mux_ctx->bam_tx_pool);
	if (id == A2_MUX_TETHERED_0)
		rc = ipa_tx_dp(IPA_CLIENT_A2_TETHERED_CONS, skb, NULL);
	else
		rc = ipa_tx_dp(IPA_CLIENT_A2_EMBEDDED_CONS, skb, NULL);
	if (rc) {
		IPAERR("%s ipa_tx_dp failed rc=%d\n",
			__func__, rc);
		list_del(&pkt->list_node);
		spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
				       flags);
		goto write_fail3;
	} else {
		spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
				       flags);
		spin_lock_irqsave(&a2_mux_ctx->bam_ch[id].lock, flags);
		a2_mux_ctx->bam_ch[id].num_tx_pkts++;
		spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags);
	}
	return 0;

write_fail3:
	kfree(pkt);
write_fail2:
	if (new_skb)
		dev_kfree_skb_any(new_skb);
write_fail:
	return rc;
}

/**
 * a2_mux_add_hdr() - called when MUX header should
 *		be added
 * @lcid: logical channel ID
 *
 * Returns: 0 on success, negative on failure
 */
static int a2_mux_add_hdr(enum a2_mux_logical_channel_id lcid)
{
	struct ipa_ioc_add_hdr *hdrs;
	struct ipa_hdr_add *ipv4_hdr;
	struct ipa_hdr_add *ipv6_hdr;
	struct bam_mux_hdr *dmux_hdr;
	int rc;

	IPADBG("%s: ch %d\n", __func__, lcid);

	if (lcid < A2_MUX_WWAN_0 || lcid > A2_MUX_WWAN_7) {
		IPAERR("%s: invalid lcid passed: %d\n", __func__, lcid);
		return -EINVAL;
	}

	hdrs = kzalloc(sizeof(struct ipa_ioc_add_hdr) +
		       2 * sizeof(struct ipa_hdr_add), GFP_KERNEL);
	if (!hdrs) {
		IPAERR("%s: hdr allocation fail for ch %d\n", __func__, lcid);
		return -ENOMEM;
	}

	ipv4_hdr = &hdrs->hdr[0];
	ipv6_hdr = &hdrs->hdr[1];

	dmux_hdr = (struct bam_mux_hdr *)ipv4_hdr->hdr;
	snprintf(ipv4_hdr->name, IPA_RESOURCE_NAME_MAX, "%s%d",
		 A2_MUX_HDR_NAME_V4_PREF, lcid);
	dmux_hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	dmux_hdr->cmd = BAM_MUX_HDR_CMD_DATA;
	dmux_hdr->reserved = 0;
	dmux_hdr->ch_id = lcid;

	/* Packet length is added by IPA */
	dmux_hdr->pkt_len = 0;
	dmux_hdr->pad_len = 0;

	dmux_hdr->magic_num = htons(dmux_hdr->magic_num);
	IPADBG("converted to network order magic_num=%d\n",
		    dmux_hdr->magic_num);

	ipv4_hdr->hdr_len = sizeof(struct bam_mux_hdr);
	ipv4_hdr->is_partial = 0;

	dmux_hdr = (struct bam_mux_hdr *)ipv6_hdr->hdr;
	snprintf(ipv6_hdr->name, IPA_RESOURCE_NAME_MAX, "%s%d",
		 A2_MUX_HDR_NAME_V6_PREF, lcid);
	dmux_hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	dmux_hdr->cmd = BAM_MUX_HDR_CMD_DATA;
	dmux_hdr->reserved = 0;
	dmux_hdr->ch_id = lcid;

	/* Packet length is added by IPA */
	dmux_hdr->pkt_len = 0;
	dmux_hdr->pad_len = 0;

	dmux_hdr->magic_num = htons(dmux_hdr->magic_num);
	IPADBG("converted to network order magic_num=%d\n",
		    dmux_hdr->magic_num);

	ipv6_hdr->hdr_len = sizeof(struct bam_mux_hdr);
	ipv6_hdr->is_partial = 0;

	hdrs->commit = 1;
	hdrs->num_hdrs = 2;

	rc = ipa_add_hdr(hdrs);
	if (rc) {
		IPAERR("Fail on Header-Insertion(%d)\n", rc);
		goto bail;
	}

	if (ipv4_hdr->status) {
		IPAERR("Fail on Header-Insertion ipv4(%d)\n",
				ipv4_hdr->status);
		rc = ipv4_hdr->status;
		goto bail;
	}

	if (ipv6_hdr->status) {
		IPAERR("%s: Fail on Header-Insertion ipv6(%d)\n", __func__,
				ipv6_hdr->status);
		rc = ipv6_hdr->status;
		goto bail;
	}

	a2_mux_ctx->bam_ch[lcid].v4_hdr_hdl = ipv4_hdr->hdr_hdl;
	a2_mux_ctx->bam_ch[lcid].v6_hdr_hdl = ipv6_hdr->hdr_hdl;

	rc = 0;
bail:
	kfree(hdrs);
	return rc;
}

/**
 * a2_mux_del_hdr() - called when MUX header should
 *		be removed
 * @lcid: logical channel ID
 *
 * Returns: 0 on success, negative on failure
 */
static int a2_mux_del_hdr(enum a2_mux_logical_channel_id lcid)
{
	struct ipa_ioc_del_hdr *hdrs;
	struct ipa_hdr_del *ipv4_hdl;
	struct ipa_hdr_del *ipv6_hdl;
	int rc;

	IPADBG("%s: ch %d\n", __func__, lcid);

	if (lcid < A2_MUX_WWAN_0 || lcid > A2_MUX_WWAN_7) {
		IPAERR("invalid lcid passed: %d\n", lcid);
		return -EINVAL;
	}

	hdrs = kzalloc(sizeof(struct ipa_ioc_del_hdr) +
		       2 * sizeof(struct ipa_hdr_del), GFP_KERNEL);
	if (!hdrs) {
		IPAERR("hdr alloc fail for ch %d\n", lcid);
		return -ENOMEM;
	}

	ipv4_hdl = &hdrs->hdl[0];
	ipv6_hdl = &hdrs->hdl[1];

	ipv4_hdl->hdl = a2_mux_ctx->bam_ch[lcid].v4_hdr_hdl;
	ipv6_hdl->hdl = a2_mux_ctx->bam_ch[lcid].v6_hdr_hdl;

	hdrs->commit = 1;
	hdrs->num_hdls = 2;

	rc = ipa_del_hdr(hdrs);
	if (rc) {
		IPAERR("Fail on Del Header-Insertion(%d)\n", rc);
		goto bail;
	}

	if (ipv4_hdl->status) {
		IPAERR("Fail on Del Header-Insertion ipv4(%d)\n",
				ipv4_hdl->status);
		rc = ipv4_hdl->status;
		goto bail;
	}
	a2_mux_ctx->bam_ch[lcid].v4_hdr_hdl = 0;

	if (ipv6_hdl->status) {
		IPAERR("Fail on Del Header-Insertion ipv6(%d)\n",
				ipv6_hdl->status);
		rc = ipv6_hdl->status;
		goto bail;
	}
	a2_mux_ctx->bam_ch[lcid].v6_hdr_hdl = 0;

	rc = 0;
bail:
	kfree(hdrs);
	return rc;
}

/**
 * a2_mux_open_channel() - opens logical channel
 *		to A2
 * @lcid: logical channel ID
 * @user_data: user-provided data for the CB below
 * @notify_cb: user provided notification CB
 *
 * Returns: 0 on success, negative on failure
 */
int a2_mux_open_channel(enum a2_mux_logical_channel_id lcid,
			void *user_data,
			a2_mux_notify_cb notify_cb)
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc = 0;
	bool is_connected;

	IPADBG("%s: opening ch %d\n", __func__, lcid);
	if (!a2_mux_ctx->a2_mux_initialized) {
		IPAERR("%s: not initialized\n", __func__);
		return -ENODEV;
	}
	if (lcid >= A2_MUX_NUM_CHANNELS || lcid < 0) {
		IPAERR("%s: invalid channel id %d\n", __func__, lcid);
		return -EINVAL;
	}
	if (notify_cb == NULL) {
		IPAERR("%s: notify function is NULL\n", __func__);
		return -EINVAL;
	}
	spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
	if (bam_ch_is_open(lcid)) {
		IPAERR("%s: Already opened %d\n", __func__, lcid);
		spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
		goto open_done;
	}
	if (!bam_ch_is_remote_open(lcid)) {
		IPAERR("%s: Remote not open; ch: %d\n", __func__, lcid);
		spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
		return -ENODEV;
	}
	a2_mux_ctx->bam_ch[lcid].notify_cb = notify_cb;
	a2_mux_ctx->bam_ch[lcid].user_data = user_data;
	a2_mux_ctx->bam_ch[lcid].status |= BAM_CH_LOCAL_OPEN;
	a2_mux_ctx->bam_ch[lcid].num_tx_pkts = 0;
	a2_mux_ctx->bam_ch[lcid].use_wm = 0;
	spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
	read_lock(&a2_mux_ctx->ul_wakeup_lock);
	is_connected = a2_mux_ctx->bam_is_connected &&
					!a2_mux_ctx->bam_connect_in_progress;
	read_unlock(&a2_mux_ctx->ul_wakeup_lock);
	if (!is_connected)
		return -ENODEV;
	if (lcid != A2_MUX_TETHERED_0) {
		hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
		if (hdr == NULL) {
			IPAERR("%s: hdr kmalloc failed. ch: %d\n",
			       __func__, lcid);
			return -ENOMEM;
		}
		hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
		if (a2_mux_ctx->a2_mux_apps_pc_enabled) {
			hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
		} else {
			IPAERR("%s: PC DISABLED BY A5 SW BY INTENTION\n",
					__func__);
			a2_mux_ctx->a2_pc_disabled = 1;
			hdr->cmd = BAM_MUX_HDR_CMD_OPEN_NO_A2_PC;
		}
		hdr->reserved = 0;
		hdr->ch_id = lcid;
		hdr->pkt_len = 0;
		hdr->pad_len = 0;
		hdr->magic_num = htons(hdr->magic_num);
		hdr->pkt_len = htons(hdr->pkt_len);
		IPADBG("convert to network order magic_num=%d, pkt_len=%d\n",
		    hdr->magic_num, hdr->pkt_len);
		rc = a2_mux_write_cmd((void *)hdr,
				       sizeof(struct bam_mux_hdr));
		if (rc) {
			IPAERR("%s: a2_mux_write_cmd failed %d; ch: %d\n",
			       __func__, rc, lcid);
			/* hdr was already consumed by a2_mux_write_cmd */
			return rc;
		}
		rc = a2_mux_add_hdr(lcid);
		if (rc) {
			IPAERR("a2_mux_add_hdr failed %d; ch: %d\n",
			       rc, lcid);
			return rc;
		}
	}

open_done:
	IPADBG("%s: opened ch %d\n", __func__, lcid);
	return rc;
}

/**
 * a2_mux_close_channel() - closes logical channel
 *		to A2
 * @lcid: logical channel ID
 *
 * Returns: 0 on success, negative on failure
 */
int a2_mux_close_channel(enum a2_mux_logical_channel_id lcid)
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc = 0;
	bool is_connected;

	if (lcid >= A2_MUX_NUM_CHANNELS || lcid < 0)
		return -EINVAL;
	IPADBG("%s: closing ch %d\n", __func__, lcid);
	if (!a2_mux_ctx->a2_mux_initialized)
		return -ENODEV;
	read_lock(&a2_mux_ctx->ul_wakeup_lock);
	is_connected = a2_mux_ctx->bam_is_connected &&
					!a2_mux_ctx->bam_connect_in_progress;
	read_unlock(&a2_mux_ctx->ul_wakeup_lock);
	if (!is_connected && !bam_ch_is_in_reset(lcid))
		return -ENODEV;
	spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
	a2_mux_ctx->bam_ch[lcid].notify_cb = NULL;
	a2_mux_ctx->bam_ch[lcid].user_data = NULL;
	a2_mux_ctx->bam_ch[lcid].status &= ~BAM_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
	if (bam_ch_is_in_reset(lcid)) {
		a2_mux_ctx->bam_ch[lcid].status &= ~BAM_CH_IN_RESET;
		return 0;
	}
	if (lcid != A2_MUX_TETHERED_0) {
		hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC);
		if (hdr == NULL) {
			IPAERR("%s: hdr kmalloc failed. ch: %d\n",
			       __func__, lcid);
			return -ENOMEM;
		}
		hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
		hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
		hdr->reserved = 0;
		hdr->ch_id = lcid;
		hdr->pkt_len = 0;
		hdr->pad_len = 0;
		hdr->magic_num = htons(hdr->magic_num);
		hdr->pkt_len = htons(hdr->pkt_len);
		IPADBG("convert to network order magic_num=%d, pkt_len=%d\n",
		    hdr->magic_num, hdr->pkt_len);
		rc = a2_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
		if (rc) {
			IPAERR("%s: a2_mux_write_cmd failed %d; ch: %d\n",
			       __func__, rc, lcid);
			/* hdr was already consumed by a2_mux_write_cmd */
			return rc;
		}

		rc = a2_mux_del_hdr(lcid);
		if (rc) {
			IPAERR("a2_mux_del_hdr failed %d; ch: %d\n",
			       rc, lcid);
			return rc;
		}
	}
	IPADBG("%s: closed ch %d\n", __func__, lcid);
	return 0;
}
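
/*
 * Usage sketch for the channel API above (illustrative only; the
 * callback and its private data are hypothetical, and the exact
 * a2_mux_notify_cb signature is defined in the mach/ipa.h header).
 * On embedded channels the TX skb must leave headroom for the
 * bam_mux_hdr that a2_mux_write() pushes:
 *
 *	static void wwan_notify(void *priv, enum a2_mux_event_type evt,
 *				unsigned long data)
 *	{
 *		struct sk_buff *skb = (struct sk_buff *)data;
 *
 *		switch (evt) {
 *		case A2_MUX_RECEIVE:
 *			netif_rx(skb);          // payload up the stack
 *			break;
 *		case A2_MUX_WRITE_DONE:
 *			dev_kfree_skb_any(skb); // TX skb returned to us
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 *
 *	rc = a2_mux_open_channel(A2_MUX_WWAN_0, priv, wwan_notify);
 *	if (!rc)
 *		rc = a2_mux_write(A2_MUX_WWAN_0, skb);
 *	...
 *	a2_mux_close_channel(A2_MUX_WWAN_0);
 */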

/**
 * a2_mux_is_ch_full() - checks if channel is above predefined WM,
 *		used for flow control implementation
 * @lcid: logical channel ID
 *
 * Returns: 1 if the channel is above the predefined WM,
 *		0 otherwise, negative on failure
 */
int a2_mux_is_ch_full(enum a2_mux_logical_channel_id lcid)
{
	unsigned long flags;
	int ret;

	if (lcid >= A2_MUX_NUM_CHANNELS ||
			lcid < 0)
		return -EINVAL;
	if (!a2_mux_ctx->a2_mux_initialized)
		return -ENODEV;
	spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
	a2_mux_ctx->bam_ch[lcid].use_wm = 1;
	ret = a2_mux_ctx->bam_ch[lcid].num_tx_pkts >= HIGH_WATERMARK;
	IPADBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
	     lcid, a2_mux_ctx->bam_ch[lcid].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(lcid)) {
		ret = -ENODEV;
		IPAERR("%s: port not open: %d\n", __func__,
		       a2_mux_ctx->bam_ch[lcid].status);
	}
	spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
	return ret;
}

/**
 * a2_mux_is_ch_low() - checks if channel is below predefined WM,
 *		used for flow control implementation
 * @lcid: logical channel ID
 *
 * Returns: 1 if the channel is below the predefined WM,
 *		0 otherwise, negative on failure
 */
int a2_mux_is_ch_low(enum a2_mux_logical_channel_id lcid)
{
	unsigned long flags;
	int ret;

	if (lcid >= A2_MUX_NUM_CHANNELS ||
			lcid < 0)
		return -EINVAL;
	if (!a2_mux_ctx->a2_mux_initialized)
		return -ENODEV;
	spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
	a2_mux_ctx->bam_ch[lcid].use_wm = 1;
	ret = a2_mux_ctx->bam_ch[lcid].num_tx_pkts <= LOW_WATERMARK;
	IPADBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
	     lcid, a2_mux_ctx->bam_ch[lcid].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(lcid)) {
		ret = -ENODEV;
		IPAERR("%s: port not open: %d\n", __func__,
		       a2_mux_ctx->bam_ch[lcid].status);
	}
	spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
	return ret;
}
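
/*
 * Flow-control sketch using the two watermark helpers (illustrative;
 * assumes the caller is a network driver with a netdev queue):
 *
 *	if (a2_mux_is_ch_full(lcid) > 0)
 *		netif_stop_queue(dev);	// back-pressure the stack
 *	...
 *	if (a2_mux_is_ch_low(lcid) > 0)
 *		netif_wake_queue(dev);	// resume below LOW_WATERMARK
 */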

/**
 * a2_mux_is_ch_empty() - checks if channel is empty.
 * @lcid: logical channel ID
 *
 * Returns: 1 if the channel is empty,
 *		0 otherwise, negative on failure
 */
int a2_mux_is_ch_empty(enum a2_mux_logical_channel_id lcid)
{
	unsigned long flags;
	int ret;

	if (lcid >= A2_MUX_NUM_CHANNELS ||
			lcid < 0)
		return -EINVAL;
	if (!a2_mux_ctx->a2_mux_initialized)
		return -ENODEV;
	spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
	a2_mux_ctx->bam_ch[lcid].use_wm = 1;
	ret = a2_mux_ctx->bam_ch[lcid].num_tx_pkts == 0;
	if (!bam_ch_is_local_open(lcid)) {
		ret = -ENODEV;
		IPAERR("%s: port not open: %d\n", __func__,
		       a2_mux_ctx->bam_ch[lcid].status);
	}
	spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
	return ret;
}

static int a2_mux_initialize_context(int handle)
{
	int i;

	a2_mux_ctx->a2_mux_apps_pc_enabled = 1;
	a2_mux_ctx->a2_device_handle = handle;
	INIT_WORK(&a2_mux_ctx->kickoff_ul_wakeup, kickoff_ul_wakeup_func);
	INIT_WORK(&a2_mux_ctx->kickoff_ul_power_down,
		  kickoff_ul_power_down_func);
	INIT_WORK(&a2_mux_ctx->kickoff_ul_request_resource,
		  kickoff_ul_request_resource_func);
	INIT_LIST_HEAD(&a2_mux_ctx->bam_tx_pool);
	spin_lock_init(&a2_mux_ctx->bam_tx_pool_spinlock);
	mutex_init(&a2_mux_ctx->wakeup_lock);
	rwlock_init(&a2_mux_ctx->ul_wakeup_lock);
	spin_lock_init(&a2_mux_ctx->wakelock_reference_lock);
	a2_mux_ctx->disconnect_ack = 1;
	mutex_init(&a2_mux_ctx->smsm_cb_lock);
	for (i = 0; i < A2_MUX_NUM_CHANNELS; ++i)
		spin_lock_init(&a2_mux_ctx->bam_ch[i].lock);
	init_completion(&a2_mux_ctx->ul_wakeup_ack_completion);
	init_completion(&a2_mux_ctx->bam_connection_completion);
	init_completion(&a2_mux_ctx->request_resource_completion);
	init_completion(&a2_mux_ctx->dl_wakeup_completion);
	wake_lock_init(&a2_mux_ctx->bam_wakelock,
		       WAKE_LOCK_SUSPEND, "a2_mux_wakelock");
	a2_mux_ctx->a2_mux_initialized = 1;
	a2_mux_ctx->a2_mux_send_power_vote_on_init_once = 1;
	a2_mux_ctx->a2_mux_tx_workqueue =
		create_singlethread_workqueue("a2_mux_tx");
	if (!a2_mux_ctx->a2_mux_tx_workqueue) {
		IPAERR("%s: a2_mux_tx_workqueue alloc failed\n",
		       __func__);
		return -ENOMEM;
	}
	a2_mux_ctx->a2_mux_rx_workqueue =
		create_singlethread_workqueue("a2_mux_rx");
	if (!a2_mux_ctx->a2_mux_rx_workqueue) {
		IPAERR("%s: a2_mux_rx_workqueue alloc failed\n",
			__func__);
		return -ENOMEM;
	}
	return 0;
}

/**
 * a2_mux_init() - initialize A2 MUX component
 *
 * Returns: 0 on success, negative otherwise
 */
int a2_mux_init(void)
{
	int rc;
	u32 h;
	void *a2_virt_addr;
	u32 a2_bam_mem_base;
	u32 a2_bam_mem_size;
	u32 a2_bam_irq;
	struct sps_bam_props a2_props;

	IPADBG("%s A2 MUX\n", __func__);
	rc = ipa_get_a2_mux_bam_info(&a2_bam_mem_base,
				     &a2_bam_mem_size,
				     &a2_bam_irq);
	if (rc) {
		IPAERR("%s: ipa_get_a2_mux_bam_info failed\n", __func__);
		rc = -EFAULT;
		goto bail;
	}
	a2_virt_addr = ioremap_nocache((unsigned long)(a2_bam_mem_base),
							a2_bam_mem_size);
	if (!a2_virt_addr) {
		IPAERR("%s: ioremap failed\n", __func__);
		rc = -ENOMEM;
		goto bail;
	}
	memset(&a2_props, 0, sizeof(a2_props));
	a2_props.phys_addr		= a2_bam_mem_base;
	a2_props.virt_addr		= a2_virt_addr;
	a2_props.virt_size		= a2_bam_mem_size;
	a2_props.irq			= a2_bam_irq;
	a2_props.options		= SPS_BAM_OPT_IRQ_WAKEUP;
	a2_props.num_pipes		= A2_NUM_PIPES;
	a2_props.summing_threshold	= A2_SUMMING_THRESHOLD;
	a2_props.manage                 = SPS_BAM_MGR_DEVICE_REMOTE;
	/* need to free on tear down */
	rc = sps_register_bam_device(&a2_props, &h);
	if (rc < 0) {
		IPAERR("%s: register bam error %d\n", __func__, rc);
		goto register_bam_failed;
	}
	a2_mux_ctx = kzalloc(sizeof(*a2_mux_ctx), GFP_KERNEL);
	if (!a2_mux_ctx) {
		IPAERR("%s: a2_mux_ctx alloc failed\n", __func__);
		rc = -ENOMEM;
		goto register_bam_failed;
	}
	rc = a2_mux_initialize_context(h);
	if (rc) {
		IPAERR("%s: a2_mux_initialize_context failed, rc: %d\n",
		       __func__, rc);
		goto ctx_alloc_failed;
	}
	rc = a2_mux_pm_initialize_rm();
	if (rc) {
		IPAERR("%s: a2_mux_pm_initialize_rm failed, rc: %d\n",
		       __func__, rc);
		goto ctx_alloc_failed;
	}
	rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL,
					a2_mux_smsm_cb, NULL);
	if (rc) {
		IPAERR("%s: smsm cb register failed, rc: %d\n", __func__, rc);
		rc = -ENOMEM;
		goto ctx_alloc_failed;
	}
	rc = smsm_state_cb_register(SMSM_MODEM_STATE,
				    SMSM_A2_POWER_CONTROL_ACK,
				    a2_mux_smsm_ack_cb, NULL);
	if (rc) {
		IPAERR("%s: smsm ack cb register failed, rc: %d\n",
		       __func__, rc);
		rc = -ENOMEM;
		goto smsm_ack_cb_reg_failed;
	}
	if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_A2_POWER_CONTROL)
		a2_mux_smsm_cb(NULL, 0, smsm_get_state(SMSM_MODEM_STATE));

	/*
	 * Set remote channel open for tethered channel since there is
	 *  no actual remote tethered channel
	 */
	a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].status |= BAM_CH_REMOTE_OPEN;

	rc = 0;
	goto bail;

smsm_ack_cb_reg_failed:
	smsm_state_cb_deregister(SMSM_MODEM_STATE,
				SMSM_A2_POWER_CONTROL,
				a2_mux_smsm_cb, NULL);
ctx_alloc_failed:
	kfree(a2_mux_ctx);
register_bam_failed:
	iounmap(a2_virt_addr);
bail:
	return rc;
}

/**
 * a2_mux_exit() - destroy A2 MUX component
 *
 * Returns: 0 on success, negative otherwise
 */
int a2_mux_exit(void)
{
	smsm_state_cb_deregister(SMSM_MODEM_STATE,
			SMSM_A2_POWER_CONTROL_ACK,
			a2_mux_smsm_ack_cb,
			NULL);
	smsm_state_cb_deregister(SMSM_MODEM_STATE,
				SMSM_A2_POWER_CONTROL,
				a2_mux_smsm_cb,
				NULL);
	if (a2_mux_ctx->a2_mux_tx_workqueue)
		destroy_workqueue(a2_mux_ctx->a2_mux_tx_workqueue);
	if (a2_mux_ctx->a2_mux_rx_workqueue)
		destroy_workqueue(a2_mux_ctx->a2_mux_rx_workqueue);
	return 0;
}