/*
 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
 *
 * Copyright (C) 2014 Atmel Corporation
 *
 * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <asm/barrier.h>
#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#include "dmaengine.h"

/* Global registers */
#define AT_XDMAC_GTYPE		0x00	/* Global Type Register */
#define		AT_XDMAC_NB_CH(i)	(((i) & 0x1F) + 1)		/* Number of Channels Minus One */
#define		AT_XDMAC_FIFO_SZ(i)	(((i) >> 5) & 0x7FF)		/* Number of Bytes */
#define		AT_XDMAC_NB_REQ(i)	((((i) >> 16) & 0x3F) + 1)	/* Number of Peripheral Requests Minus One */
#define AT_XDMAC_GCFG		0x04	/* Global Configuration Register */
#define AT_XDMAC_GWAC		0x08	/* Global Weighted Arbiter Configuration Register */
#define AT_XDMAC_GIE		0x0C	/* Global Interrupt Enable Register */
#define AT_XDMAC_GID		0x10	/* Global Interrupt Disable Register */
#define AT_XDMAC_GIM		0x14	/* Global Interrupt Mask Register */
#define AT_XDMAC_GIS		0x18	/* Global Interrupt Status Register */
#define AT_XDMAC_GE		0x1C	/* Global Channel Enable Register */
#define AT_XDMAC_GD		0x20	/* Global Channel Disable Register */
#define AT_XDMAC_GS		0x24	/* Global Channel Status Register */
#define AT_XDMAC_GRS		0x28	/* Global Channel Read Suspend Register */
#define AT_XDMAC_GWS		0x2C	/* Global Write Suspend Register */
#define AT_XDMAC_GRWS		0x30	/* Global Channel Read Write Suspend Register */
#define AT_XDMAC_GRWR		0x34	/* Global Channel Read Write Resume Register */
#define AT_XDMAC_GSWR		0x38	/* Global Channel Software Request Register */
#define AT_XDMAC_GSWS		0x3C	/* Global Channel Software Request Status Register */
#define AT_XDMAC_GSWF		0x40	/* Global Channel Software Flush Request Register */
#define AT_XDMAC_VERSION	0xFFC	/* XDMAC Version Register */
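/*
 * Illustrative decode (hypothetical register value, not from a datasheet):
 * if AT_XDMAC_GTYPE reads 0x003F400F, the helpers above give
 *	AT_XDMAC_NB_CH(0x003F400F)   = (0x0F & 0x1F) + 1              = 16 channels
 *	AT_XDMAC_FIFO_SZ(0x003F400F) = (0x003F400F >> 5) & 0x7FF      = 512 bytes
 *	AT_XDMAC_NB_REQ(0x003F400F)  = ((0x003F400F >> 16) & 0x3F) + 1 = 64 requests
 */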

/* Channel relative registers offsets */
#define AT_XDMAC_CIE		0x00	/* Channel Interrupt Enable Register */
#define		AT_XDMAC_CIE_BIE	BIT(0)	/* End of Block Interrupt Enable Bit */
#define		AT_XDMAC_CIE_LIE	BIT(1)	/* End of Linked List Interrupt Enable Bit */
#define		AT_XDMAC_CIE_DIE	BIT(2)	/* End of Disable Interrupt Enable Bit */
#define		AT_XDMAC_CIE_FIE	BIT(3)	/* End of Flush Interrupt Enable Bit */
#define		AT_XDMAC_CIE_RBEIE	BIT(4)	/* Read Bus Error Interrupt Enable Bit */
#define		AT_XDMAC_CIE_WBEIE	BIT(5)	/* Write Bus Error Interrupt Enable Bit */
#define		AT_XDMAC_CIE_ROIE	BIT(6)	/* Request Overflow Interrupt Enable Bit */
#define AT_XDMAC_CID		0x04	/* Channel Interrupt Disable Register */
#define		AT_XDMAC_CID_BID	BIT(0)	/* End of Block Interrupt Disable Bit */
#define		AT_XDMAC_CID_LID	BIT(1)	/* End of Linked List Interrupt Disable Bit */
#define		AT_XDMAC_CID_DID	BIT(2)	/* End of Disable Interrupt Disable Bit */
#define		AT_XDMAC_CID_FID	BIT(3)	/* End of Flush Interrupt Disable Bit */
#define		AT_XDMAC_CID_RBEID	BIT(4)	/* Read Bus Error Interrupt Disable Bit */
#define		AT_XDMAC_CID_WBEID	BIT(5)	/* Write Bus Error Interrupt Disable Bit */
#define		AT_XDMAC_CID_ROID	BIT(6)	/* Request Overflow Interrupt Disable Bit */
#define AT_XDMAC_CIM		0x08	/* Channel Interrupt Mask Register */
#define		AT_XDMAC_CIM_BIM	BIT(0)	/* End of Block Interrupt Mask Bit */
#define		AT_XDMAC_CIM_LIM	BIT(1)	/* End of Linked List Interrupt Mask Bit */
#define		AT_XDMAC_CIM_DIM	BIT(2)	/* End of Disable Interrupt Mask Bit */
#define		AT_XDMAC_CIM_FIM	BIT(3)	/* End of Flush Interrupt Mask Bit */
#define		AT_XDMAC_CIM_RBEIM	BIT(4)	/* Read Bus Error Interrupt Mask Bit */
#define		AT_XDMAC_CIM_WBEIM	BIT(5)	/* Write Bus Error Interrupt Mask Bit */
#define		AT_XDMAC_CIM_ROIM	BIT(6)	/* Request Overflow Interrupt Mask Bit */
#define AT_XDMAC_CIS		0x0C	/* Channel Interrupt Status Register */
#define		AT_XDMAC_CIS_BIS	BIT(0)	/* End of Block Interrupt Status Bit */
#define		AT_XDMAC_CIS_LIS	BIT(1)	/* End of Linked List Interrupt Status Bit */
#define		AT_XDMAC_CIS_DIS	BIT(2)	/* End of Disable Interrupt Status Bit */
#define		AT_XDMAC_CIS_FIS	BIT(3)	/* End of Flush Interrupt Status Bit */
#define		AT_XDMAC_CIS_RBEIS	BIT(4)	/* Read Bus Error Interrupt Status Bit */
#define		AT_XDMAC_CIS_WBEIS	BIT(5)	/* Write Bus Error Interrupt Status Bit */
#define		AT_XDMAC_CIS_ROIS	BIT(6)	/* Request Overflow Interrupt Status Bit */
#define AT_XDMAC_CSA		0x10	/* Channel Source Address Register */
#define AT_XDMAC_CDA		0x14	/* Channel Destination Address Register */
#define AT_XDMAC_CNDA		0x18	/* Channel Next Descriptor Address Register */
#define		AT_XDMAC_CNDA_NDAIF(i)	((i) & 0x1)			/* Channel x Next Descriptor Interface */
#define		AT_XDMAC_CNDA_NDA(i)	((i) & 0xfffffffc)		/* Channel x Next Descriptor Address */
#define AT_XDMAC_CNDC		0x1C	/* Channel Next Descriptor Control Register */
#define		AT_XDMAC_CNDC_NDE		(0x1 << 0)		/* Channel x Next Descriptor Enable */
#define		AT_XDMAC_CNDC_NDSUP		(0x1 << 1)		/* Channel x Next Descriptor Source Update */
#define		AT_XDMAC_CNDC_NDDUP		(0x1 << 2)		/* Channel x Next Descriptor Destination Update */
#define		AT_XDMAC_CNDC_NDVIEW_NDV0	(0x0 << 3)		/* Channel x Next Descriptor View 0 */
#define		AT_XDMAC_CNDC_NDVIEW_NDV1	(0x1 << 3)		/* Channel x Next Descriptor View 1 */
#define		AT_XDMAC_CNDC_NDVIEW_NDV2	(0x2 << 3)		/* Channel x Next Descriptor View 2 */
#define		AT_XDMAC_CNDC_NDVIEW_NDV3	(0x3 << 3)		/* Channel x Next Descriptor View 3 */
#define AT_XDMAC_CUBC		0x20	/* Channel Microblock Control Register */
#define AT_XDMAC_CBC		0x24	/* Channel Block Control Register */
#define AT_XDMAC_CC		0x28	/* Channel Configuration Register */
#define		AT_XDMAC_CC_TYPE	(0x1 << 0)	/* Channel Transfer Type */
#define			AT_XDMAC_CC_TYPE_MEM_TRAN	(0x0 << 0)	/* Memory to Memory Transfer */
#define			AT_XDMAC_CC_TYPE_PER_TRAN	(0x1 << 0)	/* Peripheral to Memory or Memory to Peripheral Transfer */
#define		AT_XDMAC_CC_MBSIZE_MASK	(0x3 << 1)
#define			AT_XDMAC_CC_MBSIZE_SINGLE	(0x0 << 1)
#define			AT_XDMAC_CC_MBSIZE_FOUR		(0x1 << 1)
#define			AT_XDMAC_CC_MBSIZE_EIGHT	(0x2 << 1)
#define			AT_XDMAC_CC_MBSIZE_SIXTEEN	(0x3 << 1)
#define		AT_XDMAC_CC_DSYNC	(0x1 << 4)	/* Channel Synchronization */
#define			AT_XDMAC_CC_DSYNC_PER2MEM	(0x0 << 4)
#define			AT_XDMAC_CC_DSYNC_MEM2PER	(0x1 << 4)
#define		AT_XDMAC_CC_PROT	(0x1 << 5)	/* Channel Protection */
#define			AT_XDMAC_CC_PROT_SEC		(0x0 << 5)
#define			AT_XDMAC_CC_PROT_UNSEC		(0x1 << 5)
#define		AT_XDMAC_CC_SWREQ	(0x1 << 6)	/* Channel Software Request Trigger */
#define			AT_XDMAC_CC_SWREQ_HWR_CONNECTED	(0x0 << 6)
#define			AT_XDMAC_CC_SWREQ_SWR_CONNECTED	(0x1 << 6)
#define		AT_XDMAC_CC_MEMSET	(0x1 << 7)	/* Channel Fill Block of memory */
#define			AT_XDMAC_CC_MEMSET_NORMAL_MODE	(0x0 << 7)
#define			AT_XDMAC_CC_MEMSET_HW_MODE	(0x1 << 7)
#define		AT_XDMAC_CC_CSIZE(i)	((0x7 & (i)) << 8)	/* Channel Chunk Size */
#define		AT_XDMAC_CC_DWIDTH_OFFSET	11
#define		AT_XDMAC_CC_DWIDTH_MASK	(0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
#define		AT_XDMAC_CC_DWIDTH(i)	((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET)	/* Channel Data Width */
#define			AT_XDMAC_CC_DWIDTH_BYTE		0x0
#define			AT_XDMAC_CC_DWIDTH_HALFWORD	0x1
#define			AT_XDMAC_CC_DWIDTH_WORD		0x2
#define			AT_XDMAC_CC_DWIDTH_DWORD	0x3
#define		AT_XDMAC_CC_SIF(i)	((0x1 & (i)) << 13)	/* Channel Source Interface Identifier */
#define		AT_XDMAC_CC_DIF(i)	((0x1 & (i)) << 14)	/* Channel Destination Interface Identifier */
#define		AT_XDMAC_CC_SAM_MASK	(0x3 << 16)	/* Channel Source Addressing Mode */
#define			AT_XDMAC_CC_SAM_FIXED_AM	(0x0 << 16)
#define			AT_XDMAC_CC_SAM_INCREMENTED_AM	(0x1 << 16)
#define			AT_XDMAC_CC_SAM_UBS_AM		(0x2 << 16)
#define			AT_XDMAC_CC_SAM_UBS_DS_AM	(0x3 << 16)
#define		AT_XDMAC_CC_DAM_MASK	(0x3 << 18)	/* Channel Destination Addressing Mode */
#define			AT_XDMAC_CC_DAM_FIXED_AM	(0x0 << 18)
#define			AT_XDMAC_CC_DAM_INCREMENTED_AM	(0x1 << 18)
#define			AT_XDMAC_CC_DAM_UBS_AM		(0x2 << 18)
#define			AT_XDMAC_CC_DAM_UBS_DS_AM	(0x3 << 18)
#define		AT_XDMAC_CC_INITD	(0x1 << 21)	/* Channel Initialization Terminated (read only) */
#define			AT_XDMAC_CC_INITD_TERMINATED	(0x0 << 21)
#define			AT_XDMAC_CC_INITD_IN_PROGRESS	(0x1 << 21)
#define		AT_XDMAC_CC_RDIP	(0x1 << 22)	/* Read in Progress (read only) */
#define			AT_XDMAC_CC_RDIP_DONE		(0x0 << 22)
#define			AT_XDMAC_CC_RDIP_IN_PROGRESS	(0x1 << 22)
#define		AT_XDMAC_CC_WRIP	(0x1 << 23)	/* Write in Progress (read only) */
#define			AT_XDMAC_CC_WRIP_DONE		(0x0 << 23)
#define			AT_XDMAC_CC_WRIP_IN_PROGRESS	(0x1 << 23)
#define		AT_XDMAC_CC_PERID(i)	(0x7f & (i) << 24)	/* Channel Peripheral Identifier */
#define AT_XDMAC_CDS_MSP	0x2C	/* Channel Data Stride Memory Set Pattern */
#define AT_XDMAC_CSUS		0x30	/* Channel Source Microblock Stride */
#define AT_XDMAC_CDUS		0x34	/* Channel Destination Microblock Stride */

#define AT_XDMAC_CHAN_REG_BASE	0x50	/* Channel registers base address */

/* Microblock control members */
#define AT_XDMAC_MBR_UBC_UBLEN_MAX	0xFFFFFFUL	/* Maximum Microblock Length */
#define AT_XDMAC_MBR_UBC_NDE		(0x1 << 24)	/* Next Descriptor Enable */
#define AT_XDMAC_MBR_UBC_NSEN		(0x1 << 25)	/* Next Descriptor Source Update */
#define AT_XDMAC_MBR_UBC_NDEN		(0x1 << 26)	/* Next Descriptor Destination Update */
#define AT_XDMAC_MBR_UBC_NDV0		(0x0 << 27)	/* Next Descriptor View 0 */
#define AT_XDMAC_MBR_UBC_NDV1		(0x1 << 27)	/* Next Descriptor View 1 */
#define AT_XDMAC_MBR_UBC_NDV2		(0x2 << 27)	/* Next Descriptor View 2 */
#define AT_XDMAC_MBR_UBC_NDV3		(0x3 << 27)	/* Next Descriptor View 3 */

#define AT_XDMAC_MAX_CHAN	0x20
#define AT_XDMAC_MAX_CSIZE	16	/* 16 data */
#define AT_XDMAC_MAX_DWIDTH	8	/* 64 bits */
#define AT_XDMAC_RESIDUE_MAX_RETRIES	5

#define AT_XDMAC_DMA_BUSWIDTHS\
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

enum atc_status {
	AT_XDMAC_CHAN_IS_CYCLIC = 0,
	AT_XDMAC_CHAN_IS_PAUSED,
};

/* ----- Channels ----- */
struct at_xdmac_chan {
	struct dma_chan			chan;
	void __iomem			*ch_regs;
	u32				mask;		/* Channel Mask */
	u32				cfg;		/* Channel Configuration Register */
	u8				perid;		/* Peripheral ID */
	u8				perif;		/* Peripheral Interface */
	u8				memif;		/* Memory Interface */
	u32				save_cc;
	u32				save_cim;
	u32				save_cnda;
	u32				save_cndc;
	u32				irq_status;
	unsigned long			status;
	struct tasklet_struct		tasklet;
	struct dma_slave_config		sconfig;

	spinlock_t			lock;

	struct list_head		xfers_list;
	struct list_head		free_descs_list;
};


/* ----- Controller ----- */
struct at_xdmac {
	struct dma_device	dma;
	void __iomem		*regs;
	int			irq;
	struct clk		*clk;
	u32			save_gim;
	struct dma_pool		*at_xdmac_desc_pool;
	struct at_xdmac_chan	chan[0];
};


/* ----- Descriptors ----- */

/* Linked List Descriptor */
struct at_xdmac_lld {
	dma_addr_t	mbr_nda;	/* Next Descriptor Member */
	u32		mbr_ubc;	/* Microblock Control Member */
	dma_addr_t	mbr_sa;		/* Source Address Member */
	dma_addr_t	mbr_da;		/* Destination Address Member */
	u32		mbr_cfg;	/* Configuration Register */
	u32		mbr_bc;		/* Block Control Register */
	u32		mbr_ds;		/* Data Stride Register */
	u32		mbr_sus;	/* Source Microblock Stride Register */
	u32		mbr_dus;	/* Destination Microblock Stride Register */
};

/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
struct at_xdmac_desc {
	struct at_xdmac_lld		lld;
	enum dma_transfer_direction	direction;
	struct dma_async_tx_descriptor	tx_dma_desc;
	struct list_head		desc_node;
	/* Following members are only used by the first descriptor */
	bool				active_xfer;
	unsigned int			xfer_size;
	struct list_head		descs_list;
	struct list_head		xfer_node;
} __aligned(sizeof(u64));

static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
{
	return atxdmac->regs + (AT_XDMAC_CHAN_REG_BASE + chan_nb * 0x40);
}
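
/*
 * For example, channel 2's register window starts at
 * regs + 0x50 + 2 * 0x40 = regs + 0xd0, so reading AT_XDMAC_CIS for that
 * channel accesses regs + 0xd0 + 0x0c.
 */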

#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
#define at_xdmac_write(atxdmac, reg, value) \
	writel_relaxed((value), (atxdmac)->regs + (reg))

#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))

static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
{
	return container_of(dchan, struct at_xdmac_chan, chan);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
{
	return container_of(ddev, struct at_xdmac, dma);
}

static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
}

static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
{
	return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
}

static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
{
	return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
}

static inline int at_xdmac_csize(u32 maxburst)
{
	int csize;

	csize = ffs(maxburst) - 1;
	if (csize > 4)
		csize = -EINVAL;

	return csize;
}
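
/*
 * Worked example: maxburst = 8 gives ffs(8) - 1 = 3, the CSIZE encoding
 * for 8-data chunks; maxburst = 32 would give 5 > 4 and be rejected,
 * which matches AT_XDMAC_MAX_CSIZE = 16.
 */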

static inline u8 at_xdmac_get_dwidth(u32 cfg)
{
	return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
}

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");


static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
{
	return at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask;
}

static void at_xdmac_off(struct at_xdmac *atxdmac)
{
	at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);

	/* Wait until all channels are disabled. */
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
		cpu_relax();

	at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
}

/* Must be called with the channel lock held. */
static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
				struct at_xdmac_desc *first)
{
	struct at_xdmac	*atxdmac = to_at_xdmac(atchan->chan.device);
	u32		reg;

	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);

	if (at_xdmac_chan_is_enabled(atchan))
		return;

	/* Set transfer as active to not try to start it again. */
	first->active_xfer = true;

	/* Tell xdmac where to get the first descriptor. */
	reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys)
	      | AT_XDMAC_CNDA_NDAIF(atchan->memif);
	at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);

	/*
	 * When doing a non-cyclic transfer, we need to use next descriptor
	 * view 2 (or higher) since some fields of the configuration register
	 * depend on the transfer size and the src/dest addresses.
	 */
	if (at_xdmac_chan_is_cyclic(atchan))
		reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
	else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3)
		reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
	else
		reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
	/*
	 * Even if the register will be updated from the configuration in the
	 * descriptor when using view 2 or higher, the PROT bit won't be set
	 * properly. This bit can be modified only by using the channel
	 * configuration register.
	 */
	at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);

	reg |= AT_XDMAC_CNDC_NDDUP
	       | AT_XDMAC_CNDC_NDSUP
	       | AT_XDMAC_CNDC_NDE;
	at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);

	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

	at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE;
	/*
	 * There is no end of list when doing cyclic DMA, so we need to get
	 * an interrupt after each period.
	 */
	if (at_xdmac_chan_is_cyclic(atchan))
		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
				    reg | AT_XDMAC_CIE_BIE);
	else
		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
				    reg | AT_XDMAC_CIE_LIE);
	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
	wmb();
	at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);

	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

}

static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_xdmac_desc	*desc = txd_to_at_desc(tx);
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		irqflags;

	spin_lock_irqsave(&atchan->lock, irqflags);
	cookie = dma_cookie_assign(tx);

	dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
		 __func__, atchan, desc);
	list_add_tail(&desc->xfer_node, &atchan->xfers_list);
	if (list_is_singular(&atchan->xfers_list))
		at_xdmac_start_xfer(atchan, desc);

	spin_unlock_irqrestore(&atchan->lock, irqflags);
	return cookie;
}
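
/*
 * Sketch of the client-side sequence that ends up here (illustrative only;
 * chan, sgl and sg_len belong to the client driver):
 *
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(txd);		// invokes at_xdmac_tx_submit()
 *	dma_async_issue_pending(chan);
 *
 * When the submitted descriptor is the only one in xfers_list, the
 * transfer is started directly from tx_submit().
 */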

static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
						 gfp_t gfp_flags)
{
	struct at_xdmac_desc	*desc;
	struct at_xdmac		*atxdmac = to_at_xdmac(chan->device);
	dma_addr_t		phys;

	desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
	if (desc) {
		INIT_LIST_HEAD(&desc->descs_list);
		dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
		desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
		desc->tx_dma_desc.phys = phys;
	}

	return desc;
}

static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
{
	memset(&desc->lld, 0, sizeof(desc->lld));
	INIT_LIST_HEAD(&desc->descs_list);
	desc->direction = DMA_TRANS_NONE;
	desc->xfer_size = 0;
	desc->active_xfer = false;
}

/* Caller must hold the channel lock. */
static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc *desc;

	if (list_empty(&atchan->free_descs_list)) {
		desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
	} else {
		desc = list_first_entry(&atchan->free_descs_list,
					struct at_xdmac_desc, desc_node);
		list_del(&desc->desc_node);
		at_xdmac_init_used_desc(desc);
	}

	return desc;
}

static void at_xdmac_queue_desc(struct dma_chan *chan,
				struct at_xdmac_desc *prev,
				struct at_xdmac_desc *desc)
{
	if (!prev || !desc)
		return;

	prev->lld.mbr_nda = desc->tx_dma_desc.phys;
	prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE;

	dev_dbg(chan2dev(chan),	"%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
		__func__, prev, &prev->lld.mbr_nda);
}

static inline void at_xdmac_increment_block_count(struct dma_chan *chan,
						  struct at_xdmac_desc *desc)
{
	if (!desc)
		return;

	desc->lld.mbr_bc++;

	dev_dbg(chan2dev(chan),
		"%s: incrementing the block count of the desc 0x%p\n",
		__func__, desc);
}

static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *of_dma)
{
	struct at_xdmac		*atxdmac = of_dma->of_dma_data;
	struct at_xdmac_chan	*atchan;
	struct dma_chan		*chan;
	struct device		*dev = atxdmac->dma.dev;

	if (dma_spec->args_count != 1) {
		dev_err(dev, "dma phandler args: bad number of args\n");
		return NULL;
	}

	chan = dma_get_any_slave_channel(&atxdmac->dma);
	if (!chan) {
		dev_err(dev, "can't get a dma channel\n");
		return NULL;
	}

	atchan = to_at_xdmac_chan(chan);
	atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
	atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
	atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);
	dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
		 atchan->memif, atchan->perif, atchan->perid);

	return chan;
}
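
/*
 * Example consumer node for this one-cell binding (peripheral, address and
 * PERID are illustrative; the helper macros come from
 * dt-bindings/dma/at91.h):
 *
 *	uart0: serial@f001c000 {
 *		...
 *		dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) |
 *			       AT91_XDMAC_DT_PER_IF(1) |
 *			       AT91_XDMAC_DT_PERID(11))>;
 *		dma-names = "tx";
 *	};
 */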

static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
				      enum dma_transfer_direction direction)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	int			csize, dwidth;

	if (direction == DMA_DEV_TO_MEM) {
		atchan->cfg =
			AT91_XDMAC_DT_PERID(atchan->perid)
			| AT_XDMAC_CC_DAM_INCREMENTED_AM
			| AT_XDMAC_CC_SAM_FIXED_AM
			| AT_XDMAC_CC_DIF(atchan->memif)
			| AT_XDMAC_CC_SIF(atchan->perif)
			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
			| AT_XDMAC_CC_DSYNC_PER2MEM
			| AT_XDMAC_CC_MBSIZE_SIXTEEN
			| AT_XDMAC_CC_TYPE_PER_TRAN;
		csize = ffs(atchan->sconfig.src_maxburst) - 1;
		if (csize < 0) {
			dev_err(chan2dev(chan), "invalid src maxburst value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
		dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
		if (dwidth < 0) {
			dev_err(chan2dev(chan), "invalid src addr width value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
	} else if (direction == DMA_MEM_TO_DEV) {
		atchan->cfg =
			AT91_XDMAC_DT_PERID(atchan->perid)
			| AT_XDMAC_CC_DAM_FIXED_AM
			| AT_XDMAC_CC_SAM_INCREMENTED_AM
			| AT_XDMAC_CC_DIF(atchan->perif)
			| AT_XDMAC_CC_SIF(atchan->memif)
			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
			| AT_XDMAC_CC_DSYNC_MEM2PER
			| AT_XDMAC_CC_MBSIZE_SIXTEEN
			| AT_XDMAC_CC_TYPE_PER_TRAN;
		csize = ffs(atchan->sconfig.dst_maxburst) - 1;
		if (csize < 0) {
			dev_err(chan2dev(chan), "invalid src maxburst value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
		dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
		if (dwidth < 0) {
			dev_err(chan2dev(chan), "invalid dst addr width value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
	}

	dev_dbg(chan2dev(chan),	"%s: cfg=0x%08x\n", __func__, atchan->cfg);

	return 0;
}

/*
 * Only check that the maxburst and addr width values are supported by the
 * controller, not that the configuration is valid for the transfer, since
 * we don't know the direction at this stage.
 */
static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
{
	if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
	    || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
		return -EINVAL;

	if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
	    || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
		return -EINVAL;

	return 0;
}

static int at_xdmac_set_slave_config(struct dma_chan *chan,
				      struct dma_slave_config *sconfig)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);

	if (at_xdmac_check_slave_config(sconfig)) {
		dev_err(chan2dev(chan), "invalid slave configuration\n");
		return -EINVAL;
	}

	memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));

	return 0;
}
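
/*
 * Illustrative slave configuration (values are hypothetical): a client
 * writing to a 32-bit peripheral FIFO in bursts of 8 words could use
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys,	// peripheral FIFO address
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *
 * at_xdmac_compute_chan_conf() then derives CSIZE = ffs(8) - 1 = 3
 * (8-data chunks) and DWIDTH = ffs(4) - 1 = 2 (word).
 */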

static struct dma_async_tx_descriptor *
at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction direction,
		       unsigned long flags, void *context)
{
	struct at_xdmac_chan		*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc		*first = NULL, *prev = NULL;
	struct scatterlist		*sg;
	int				i;
	unsigned int			xfer_size = 0;
	unsigned long			irqflags;
	struct dma_async_tx_descriptor	*ret = NULL;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(direction)) {
		dev_err(chan2dev(chan), "invalid DMA direction\n");
		return NULL;
	}

	dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
		 __func__, sg_len,
		 direction == DMA_MEM_TO_DEV ? "to device" : "from device",
		 flags);

	/* Protect the sconfig field, which can be modified by at_xdmac_set_slave_config(). */
	spin_lock_irqsave(&atchan->lock, irqflags);

	if (at_xdmac_compute_chan_conf(chan, direction))
		goto spin_unlock;

	/* Prepare descriptors. */
	for_each_sg(sgl, sg, sg_len, i) {
		struct at_xdmac_desc	*desc = NULL;
		u32			len, mem, dwidth, fixed_dwidth;

		len = sg_dma_len(sg);
		mem = sg_dma_address(sg);
		if (unlikely(!len)) {
			dev_err(chan2dev(chan), "sg data length is zero\n");
			goto spin_unlock;
		}
		dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
			 __func__, i, len, mem);

		desc = at_xdmac_get_desc(atchan);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_init(&first->descs_list, &atchan->free_descs_list);
			goto spin_unlock;
		}

		/* Linked list descriptor setup. */
		if (direction == DMA_DEV_TO_MEM) {
			desc->lld.mbr_sa = atchan->sconfig.src_addr;
			desc->lld.mbr_da = mem;
		} else {
			desc->lld.mbr_sa = mem;
			desc->lld.mbr_da = atchan->sconfig.dst_addr;
		}
		dwidth = at_xdmac_get_dwidth(atchan->cfg);
		fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
			       ? dwidth
			       : AT_XDMAC_CC_DWIDTH_BYTE;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2			/* next descriptor view */
			| AT_XDMAC_MBR_UBC_NDEN					/* next descriptor dst parameter update */
			| AT_XDMAC_MBR_UBC_NSEN					/* next descriptor src parameter update */
			| (len >> fixed_dwidth);				/* microblock length */
		desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
				    AT_XDMAC_CC_DWIDTH(fixed_dwidth);
		dev_dbg(chan2dev(chan),
			 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
			 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			 __func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
		xfer_size += len;
	}

	first->tx_dma_desc.flags = flags;
	first->xfer_size = xfer_size;
	first->direction = direction;
	ret = &first->tx_dma_desc;

spin_unlock:
	spin_unlock_irqrestore(&atchan->lock, irqflags);
	return ret;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
			 size_t buf_len, size_t period_len,
			 enum dma_transfer_direction direction,
			 unsigned long flags)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*first = NULL, *prev = NULL;
	unsigned int		periods = buf_len / period_len;
	int			i;
	unsigned long		irqflags;

	dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
		__func__, &buf_addr, buf_len, period_len,
		direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);

	if (!is_slave_direction(direction)) {
		dev_err(chan2dev(chan), "invalid DMA direction\n");
		return NULL;
	}

	if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
		dev_err(chan2dev(chan), "channel currently used\n");
		return NULL;
	}

	if (at_xdmac_compute_chan_conf(chan, direction))
		return NULL;

	for (i = 0; i < periods; i++) {
		struct at_xdmac_desc	*desc = NULL;

		spin_lock_irqsave(&atchan->lock, irqflags);
		desc = at_xdmac_get_desc(atchan);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_init(&first->descs_list, &atchan->free_descs_list);
			spin_unlock_irqrestore(&atchan->lock, irqflags);
			return NULL;
		}
		spin_unlock_irqrestore(&atchan->lock, irqflags);
		dev_dbg(chan2dev(chan),
			"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
			__func__, desc, &desc->tx_dma_desc.phys);

		if (direction == DMA_DEV_TO_MEM) {
			desc->lld.mbr_sa = atchan->sconfig.src_addr;
			desc->lld.mbr_da = buf_addr + i * period_len;
		} else {
			desc->lld.mbr_sa = buf_addr + i * period_len;
			desc->lld.mbr_da = atchan->sconfig.dst_addr;
		}
		desc->lld.mbr_cfg = atchan->cfg;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
			| AT_XDMAC_MBR_UBC_NDEN
			| AT_XDMAC_MBR_UBC_NSEN
			| period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);

		dev_dbg(chan2dev(chan),
			 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
			 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			 __func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
	}

	at_xdmac_queue_desc(chan, prev, first);
	first->tx_dma_desc.flags = flags;
	first->xfer_size = buf_len;
	first->direction = direction;

	return &first->tx_dma_desc;
}
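
/*
 * Example (illustrative sizes): dmaengine_prep_dma_cyclic(chan, buf, 8192,
 * 1024, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT) builds eight one-period
 * descriptors; the final at_xdmac_queue_desc(chan, prev, first) call above
 * links the last descriptor back to the first, closing the ring.
 */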

static inline u32 at_xdmac_align_width(struct dma_chan *chan, dma_addr_t addr)
{
	u32 width;

	/*
	 * Check address alignment to select the greater data width we
	 * can use.
	 *
	 * Some XDMAC implementations don't provide dword transfer, in
	 * this case selecting dword has the same behavior as
	 * selecting word transfers.
	 */
	if (!(addr & 7)) {
		width = AT_XDMAC_CC_DWIDTH_DWORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
	} else if (!(addr & 3)) {
		width = AT_XDMAC_CC_DWIDTH_WORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
	} else if (!(addr & 1)) {
		width = AT_XDMAC_CC_DWIDTH_HALFWORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
	} else {
		width = AT_XDMAC_CC_DWIDTH_BYTE;
		dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
	}

	return width;
}
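
/*
 * For instance, at_xdmac_align_width(chan, 0x20001004) returns
 * AT_XDMAC_CC_DWIDTH_WORD: the address is 4-byte aligned but not
 * 8-byte aligned.
 */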

static struct at_xdmac_desc *
at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
				struct at_xdmac_chan *atchan,
				struct at_xdmac_desc *prev,
				dma_addr_t src, dma_addr_t dst,
				struct dma_interleaved_template *xt,
				struct data_chunk *chunk)
{
	struct at_xdmac_desc	*desc;
	u32			dwidth;
	unsigned long		flags;
	size_t			ublen;
	/*
	 * WARNING: The channel configuration is set here since there is no
	 * dmaengine_slave_config call in this case. Moreover, since we don't
	 * know the direction, we cannot dynamically set the source and dest
	 * interfaces, so we have to use the same one for both. Only interface
	 * 0 allows EBI access. Fortunately, we can access DDR through both
	 * ports (at least on SAMA5D4x), so using the same interface for
	 * source and dest works around the unknown direction.
	 * ERRATA: Even though it is unused for memory transfers, the PERID
	 * must not match that of another channel; otherwise it could lead to
	 * spurious flag status.
	 */
	u32			chan_cc = AT_XDMAC_CC_PERID(0x3f)
					| AT_XDMAC_CC_DIF(0)
					| AT_XDMAC_CC_SIF(0)
					| AT_XDMAC_CC_MBSIZE_SIXTEEN
					| AT_XDMAC_CC_TYPE_MEM_TRAN;

	dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
	if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
		dev_dbg(chan2dev(chan),
			"%s: chunk too big (%zu, max size %lu)...\n",
			__func__, chunk->size,
			AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
		return NULL;
	}

	if (prev)
		dev_dbg(chan2dev(chan),
			"Adding items at the end of desc 0x%p\n", prev);

	if (xt->src_inc) {
		if (xt->src_sgl)
			chan_cc |=  AT_XDMAC_CC_SAM_UBS_AM;
		else
			chan_cc |=  AT_XDMAC_CC_SAM_INCREMENTED_AM;
	}

	if (xt->dst_inc) {
		if (xt->dst_sgl)
			chan_cc |=  AT_XDMAC_CC_DAM_UBS_AM;
		else
			chan_cc |=  AT_XDMAC_CC_DAM_INCREMENTED_AM;
	}

	spin_lock_irqsave(&atchan->lock, flags);
	desc = at_xdmac_get_desc(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
	if (!desc) {
		dev_err(chan2dev(chan), "can't get descriptor\n");
		return NULL;
	}

	chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

	ublen = chunk->size >> dwidth;

	desc->lld.mbr_sa = src;
	desc->lld.mbr_da = dst;
	desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk);
	desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk);

	desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
		| AT_XDMAC_MBR_UBC_NDEN
		| AT_XDMAC_MBR_UBC_NSEN
		| ublen;
	desc->lld.mbr_cfg = chan_cc;

	dev_dbg(chan2dev(chan),
		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
		__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
		desc->lld.mbr_ubc, desc->lld.mbr_cfg);

	/* Chain lld. */
	if (prev)
		at_xdmac_queue_desc(chan, prev, desc);

	return desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_interleaved(struct dma_chan *chan,
			  struct dma_interleaved_template *xt,
			  unsigned long flags)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*prev = NULL, *first = NULL;
	dma_addr_t		dst_addr, src_addr;
	size_t			src_skip = 0, dst_skip = 0, len = 0;
	struct data_chunk	*chunk;
	int			i;

	if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM))
		return NULL;

	/*
	 * TODO: Handle the case where we have to repeat a chain of
	 * descriptors...
	 */
	if ((xt->numf > 1) && (xt->frame_size > 1))
		return NULL;

	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
		__func__, &xt->src_start, &xt->dst_start,	xt->numf,
		xt->frame_size, flags);

	src_addr = xt->src_start;
	dst_addr = xt->dst_start;

	if (xt->numf > 1) {
		first = at_xdmac_interleaved_queue_desc(chan, atchan,
							NULL,
							src_addr, dst_addr,
							xt, xt->sgl);

		/* Length of the block is (BLEN+1) microblocks. */
		for (i = 0; i < xt->numf - 1; i++)
			at_xdmac_increment_block_count(chan, first);

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, first, first);
		list_add_tail(&first->desc_node, &first->descs_list);
	} else {
		for (i = 0; i < xt->frame_size; i++) {
			size_t src_icg = 0, dst_icg = 0;
			struct at_xdmac_desc *desc;

			chunk = xt->sgl + i;

			dst_icg = dmaengine_get_dst_icg(xt, chunk);
			src_icg = dmaengine_get_src_icg(xt, chunk);

			src_skip = chunk->size + src_icg;
			dst_skip = chunk->size + dst_icg;

			dev_dbg(chan2dev(chan),
				"%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n",
				__func__, chunk->size, src_icg, dst_icg);

			desc = at_xdmac_interleaved_queue_desc(chan, atchan,
							       prev,
							       src_addr, dst_addr,
							       xt, chunk);
			if (!desc) {
				if (first)
					list_splice_init(&first->descs_list,
							 &atchan->free_descs_list);
				return NULL;
			}

			if (!first)
				first = desc;

			dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
				__func__, desc, first);
			list_add_tail(&desc->desc_node, &first->descs_list);

			if (xt->src_sgl)
				src_addr += src_skip;

			if (xt->dst_sgl)
				dst_addr += dst_skip;

			len += chunk->size;
			prev = desc;
		}
	}

	first->tx_dma_desc.cookie = -EBUSY;
	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			 size_t len, unsigned long flags)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*first = NULL, *prev = NULL;
	size_t			remaining_size = len, xfer_size = 0, ublen;
	dma_addr_t		src_addr = src, dst_addr = dest;
	u32			dwidth;
	/*
	 * WARNING: Since we don't know the direction, we cannot dynamically
	 * set the source and dest interfaces, so we have to use the same one
	 * for both. Only interface 0 allows EBI access. Fortunately, we can
	 * access DDR through both ports (at least on SAMA5D4x), so using the
	 * same interface for source and dest works around the unknown
	 * direction.
	 * ERRATA: Even though it is unused for memory transfers, the PERID
	 * must not match that of another channel; otherwise it could lead to
	 * spurious flag status.
	 */
	u32			chan_cc = AT_XDMAC_CC_PERID(0x3f)
					| AT_XDMAC_CC_DAM_INCREMENTED_AM
					| AT_XDMAC_CC_SAM_INCREMENTED_AM
					| AT_XDMAC_CC_DIF(0)
					| AT_XDMAC_CC_SIF(0)
					| AT_XDMAC_CC_MBSIZE_SIXTEEN
					| AT_XDMAC_CC_TYPE_MEM_TRAN;
	unsigned long		irqflags;

	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
		__func__, &src, &dest, len, flags);

	if (unlikely(!len))
		return NULL;

	dwidth = at_xdmac_align_width(chan, src_addr | dst_addr);

	/* Prepare descriptors. */
	while (remaining_size) {
		struct at_xdmac_desc	*desc = NULL;

		dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);

		spin_lock_irqsave(&atchan->lock, irqflags);
		desc = at_xdmac_get_desc(atchan);
		spin_unlock_irqrestore(&atchan->lock, irqflags);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_init(&first->descs_list, &atchan->free_descs_list);
			return NULL;
		}

		/* Update src and dest addresses. */
		src_addr += xfer_size;
		dst_addr += xfer_size;

		if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
			xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
		else
			xfer_size = remaining_size;

		dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);

		/* Check remaining length and change data width if needed. */
		dwidth = at_xdmac_align_width(chan,
					      src_addr | dst_addr | xfer_size);
		chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
		chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

		ublen = xfer_size >> dwidth;
		remaining_size -= xfer_size;

		desc->lld.mbr_sa = src_addr;
		desc->lld.mbr_da = dst_addr;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
			| AT_XDMAC_MBR_UBC_NDEN
			| AT_XDMAC_MBR_UBC_NSEN
			| ublen;
		desc->lld.mbr_cfg = chan_cc;

		dev_dbg(chan2dev(chan),
			 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
			 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			 __func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
	}

	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}
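
/*
 * Worked example of the splitting above (hypothetical request): a 260 MiB
 * copy with both addresses 8-byte aligned selects the double-word width,
 * so each microblock carries at most AT_XDMAC_MBR_UBC_UBLEN_MAX << 3 =
 * 0x7fffff8 bytes (~128 MiB) and the copy is chained as three linked
 * descriptors.
 */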

static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
							 struct at_xdmac_chan *atchan,
							 dma_addr_t dst_addr,
							 size_t len,
							 int value)
{
	struct at_xdmac_desc	*desc;
	unsigned long		flags;
	size_t			ublen;
	u32			dwidth;
	/*
	 * WARNING: The channel configuration is set here since there is no
	 * dmaengine_slave_config call in this case. Moreover, since we don't
	 * know the direction, we cannot dynamically set the source and dest
	 * interfaces, so we have to use the same one for both. Only interface
	 * 0 allows EBI access. Fortunately, we can access DDR through both
	 * ports (at least on SAMA5D4x), so using the same interface for
	 * source and dest works around the unknown direction.
	 * ERRATA: Even though it is unused for memory transfers, the PERID
	 * must not match that of another channel; otherwise it could lead to
	 * spurious flag status.
	 */
	u32			chan_cc = AT_XDMAC_CC_PERID(0x3f)
					| AT_XDMAC_CC_DAM_UBS_AM
					| AT_XDMAC_CC_SAM_INCREMENTED_AM
					| AT_XDMAC_CC_DIF(0)
					| AT_XDMAC_CC_SIF(0)
					| AT_XDMAC_CC_MBSIZE_SIXTEEN
					| AT_XDMAC_CC_MEMSET_HW_MODE
					| AT_XDMAC_CC_TYPE_MEM_TRAN;

	dwidth = at_xdmac_align_width(chan, dst_addr);

	if (len >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
		dev_err(chan2dev(chan),
			"%s: Transfer too large, aborting...\n",
			__func__);
		return NULL;
	}

	spin_lock_irqsave(&atchan->lock, flags);
	desc = at_xdmac_get_desc(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
	if (!desc) {
		dev_err(chan2dev(chan), "can't get descriptor\n");
		return NULL;
	}

	chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

	ublen = len >> dwidth;

	desc->lld.mbr_da = dst_addr;
	desc->lld.mbr_ds = value;
	desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
		| AT_XDMAC_MBR_UBC_NDEN
		| AT_XDMAC_MBR_UBC_NSEN
		| ublen;
	desc->lld.mbr_cfg = chan_cc;

	dev_dbg(chan2dev(chan),
		"%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
		__func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
		desc->lld.mbr_cfg);

	return desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
			 size_t len, unsigned long flags)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*desc;

	dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%zu, pattern=0x%x, flags=0x%lx\n",
		__func__, &dest, len, value, flags);

	if (unlikely(!len))
		return NULL;

	desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
	if (!desc)
		return NULL;
	list_add_tail(&desc->desc_node, &desc->descs_list);

	desc->tx_dma_desc.cookie = -EBUSY;
	desc->tx_dma_desc.flags = flags;
	desc->xfer_size = len;

	return &desc->tx_dma_desc;
}
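
/*
 * Illustrative use (buffer and length hypothetical): filling a 4 KiB
 * DMA-able buffer with zeros through the generic helper, which lands here:
 *
 *	txd = dmaengine_prep_dma_memset(chan, buf_phys, 0, SZ_4K,
 *					DMA_PREP_INTERRUPT);
 */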

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int sg_len, int value,
			    unsigned long flags)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*desc, *pdesc = NULL,
				*ppdesc = NULL, *first = NULL;
	struct scatterlist	*sg, *psg = NULL, *ppsg = NULL;
	size_t			stride = 0, pstride = 0, len = 0;
	int			i;

	if (!sgl)
		return NULL;

	dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n",
		__func__, sg_len, value, flags);

	/* Prepare descriptors. */
	for_each_sg(sgl, sg, sg_len, i) {
		dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
			__func__, &sg_dma_address(sg), sg_dma_len(sg),
			value, flags);
		desc = at_xdmac_memset_create_desc(chan, atchan,
						   sg_dma_address(sg),
						   sg_dma_len(sg),
						   value);
		if (!desc) {
			if (first)
				list_splice_init(&first->descs_list,
						 &atchan->free_descs_list);
			return NULL;
		}

		if (!first)
			first = desc;

		/* Update our strides */
		pstride = stride;
		if (psg)
			stride = sg_dma_address(sg) -
				(sg_dma_address(psg) + sg_dma_len(psg));

		/*
		 * The scatterlist API gives us only the address and
		 * length of each element.
		 *
		 * Unfortunately, we don't have the stride, which we
		 * will need to compute.
		 *
		 * That makes us end up in a situation like this one:
		 *    len    stride    len    stride    len
		 * +-------+        +-------+        +-------+
		 * |  N-2  |        |  N-1  |        |   N   |
		 * +-------+        +-------+        +-------+
		 *
		 * We need all these three elements (N-2, N-1 and N)
		 * to actually take the decision on whether we need to
		 * queue N-1 or reuse N-2.
		 *
		 * We will only consider N if it is the last element.
		 */
		if (ppdesc && pdesc) {
			if ((stride == pstride) &&
			    (sg_dma_len(ppsg) == sg_dma_len(psg))) {
				dev_dbg(chan2dev(chan),
					"%s: desc 0x%p can be merged with desc 0x%p\n",
					__func__, pdesc, ppdesc);

				/*
				 * Increment the block count of the
				 * N-2 descriptor
				 */
				at_xdmac_increment_block_count(chan, ppdesc);
				ppdesc->lld.mbr_dus = stride;

				/*
				 * Put back the N-1 descriptor in the
				 * free descriptor list
				 */
				list_add_tail(&pdesc->desc_node,
					      &atchan->free_descs_list);

				/*
				 * Make our N-1 descriptor pointer
				 * point to the N-2 since they were
				 * actually merged.
				 */
				pdesc = ppdesc;

			/*
			 * Rule out the case where we don't have
			 * pstride computed yet (our second sg
			 * element).
			 *
			 * We also want to catch the case where there
			 * would be a negative stride.
			 */
			} else if (pstride ||
				   sg_dma_address(sg) < sg_dma_address(psg)) {
				/*
				 * Queue the N-1 descriptor after the
				 * N-2
				 */
				at_xdmac_queue_desc(chan, ppdesc, pdesc);

				/*
				 * Add the N-1 descriptor to the list
				 * of the descriptors used for this
				 * transfer
				 */
				list_add_tail(&desc->desc_node,
					      &first->descs_list);
				dev_dbg(chan2dev(chan),
					"%s: add desc 0x%p to descs_list 0x%p\n",
					__func__, desc, first);
			}
		}

		/*
		 * If we are the last element, just see if we have the
		 * same size as the previous element.
		 *
		 * If so, we can merge it with the previous descriptor
		 * since we don't care about the stride anymore.
		 */
		if ((i == (sg_len - 1)) &&
		    sg_dma_len(psg) == sg_dma_len(sg)) {
			dev_dbg(chan2dev(chan),
				"%s: desc 0x%p can be merged with desc 0x%p\n",
				__func__, desc, pdesc);

			/*
			 * Increment the block count of the N-1
			 * descriptor
			 */
			at_xdmac_increment_block_count(chan, pdesc);
			pdesc->lld.mbr_dus = stride;

			/*
			 * Put back the N descriptor in the free
			 * descriptor list
			 */
			list_add_tail(&desc->desc_node,
				      &atchan->free_descs_list);
		}

		/* Update our descriptors */
		ppdesc = pdesc;
		pdesc = desc;

		/* Update our scatter pointers */
		ppsg = psg;
		psg = sg;

		len += sg_dma_len(sg);
	}

	first->tx_dma_desc.cookie = -EBUSY;
	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}

static enum dma_status
at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc	*desc, *_desc;
	struct list_head	*descs_list;
	enum dma_status		ret;
	int			residue, retry;
	u32			cur_nda, check_nda, cur_ubc, mask, value;
	u8			dwidth = 0;
	unsigned long		flags;
	bool			initd;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!txstate)
		return ret;

	spin_lock_irqsave(&atchan->lock, flags);

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);

	/*
	 * If the transfer has not been started yet, we don't need to compute
	 * the residue: it is simply the transfer length.
	 */
	if (!desc->active_xfer) {
		dma_set_residue(txstate, desc->xfer_size);
		goto spin_unlock;
	}

	residue = desc->xfer_size;
	/*
	 * Flush FIFO: only relevant when the transfer is source peripheral
	 * synchronized. The flush is needed before reading CUBC because data
	 * in the FIFO are not reported by CUBC. Reporting a residue equal to
	 * the transfer length while data sit in the FIFO can cause problems.
	 * Use case: the Atmel USART raises a timeout when characters have
	 * been received but no new character arrives for a while. On
	 * timeout, it requests the residue. If the data are still in the DMA
	 * FIFO, we would return a residue equal to the transfer length,
	 * meaning no data was received. An application waiting for this data
	 * would then hang, since no further USART timeout occurs without new
	 * incoming data.
	 */
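	/* Match peripheral-synchronized, device-to-memory transfers. */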
	mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
	value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
	if ((desc->lld.mbr_cfg & mask) == value) {
		at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
			cpu_relax();
	}

	/*
	 * The easiest way to compute the residue would be to pause the DMA,
	 * but doing so can lead to lost data, as some devices don't have a
	 * FIFO.
	 * We need to read several registers because:
	 * - the DMA is running, so a descriptor change is possible while
	 * reading these registers;
	 * - when a block transfer is done, the CUBC register is reset to its
	 * initial value until the next descriptor is fetched. This transient
	 * value would corrupt the residue calculation, so we have to skip
	 * it.
	 *
	 * INITD --------                    ------------
	 *              |____________________|
	 *       _______________________  _______________
	 * NDA       @desc2             \/   @desc3
	 *       _______________________/\_______________
	 *       __________  ___________  _______________
	 * CUBC       0    \/ MAX desc1 \/  MAX desc2
	 *       __________/\___________/\_______________
	 *
	 * Since descriptors are aligned on 64 bits, we can assume that
	 * the updates of NDA and CUBC are atomic.
	 * Memory barriers are used to enforce the read order of the
	 * registers. A maximum number of retries bounds the loop, which
	 * could otherwise, however unlikely, never end.
	 */
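	/*
	 * NDA is sampled twice, before and after CUBC and INITD: if the two
	 * samples differ, a descriptor fetch raced with the reads and the
	 * snapshot is retried.
	 */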
	for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
		check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
		rmb();
		cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
		rmb();
		initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
		rmb();
		cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
		rmb();

		if ((check_nda == cur_nda) && initd)
			break;
	}

	if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
		ret = DMA_ERROR;
		goto spin_unlock;
	}

	/*
	 * Flush FIFO: only relevant when the transfer is source peripheral
	 * synchronized. Another flush is needed here because CUBC is updated
	 * when the controller issues the data write command, so it may count
	 * data not yet written to memory or to the device. The FIFO flush
	 * ensures the data are actually written.
	 */
	if ((desc->lld.mbr_cfg & mask) == value) {
		at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
			cpu_relax();
	}

	/*
	 * Subtract the size of all microblocks already transferred, including
	 * the current one, then add back the remaining size of the current
	 * microblock.
	 */
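	/*
	 * Sketch of the arithmetic performed below (illustrative only; UBC is
	 * the microblock length in data units, dwidth the log2 of the data
	 * width in bytes):
	 *
	 *	residue = xfer_size
	 *		  - sum of (UBC << dwidth) over the descriptors up to
	 *		    and including the current one
	 *		  + (CUBC << dwidth) of the current descriptor
	 */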
	descs_list = &desc->descs_list;
	list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
		residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
		if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
			break;
	}
	residue += cur_ubc << dwidth;

	dma_set_residue(txstate, residue);

	dev_dbg(chan2dev(chan),
		 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
		 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);

spin_unlock:
	spin_unlock_irqrestore(&atchan->lock, flags);
	return ret;
}
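
/*
 * Usage sketch (hypothetical client code, not part of this driver): the
 * residue computed above is consumed through the generic dmaengine API:
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
 *		pr_debug("bytes left: %u\n", state.residue);
 */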

/* Caller must hold atchan->lock. */
static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
				    struct at_xdmac_desc *desc)
{
	dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);

	/*
	 * Remove the transfer from the transfer list then move the transfer
	 * descriptors into the free descriptors list.
	 */
	list_del(&desc->xfer_node);
	list_splice_init(&desc->descs_list, &atchan->free_descs_list);
}

static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc	*desc;
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);

	/*
	 * If the channel is enabled, do nothing: advance_work will be
	 * triggered again from the interrupt handler.
	 */
	if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) {
		desc = list_first_entry(&atchan->xfers_list,
					struct at_xdmac_desc,
					xfer_node);
		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
		if (!desc->active_xfer)
			at_xdmac_start_xfer(atchan, desc);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);
}

static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc		*desc;
	struct dma_async_tx_descriptor	*txd;

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
	txd = &desc->tx_dma_desc;

	if (txd->flags & DMA_PREP_INTERRUPT)
		dmaengine_desc_get_callback_invoke(txd, NULL);
}

static void at_xdmac_tasklet(unsigned long data)
{
	struct at_xdmac_chan	*atchan = (struct at_xdmac_chan *)data;
	struct at_xdmac_desc	*desc;
	u32			error_mask;

	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
		__func__, atchan->irq_status);

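	/* Channel error conditions: read/write bus error and request overflow. */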
	error_mask = AT_XDMAC_CIS_RBEIS
		     | AT_XDMAC_CIS_WBEIS
		     | AT_XDMAC_CIS_ROIS;

	if (at_xdmac_chan_is_cyclic(atchan)) {
		at_xdmac_handle_cyclic(atchan);
	} else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
		   || (atchan->irq_status & error_mask)) {
		struct dma_async_tx_descriptor  *txd;

		if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
			dev_err(chan2dev(&atchan->chan), "read bus error!!!\n");
		if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
			dev_err(chan2dev(&atchan->chan), "write bus error!!!\n");
		if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
			dev_err(chan2dev(&atchan->chan), "request overflow error!!!\n");

		spin_lock_bh(&atchan->lock);
		desc = list_first_entry(&atchan->xfers_list,
					struct at_xdmac_desc,
					xfer_node);
		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
		if (!desc->active_xfer) {
			dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting\n");
			spin_unlock_bh(&atchan->lock);
			return;
		}

		txd = &desc->tx_dma_desc;

		at_xdmac_remove_xfer(atchan, desc);
		spin_unlock_bh(&atchan->lock);

		if (!at_xdmac_chan_is_cyclic(atchan)) {
			dma_cookie_complete(txd);
			if (txd->flags & DMA_PREP_INTERRUPT)
				dmaengine_desc_get_callback_invoke(txd, NULL);
		}

		dma_run_dependencies(txd);

		at_xdmac_advance_work(atchan);
	}
}

static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
{
	struct at_xdmac		*atxdmac = (struct at_xdmac *)dev_id;
	struct at_xdmac_chan	*atchan;
	u32			imr, status, pending;
	u32			chan_imr, chan_status;
	int			i, ret = IRQ_NONE;

	do {
		imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
		status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
		pending = status & imr;

		dev_vdbg(atxdmac->dma.dev,
			 "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
			 __func__, status, imr, pending);

		if (!pending)
			break;

		/* We have to find which channel has generated the interrupt. */
		for (i = 0; i < atxdmac->dma.chancnt; i++) {
			if (!((1 << i) & pending))
				continue;

			atchan = &atxdmac->chan[i];
			chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
			chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
			atchan->irq_status = chan_status & chan_imr;
			dev_vdbg(atxdmac->dma.dev,
				 "%s: chan%d: imr=0x%x, status=0x%x\n",
				 __func__, i, chan_imr, chan_status);
			dev_vdbg(chan2dev(&atchan->chan),
				 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
				 __func__,
				 at_xdmac_chan_read(atchan, AT_XDMAC_CC),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

			if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
				at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);

			tasklet_schedule(&atchan->tasklet);
			ret = IRQ_HANDLED;
		}

	} while (pending);

	return ret;
}

static void at_xdmac_issue_pending(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

	dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);

	if (!at_xdmac_chan_is_cyclic(atchan))
		at_xdmac_advance_work(atchan);
}

static int at_xdmac_device_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	int ret;
	unsigned long		flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);
	ret = at_xdmac_set_slave_config(chan, config);
	spin_unlock_irqrestore(&atchan->lock, flags);

	return ret;
}

static int at_xdmac_device_pause(struct dma_chan *chan)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long		flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
		return 0;

	spin_lock_irqsave(&atchan->lock, flags);
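	/*
	 * Request a read/write suspend (GRWS) for this channel, then wait
	 * until no read or write transaction remains in progress.
	 */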
	at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
	       & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
		cpu_relax();
	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

static int at_xdmac_device_resume(struct dma_chan *chan)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long		flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);
	if (!at_xdmac_chan_is_paused(atchan)) {
		spin_unlock_irqrestore(&atchan->lock, flags);
		return 0;
	}

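	/* Resume read/write transactions (GRWR) for this channel. */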
	at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

static int at_xdmac_device_terminate_all(struct dma_chan *chan)
{
	struct at_xdmac_desc	*desc, *_desc;
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long		flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);
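	/* Disable the channel (GD) and wait until it reports idle in GS. */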
	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
		cpu_relax();

	/* Cancel all pending transfers. */
	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
		at_xdmac_remove_xfer(atchan, desc);

	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}
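
/*
 * Usage sketch (hypothetical client code): a client aborts everything queued
 * on a channel through the generic helper, e.g.
 *
 *	dmaengine_terminate_all(chan);
 */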

static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*desc;
	int			i;
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);

	if (at_xdmac_chan_is_enabled(atchan)) {
		dev_err(chan2dev(chan),
			"can't allocate channel resources (channel enabled)\n");
		i = -EIO;
		goto spin_unlock;
	}

	if (!list_empty(&atchan->free_descs_list)) {
		dev_err(chan2dev(chan),
			"can't allocate channel resources (channel not free from a previous use)\n");
		i = -EIO;
		goto spin_unlock;
	}

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC);
		if (!desc) {
			dev_warn(chan2dev(chan),
				"only %d descriptors have been allocated\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &atchan->free_descs_list);
	}

	dma_cookie_init(chan);

	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

spin_unlock:
	spin_unlock_irqrestore(&atchan->lock, flags);
	return i;
}

static void at_xdmac_free_chan_resources(struct dma_chan *chan)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(chan->device);
	struct at_xdmac_desc	*desc, *_desc;

	list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
		list_del(&desc->desc_node);
		dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
	}
}

#ifdef CONFIG_PM
static int atmel_xdmac_prepare(struct device *dev)
{
	struct at_xdmac		*atxdmac = dev_get_drvdata(dev);
	struct dma_chan		*chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);

		/*
		 * A non-cyclic transfer is still in flight: defer the
		 * suspend by returning -EAGAIN. Cyclic transfers are
		 * handled by the suspend/resume callbacks instead.
		 */
		if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}
#else
#	define atmel_xdmac_prepare NULL
#endif

#ifdef CONFIG_PM_SLEEP
static int atmel_xdmac_suspend(struct device *dev)
{
	struct at_xdmac		*atxdmac = dev_get_drvdata(dev);
	struct dma_chan		*chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);

		atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
		if (at_xdmac_chan_is_cyclic(atchan)) {
			if (!at_xdmac_chan_is_paused(atchan))
				at_xdmac_device_pause(chan);
			atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
			atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
			atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
		}
	}
	atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);

	at_xdmac_off(atxdmac);
	clk_disable_unprepare(atxdmac->clk);
	return 0;
}

static int atmel_xdmac_resume(struct device *dev)
{
	struct at_xdmac		*atxdmac = dev_get_drvdata(dev);
	struct at_xdmac_chan	*atchan;
	struct dma_chan		*chan, *_chan;
	int			i;
	int ret;

	ret = clk_prepare_enable(atxdmac->clk);
	if (ret)
		return ret;

	/* Clear pending interrupts. */
	for (i = 0; i < atxdmac->dma.chancnt; i++) {
		atchan = &atxdmac->chan[i];
		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
			cpu_relax();
	}

	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		atchan = to_at_xdmac_chan(chan);
		at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
		if (at_xdmac_chan_is_cyclic(atchan)) {
			if (at_xdmac_chan_is_paused(atchan))
				at_xdmac_device_resume(chan);
			at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
			at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
			wmb();
			at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
		}
	}
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static int at_xdmac_probe(struct platform_device *pdev)
{
	struct resource	*res;
	struct at_xdmac	*atxdmac;
	int		irq, size, nr_channels, i, ret;
	void __iomem	*base;
	u32		reg;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/*
	 * Read the number of xdmac channels. The read helper can't be used
	 * here since atxdmac is not yet allocated, and we need the channel
	 * count to size that allocation.
	 */
	reg = readl_relaxed(base + AT_XDMAC_GTYPE);
	nr_channels = AT_XDMAC_NB_CH(reg);
	if (nr_channels > AT_XDMAC_MAX_CHAN) {
		dev_err(&pdev->dev, "invalid number of channels (%u)\n",
			nr_channels);
		return -EINVAL;
	}

	size = sizeof(*atxdmac);
	size += nr_channels * sizeof(struct at_xdmac_chan);
	atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!atxdmac) {
		dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
		return -ENOMEM;
	}

	atxdmac->regs = base;
	atxdmac->irq = irq;

	atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atxdmac->clk)) {
		dev_err(&pdev->dev, "can't get dma_clk\n");
		return PTR_ERR(atxdmac->clk);
	}

	/* Do not use a devm-managed IRQ, to prevent races with the tasklet. */
	ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
	if (ret) {
		dev_err(&pdev->dev, "can't request irq\n");
		return ret;
	}

	ret = clk_prepare_enable(atxdmac->clk);
	if (ret) {
		dev_err(&pdev->dev, "can't prepare or enable clock\n");
		goto err_free_irq;
	}

	atxdmac->at_xdmac_desc_pool =
		dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
				sizeof(struct at_xdmac_desc), 4, 0);
	if (!atxdmac->at_xdmac_desc_pool) {
		dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
		ret = -ENOMEM;
		goto err_clk_disable;
	}

	dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
	/*
	 * Without DMA_PRIVATE the driver is not able to allocate more than
	 * one channel: the second allocation fails in private_candidate().
	 */
	dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
	atxdmac->dma.dev				= &pdev->dev;
	atxdmac->dma.device_alloc_chan_resources	= at_xdmac_alloc_chan_resources;
	atxdmac->dma.device_free_chan_resources		= at_xdmac_free_chan_resources;
	atxdmac->dma.device_tx_status			= at_xdmac_tx_status;
	atxdmac->dma.device_issue_pending		= at_xdmac_issue_pending;
	atxdmac->dma.device_prep_dma_cyclic		= at_xdmac_prep_dma_cyclic;
	atxdmac->dma.device_prep_interleaved_dma	= at_xdmac_prep_interleaved;
	atxdmac->dma.device_prep_dma_memcpy		= at_xdmac_prep_dma_memcpy;
	atxdmac->dma.device_prep_dma_memset		= at_xdmac_prep_dma_memset;
	atxdmac->dma.device_prep_dma_memset_sg		= at_xdmac_prep_dma_memset_sg;
	atxdmac->dma.device_prep_slave_sg		= at_xdmac_prep_slave_sg;
	atxdmac->dma.device_config			= at_xdmac_device_config;
	atxdmac->dma.device_pause			= at_xdmac_device_pause;
	atxdmac->dma.device_resume			= at_xdmac_device_resume;
	atxdmac->dma.device_terminate_all		= at_xdmac_device_terminate_all;
	atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
	atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
	atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	/* Disable all chans and interrupts. */
	at_xdmac_off(atxdmac);

	/* Init channels. */
	INIT_LIST_HEAD(&atxdmac->dma.channels);
	for (i = 0; i < nr_channels; i++) {
		struct at_xdmac_chan *atchan = &atxdmac->chan[i];

		atchan->chan.device = &atxdmac->dma;
		list_add_tail(&atchan->chan.device_node,
			      &atxdmac->dma.channels);

		atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
		atchan->mask = 1 << i;

		spin_lock_init(&atchan->lock);
		INIT_LIST_HEAD(&atchan->xfers_list);
		INIT_LIST_HEAD(&atchan->free_descs_list);
		tasklet_init(&atchan->tasklet, at_xdmac_tasklet,
			     (unsigned long)atchan);

		/* Clear pending interrupts. */
		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
			cpu_relax();
	}
	platform_set_drvdata(pdev, atxdmac);

	ret = dma_async_device_register(&atxdmac->dma);
	if (ret) {
		dev_err(&pdev->dev, "fail to register DMA engine device\n");
		goto err_clk_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 at_xdmac_xlate, atxdmac);
	if (ret) {
		dev_err(&pdev->dev, "could not register of dma controller\n");
		goto err_dma_unregister;
	}

	dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
		 nr_channels, atxdmac->regs);

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&atxdmac->dma);
err_clk_disable:
	clk_disable_unprepare(atxdmac->clk);
err_free_irq:
	free_irq(atxdmac->irq, atxdmac);
	return ret;
}

static int at_xdmac_remove(struct platform_device *pdev)
{
	struct at_xdmac	*atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
	int		i;

	at_xdmac_off(atxdmac);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&atxdmac->dma);
	clk_disable_unprepare(atxdmac->clk);

	free_irq(atxdmac->irq, atxdmac);

	for (i = 0; i < atxdmac->dma.chancnt; i++) {
		struct at_xdmac_chan *atchan = &atxdmac->chan[i];

		tasklet_kill(&atchan->tasklet);
		at_xdmac_free_chan_resources(&atchan->chan);
	}

	return 0;
}

static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = {
	.prepare	= atmel_xdmac_prepare,
	SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
};

static const struct of_device_id atmel_xdmac_dt_ids[] = {
	{
		.compatible = "atmel,sama5d4-dma",
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);

static struct platform_driver at_xdmac_driver = {
	.probe		= at_xdmac_probe,
	.remove		= at_xdmac_remove,
	.driver = {
		.name		= "at_xdmac",
		.of_match_table	= of_match_ptr(atmel_xdmac_dt_ids),
		.pm		= &atmel_xdmac_dev_pm_ops,
	}
};

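/*
 * Register at subsys_initcall time so the DMA controller is available before
 * the client drivers that depend on it are probed.
 */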
static int __init at_xdmac_init(void)
{
	return platform_driver_probe(&at_xdmac_driver, at_xdmac_probe);
}
subsys_initcall(at_xdmac_init);

MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
MODULE_LICENSE("GPL");