/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/include/lustre/lustre_idl.h
 *
 * Lustre wire protocol definitions.
 */

/** \defgroup lustreidl lustreidl
 *
 * Lustre wire protocol definitions.
 *
 * ALL structs passing over the wire should be declared here.  Structs
 * that are used in interfaces with userspace should go in lustre_user.h.
 *
 * All structs being declared here should be built from simple fixed-size
 * types (__u8, __u16, __u32, __u64) or be built from other types or
 * structs also declared in this file.  Similarly, all flags and magic
 * values in those structs should also be declared here.  This ensures
 * that the Lustre wire protocol is not influenced by external dependencies.
 *
 * The only other acceptable items in this file are VERY SIMPLE accessor
 * functions to avoid callers grubbing inside the structures, and the
 * prototypes of the swabber functions for each struct.  Nothing that
 * depends on external functions or definitions should be in here.
 *
 * Structs must be properly aligned to put 64-bit values on an 8-byte
 * boundary.  Any structs being added here must also be added to
 * utils/wirecheck.c and "make newwiretest" run to regenerate the
 * utils/wiretest.c sources.  This allows us to verify that wire structs
 * have the proper alignment/size on all architectures.
 *
 * DO NOT CHANGE any of the structs, flags, values declared here and used
 * in released Lustre versions.  Some structs may have padding fields that
 * can be used.  Some structs might allow addition at the end (verify this
 * in the code to ensure that new/old clients that see this larger struct
 * do not fail, otherwise you need to implement protocol compatibility).
 *
 * We assume all nodes are either little-endian or big-endian, and we
 * always send messages in the sender's native format.  The receiver
 * detects the message format by checking the 'magic' field of the message
 * (see lustre_msg_swabbed() below).
 *
 * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines,
 * implemented either here, inline (trivial implementations) or in
 * ptlrpc/pack_generic.c.  These 'swabbers' convert the type from the
 * "other" endianness, in place in the message buffer.
 *
 * A swabber takes a single pointer argument.  The caller must already have
 * verified that the length of the message buffer >= sizeof (type).
 *
 * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
 * may be defined that swabs just the variable part, after the caller has
 * verified that the message buffer is large enough.
 *
 * @{
 */

#ifndef _LUSTRE_IDL_H_
#define _LUSTRE_IDL_H_

#if !defined(LASSERT) && !defined(LPU64)
#include <linux/libcfs/libcfs.h> /* for LASSERT, LPU64, etc */
#endif

/* Defn's shared with user-space. */
#include <lustre/lustre_user.h>

/*
 *  GENERAL STUFF
 */
/* FOO_REQUEST_PORTAL is for incoming requests on the FOO
 * FOO_REPLY_PORTAL   is for incoming replies on the FOO
 * FOO_BULK_PORTAL    is for incoming bulk on the FOO
 */

#define CONNMGR_REQUEST_PORTAL	  1
#define CONNMGR_REPLY_PORTAL	    2
//#define OSC_REQUEST_PORTAL	    3
#define OSC_REPLY_PORTAL		4
//#define OSC_BULK_PORTAL	       5
#define OST_IO_PORTAL		   6
#define OST_CREATE_PORTAL	       7
#define OST_BULK_PORTAL		 8
//#define MDC_REQUEST_PORTAL	    9
#define MDC_REPLY_PORTAL	       10
//#define MDC_BULK_PORTAL	      11
#define MDS_REQUEST_PORTAL	     12
//#define MDS_REPLY_PORTAL	     13
#define MDS_BULK_PORTAL		14
#define LDLM_CB_REQUEST_PORTAL	 15
#define LDLM_CB_REPLY_PORTAL	   16
#define LDLM_CANCEL_REQUEST_PORTAL     17
#define LDLM_CANCEL_REPLY_PORTAL       18
//#define PTLBD_REQUEST_PORTAL	   19
//#define PTLBD_REPLY_PORTAL	     20
//#define PTLBD_BULK_PORTAL	      21
#define MDS_SETATTR_PORTAL	     22
#define MDS_READPAGE_PORTAL	    23
#define MDS_MDS_PORTAL		 24

#define MGC_REPLY_PORTAL	       25
#define MGS_REQUEST_PORTAL	     26
#define MGS_REPLY_PORTAL	       27
#define OST_REQUEST_PORTAL	     28
#define FLD_REQUEST_PORTAL	     29
#define SEQ_METADATA_PORTAL	    30
#define SEQ_DATA_PORTAL		31
#define SEQ_CONTROLLER_PORTAL	  32
#define MGS_BULK_PORTAL		33

/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com, n8851@cray.com */

/* packet types */
#define PTL_RPC_MSG_REQUEST 4711
#define PTL_RPC_MSG_ERR     4712
#define PTL_RPC_MSG_REPLY   4713

/* DON'T use swabbed values of MAGIC as magic! */
#define LUSTRE_MSG_MAGIC_V1 0x0BD00BD0
#define LUSTRE_MSG_MAGIC_V2 0x0BD00BD3

#define LUSTRE_MSG_MAGIC_V1_SWABBED 0xD00BD00B
#define LUSTRE_MSG_MAGIC_V2_SWABBED 0xD30BD00B

#define LUSTRE_MSG_MAGIC LUSTRE_MSG_MAGIC_V2
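
/*
 * Receiver-side detection sketch (illustrative only; the real logic
 * lives in lustre_unpack_msg()): the sender writes lm_magic in its
 * native byte order, so a swabbed constant tells the receiver that
 * every field of the message must be byte-swapped in place:
 *
 *	if (msg->lm_magic == LUSTRE_MSG_MAGIC_V2)
 *		swab_needed = 0;
 *	else if (msg->lm_magic == LUSTRE_MSG_MAGIC_V2_SWABBED)
 *		swab_needed = 1;
 *	else
 *		rc = -EINVAL;	(unknown magic: reject the message)
 */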

#define PTLRPC_MSG_VERSION  0x00000003
#define LUSTRE_VERSION_MASK 0xffff0000
#define LUSTRE_OBD_VERSION  0x00010000
#define LUSTRE_MDS_VERSION  0x00020000
#define LUSTRE_OST_VERSION  0x00030000
#define LUSTRE_DLM_VERSION  0x00040000
#define LUSTRE_LOG_VERSION  0x00050000
#define LUSTRE_MGS_VERSION  0x00060000

typedef __u32 mdsno_t;
typedef __u64 seqno_t;
typedef __u64 obd_id;
typedef __u64 obd_seq;
typedef __s64 obd_time;
typedef __u64 obd_size;
typedef __u64 obd_off;
typedef __u64 obd_blocks;
typedef __u64 obd_valid;
typedef __u32 obd_blksize;
typedef __u32 obd_mode;
typedef __u32 obd_uid;
typedef __u32 obd_gid;
typedef __u32 obd_flag;
typedef __u32 obd_count;

/**
 * Describes a range of sequence numbers; lsr_start is included in the
 * range but lsr_end is not.
 * The same structure is used in the fld module, where the lsr_index
 * field holds the MDT id of the home MDT.
 */
struct lu_seq_range {
	__u64 lsr_start;
	__u64 lsr_end;
	__u32 lsr_index;
	__u32 lsr_flags;
};

#define LU_SEQ_RANGE_MDT	0x0
#define LU_SEQ_RANGE_OST	0x1
#define LU_SEQ_RANGE_ANY	0x3

#define LU_SEQ_RANGE_MASK	0x3

static inline unsigned fld_range_type(const struct lu_seq_range *range)
{
	return range->lsr_flags & LU_SEQ_RANGE_MASK;
}

static inline int fld_range_is_ost(const struct lu_seq_range *range)
{
	return fld_range_type(range) == LU_SEQ_RANGE_OST;
}

static inline int fld_range_is_mdt(const struct lu_seq_range *range)
{
	return fld_range_type(range) == LU_SEQ_RANGE_MDT;
}

/**
 * The ANY range type is used when the fld client sends a fld query
 * request but does not know whether the seq belongs to an MDT or an
 * OST; the request is sent with the ANY type, so either seq type
 * returned from the lookup is acceptable.
 */
static inline unsigned fld_range_is_any(const struct lu_seq_range *range)
{
	return fld_range_type(range) == LU_SEQ_RANGE_ANY;
}

static inline void fld_range_set_type(struct lu_seq_range *range,
				      unsigned flags)
{
	LASSERT(!(flags & ~LU_SEQ_RANGE_MASK));
	range->lsr_flags |= flags;
}

static inline void fld_range_set_mdt(struct lu_seq_range *range)
{
	fld_range_set_type(range, LU_SEQ_RANGE_MDT);
}

static inline void fld_range_set_ost(struct lu_seq_range *range)
{
	fld_range_set_type(range, LU_SEQ_RANGE_OST);
}

static inline void fld_range_set_any(struct lu_seq_range *range)
{
	fld_range_set_type(range, LU_SEQ_RANGE_ANY);
}

/**
 * returns the width of the given range \a range
 */
static inline __u64 range_space(const struct lu_seq_range *range)
{
	return range->lsr_end - range->lsr_start;
}

/**
 * initialize range to zero
 */
static inline void range_init(struct lu_seq_range *range)
{
	range->lsr_start = range->lsr_end = range->lsr_index = 0;
}

/**
 * check if the given seq id \a s is within the given range \a range
 */
static inline int range_within(const struct lu_seq_range *range,
			       __u64 s)
{
	return s >= range->lsr_start && s < range->lsr_end;
}

static inline int range_is_sane(const struct lu_seq_range *range)
{
	return (range->lsr_end >= range->lsr_start);
}

static inline int range_is_zero(const struct lu_seq_range *range)
{
	return (range->lsr_start == 0 && range->lsr_end == 0);
}

static inline int range_is_exhausted(const struct lu_seq_range *range)
{
	return range_space(range) == 0;
}

/* return 0 if two ranges have the same location */
static inline int range_compare_loc(const struct lu_seq_range *r1,
				    const struct lu_seq_range *r2)
{
	return r1->lsr_index != r2->lsr_index ||
	       r1->lsr_flags != r2->lsr_flags;
}

#define DRANGE "[%#16.16"LPF64"x-%#16.16"LPF64"x):%x:%s"

#define PRANGE(range)		\
	(range)->lsr_start,	\
	(range)->lsr_end,	\
	(range)->lsr_index,	\
	fld_range_is_mdt(range) ? "mdt" : "ost"
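
/*
 * Usage sketch (illustrative; the helper names above are real, the
 * values are made up).  Ranges are half-open: [lsr_start, lsr_end).
 *
 *	struct lu_seq_range r = { 0 };
 *
 *	r.lsr_start = 0x200000400ULL;
 *	r.lsr_end   = 0x200000500ULL;
 *	fld_range_set_mdt(&r);
 *
 *	range_within(&r, 0x200000400ULL);	(1: lsr_start is included)
 *	range_within(&r, 0x200000500ULL);	(0: lsr_end is excluded)
 *	range_space(&r);			(0x100 sequences wide)
 */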


/** \defgroup lu_fid lu_fid
 * @{ */

/**
 * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat.
 * Deprecated since HSM and SOM attributes are now stored in separate on-disk
 * xattr.
 */
enum lma_compat {
	LMAC_HSM = 0x00000001,
	LMAC_SOM = 0x00000002,
};

/**
 * Masks for all features that should be supported by a Lustre version to
 * access a specific file.
 * This information is stored in lustre_mdt_attrs::lma_incompat.
 */
enum lma_incompat {
	LMAI_RELEASED = 0x0000001, /* file is released */
	LMAI_AGENT = 0x00000002, /* agent inode */
	LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object
					    is on the remote MDT */
};
#define LMA_INCOMPAT_SUPP	(LMAI_AGENT | LMAI_REMOTE_PARENT)

extern void lustre_lma_swab(struct lustre_mdt_attrs *lma);
extern void lustre_lma_init(struct lustre_mdt_attrs *lma,
			    const struct lu_fid *fid, __u32 incompat);
/**
 * SOM on-disk attributes stored in a separate xattr.
 */
struct som_attrs {
	/** Bitfield for supported data in this structure. For future use. */
	__u32	som_compat;

	/** Incompat feature list. The supported feature mask is available
	 * in SOM_INCOMPAT_SUPP */
	__u32	som_incompat;

	/** IO Epoch the SOM attributes belong to */
	__u64	som_ioepoch;
	/** total file size in objects */
	__u64	som_size;
	/** total fs blocks in objects */
	__u64	som_blocks;
	/** mds mount id the size is valid for */
	__u64	som_mountid;
};
extern void lustre_som_swab(struct som_attrs *attrs);

#define SOM_INCOMPAT_SUPP 0x0

/**
 * HSM on-disk attributes stored in a separate xattr.
 */
struct hsm_attrs {
	/** Bitfield for supported data in this structure. For future use. */
	__u32	hsm_compat;

	/** HSM flags, see hsm_flags enum below */
	__u32	hsm_flags;
	/** backend archive id associated with the file */
	__u64	hsm_arch_id;
	/** version associated with the last archiving, if any */
	__u64	hsm_arch_ver;
};
extern void lustre_hsm_swab(struct hsm_attrs *attrs);

/**
 * fid constants
 */
enum {
	/** LASTID file has zero OID */
	LUSTRE_FID_LASTID_OID = 0UL,
	/** initial fid id value */
	LUSTRE_FID_INIT_OID  = 1UL
};

/** returns fid object sequence */
static inline __u64 fid_seq(const struct lu_fid *fid)
{
	return fid->f_seq;
}

/** returns fid object id */
static inline __u32 fid_oid(const struct lu_fid *fid)
{
	return fid->f_oid;
}

/** returns fid object version */
static inline __u32 fid_ver(const struct lu_fid *fid)
{
	return fid->f_ver;
}

static inline void fid_zero(struct lu_fid *fid)
{
	memset(fid, 0, sizeof(*fid));
}

static inline obd_id fid_ver_oid(const struct lu_fid *fid)
{
	return ((__u64)fid_ver(fid) << 32 | fid_oid(fid));
}

/**
 * Note that reserved SEQ numbers below 12 overlap with ldiskfs inodes
 * in the IGIF namespace; since ldiskfs reserves those low inode numbers
 * for its own use, the matching SEQ numbers can be used for other
 * purposes without risking collisions with existing inodes.
 *
 * Different FID Format
 * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs#NEW.0
 */
enum fid_seq {
	FID_SEQ_OST_MDT0	= 0,
	FID_SEQ_LLOG		= 1, /* unnamed llogs */
	FID_SEQ_ECHO		= 2,
	FID_SEQ_OST_MDT1	= 3,
	FID_SEQ_OST_MAX		= 9, /* Max MDT count before OST_on_FID */
	FID_SEQ_LLOG_NAME	= 10, /* named llogs */
	FID_SEQ_RSVD		= 11,
	FID_SEQ_IGIF		= 12,
	FID_SEQ_IGIF_MAX	= 0x0ffffffffULL,
	FID_SEQ_IDIF		= 0x100000000ULL,
	FID_SEQ_IDIF_MAX	= 0x1ffffffffULL,
	/* Normal FID sequence starts from this value, i.e. 1<<33 */
	FID_SEQ_START		= 0x200000000ULL,
	/* sequence for local pre-defined FIDs listed in local_oid */
	FID_SEQ_LOCAL_FILE	= 0x200000001ULL,
	FID_SEQ_DOT_LUSTRE	= 0x200000002ULL,
	/* sequence is used for local named objects FIDs generated
	 * by local_object_storage library */
	FID_SEQ_LOCAL_NAME	= 0x200000003ULL,
	/* Because the current FLD only caches the fid sequence (not the
	 * oid) on the client side, any FID that needs to be exposed to
	 * clients requires that all fids under its sequence be located
	 * on one MDT. */
	FID_SEQ_SPECIAL		= 0x200000004ULL,
	FID_SEQ_QUOTA		= 0x200000005ULL,
	FID_SEQ_QUOTA_GLB	= 0x200000006ULL,
	FID_SEQ_ROOT		= 0x200000007ULL,  /* Located on MDT0 */
	FID_SEQ_NORMAL		= 0x200000400ULL,
	FID_SEQ_LOV_DEFAULT	= 0xffffffffffffffffULL
};

#define OBIF_OID_MAX_BITS	   32
#define OBIF_MAX_OID		(1ULL << OBIF_OID_MAX_BITS)
#define OBIF_OID_MASK	       ((1ULL << OBIF_OID_MAX_BITS) - 1)
#define IDIF_OID_MAX_BITS	   48
#define IDIF_MAX_OID		(1ULL << IDIF_OID_MAX_BITS)
#define IDIF_OID_MASK	       ((1ULL << IDIF_OID_MAX_BITS) - 1)

/** OID for FID_SEQ_SPECIAL */
enum special_oid {
	/* Big Filesystem Lock to serialize rename operations */
	FID_OID_SPECIAL_BFL     = 1UL,
};

/** OID for FID_SEQ_DOT_LUSTRE */
enum dot_lustre_oid {
	FID_OID_DOT_LUSTRE  = 1UL,
	FID_OID_DOT_LUSTRE_OBF = 2UL,
};

static inline int fid_seq_is_mdt0(obd_seq seq)
{
	return (seq == FID_SEQ_OST_MDT0);
}

static inline int fid_seq_is_mdt(const __u64 seq)
{
	return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL;
}

static inline int fid_seq_is_echo(obd_seq seq)
{
	return (seq == FID_SEQ_ECHO);
}

static inline int fid_is_echo(const struct lu_fid *fid)
{
	return fid_seq_is_echo(fid_seq(fid));
}

static inline int fid_seq_is_llog(obd_seq seq)
{
	return (seq == FID_SEQ_LLOG);
}

static inline int fid_is_llog(const struct lu_fid *fid)
{
	/* file with OID == 0 is not llog but contains last oid */
	return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0;
}

static inline int fid_seq_is_rsvd(const __u64 seq)
{
	return (seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD);
}

static inline int fid_seq_is_special(const __u64 seq)
{
	return seq == FID_SEQ_SPECIAL;
}

static inline int fid_seq_is_local_file(const __u64 seq)
{
	return seq == FID_SEQ_LOCAL_FILE ||
	       seq == FID_SEQ_LOCAL_NAME;
}

static inline int fid_seq_is_root(const __u64 seq)
{
	return seq == FID_SEQ_ROOT;
}

static inline int fid_seq_is_dot(const __u64 seq)
{
	return seq == FID_SEQ_DOT_LUSTRE;
}

static inline int fid_seq_is_default(const __u64 seq)
{
	return seq == FID_SEQ_LOV_DEFAULT;
}

static inline int fid_is_mdt0(const struct lu_fid *fid)
{
	return fid_seq_is_mdt0(fid_seq(fid));
}

static inline void lu_root_fid(struct lu_fid *fid)
{
	fid->f_seq = FID_SEQ_ROOT;
	fid->f_oid = 1;
	fid->f_ver = 0;
}

/**
 * Check if a fid is igif or not.
 * \param fid the fid to be tested.
 * \return true if the fid is an igif; otherwise false.
 */
static inline int fid_seq_is_igif(const __u64 seq)
{
	return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX;
}

static inline int fid_is_igif(const struct lu_fid *fid)
{
	return fid_seq_is_igif(fid_seq(fid));
}

/**
 * Check if a fid is idif or not.
 * \param fid the fid to be tested.
 * \return true if the fid is an idif; otherwise false.
 */
static inline int fid_seq_is_idif(const __u64 seq)
{
	return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX;
}

static inline int fid_is_idif(const struct lu_fid *fid)
{
	return fid_seq_is_idif(fid_seq(fid));
}

static inline int fid_is_local_file(const struct lu_fid *fid)
{
	return fid_seq_is_local_file(fid_seq(fid));
}

static inline int fid_seq_is_norm(const __u64 seq)
{
	return (seq >= FID_SEQ_NORMAL);
}

static inline int fid_is_norm(const struct lu_fid *fid)
{
	return fid_seq_is_norm(fid_seq(fid));
}

/* convert an OST objid into an IDIF FID SEQ number */
static inline obd_seq fid_idif_seq(obd_id id, __u32 ost_idx)
{
	return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff);
}

/* convert a packed IDIF FID into an OST objid */
static inline obd_id fid_idif_id(obd_seq seq, __u32 oid, __u32 ver)
{
	return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid;
}

/* extract ost index from IDIF FID */
static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid)
{
	LASSERT(fid_is_idif(fid));
	return (fid_seq(fid) >> 16) & 0xffff;
}
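
/*
 * Packing sketch (illustrative values): a legacy 48-bit OST objid plus
 * an OST index fold into an IDIF SEQ, and unfold back losslessly:
 *
 *	obd_id  id      = 0x123400005678ULL;	(bits 32..47 are 0x1234)
 *	__u32   ost_idx = 7;
 *	obd_seq seq     = fid_idif_seq(id, ost_idx);
 *		(seq == FID_SEQ_IDIF | 7 << 16 | 0x1234)
 *	obd_id  back    = fid_idif_id(seq, (__u32)id, 0);
 *		(back == id: the low 32 bits come from the oid, bits
 *		 32..47 from the low 16 bits of seq)
 */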

/* extract OST sequence (group) from a wire ost_id (id/seq) pair */
static inline obd_seq ostid_seq(const struct ost_id *ostid)
{
	if (fid_seq_is_mdt0(ostid->oi.oi_seq))
		return FID_SEQ_OST_MDT0;

	if (fid_seq_is_default(ostid->oi.oi_seq))
		return FID_SEQ_LOV_DEFAULT;

	if (fid_is_idif(&ostid->oi_fid))
		return FID_SEQ_OST_MDT0;

	return fid_seq(&ostid->oi_fid);
}

/* extract OST objid from a wire ost_id (id/seq) pair */
static inline obd_id ostid_id(const struct ost_id *ostid)
{
	if (fid_seq_is_mdt0(ostid_seq(ostid)))
		return ostid->oi.oi_id & IDIF_OID_MASK;

	if (fid_is_idif(&ostid->oi_fid))
		return fid_idif_id(fid_seq(&ostid->oi_fid),
				   fid_oid(&ostid->oi_fid), 0);

	return fid_oid(&ostid->oi_fid);
}

static inline void ostid_set_seq(struct ost_id *oi, __u64 seq)
{
	if (fid_seq_is_mdt0(seq) || fid_seq_is_default(seq)) {
		oi->oi.oi_seq = seq;
	} else {
		oi->oi_fid.f_seq = seq;
		/* Note: if f_oid and f_ver are both zero, we need to init
		 * f_oid to 1; otherwise ostid_seq() would treat this as
		 * an old ostid (oi_seq == 0) */
		if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0)
			oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID;
	}
}

static inline void ostid_set_seq_mdt0(struct ost_id *oi)
{
	ostid_set_seq(oi, FID_SEQ_OST_MDT0);
}

static inline void ostid_set_seq_echo(struct ost_id *oi)
{
	ostid_set_seq(oi, FID_SEQ_ECHO);
}

static inline void ostid_set_seq_llog(struct ost_id *oi)
{
	ostid_set_seq(oi, FID_SEQ_LLOG);
}

/**
 * Note: we need to check oi_seq to decide where to set oi_id,
 * so oi_seq should always be set ahead of oi_id.
 */
static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
{
	if (fid_seq_is_mdt0(ostid_seq(oi))) {
		if (oid >= IDIF_MAX_OID) {
			CERROR("Bad "LPU64" to set "DOSTID"\n",
				oid, POSTID(oi));
			return;
		}
		oi->oi.oi_id = oid;
	} else {
		if (oid > OBIF_MAX_OID) {
			CERROR("Bad "LPU64" to set "DOSTID"\n",
				oid, POSTID(oi));
			return;
		}
		oi->oi_fid.f_oid = oid;
	}
}
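
/*
 * Ordering sketch: per the note above, always set the sequence first,
 * since ostid_set_id() inspects the current seq to pick the union arm:
 *
 *	struct ost_id oi;
 *
 *	ostid_set_seq_mdt0(&oi);	(selects the legacy oi.oi_id/oi_seq arm)
 *	ostid_set_id(&oi, objid);	(stored in oi.oi_id, checked < 2^48)
 */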

static inline void ostid_inc_id(struct ost_id *oi)
{
	if (fid_seq_is_mdt0(ostid_seq(oi))) {
		if (unlikely(ostid_id(oi) + 1 > IDIF_MAX_OID)) {
			CERROR("Bad inc "DOSTID"\n", POSTID(oi));
			return;
		}
		oi->oi.oi_id++;
	} else {
		oi->oi_fid.f_oid++;
	}
}

static inline void ostid_dec_id(struct ost_id *oi)
{
	if (fid_seq_is_mdt0(ostid_seq(oi)))
		oi->oi.oi_id--;
	else
		oi->oi_fid.f_oid--;
}

/**
 * Unpack an OST object id/seq (group) into a FID.  This is needed for
 * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper
 * FIDs.  Note that if an id/seq is already in FID/IDIF format it will
 * be passed through unchanged.  Only legacy OST objects in "group 0"
 * will be mapped into the IDIF namespace so that they can fit into the
 * struct lu_fid fields without loss.  For reference see:
 * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs
 */
static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
			       __u32 ost_idx)
{
	if (ost_idx > 0xffff) {
		CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid),
		       ost_idx);
		return -EBADF;
	}

	if (fid_seq_is_mdt0(ostid_seq(ostid))) {
		/* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
		 * that we map into the IDIF namespace.  It allows up to 2^48
		 * objects per OST, as this is the object namespace that has
		 * been in production for years.  This can handle create rates
		 * of 1M objects/s/OST for 9 years, or combinations thereof. */
		if (ostid_id(ostid) >= IDIF_MAX_OID) {
			 CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n",
				POSTID(ostid), ost_idx);
			 return -EBADF;
		}
		fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx);
		/* truncate to 32 bits by assignment */
		fid->f_oid = ostid_id(ostid);
		/* in theory, not currently used */
		fid->f_ver = ostid_id(ostid) >> 48;
	} else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ {
	       /* This is either an IDIF object, which identifies objects across
		* all OSTs, or a regular FID.  The IDIF namespace maps legacy
		* OST objects into the FID namespace.  In both cases, we just
		* pass the FID through, no conversion needed. */
		if (ostid->oi_fid.f_ver != 0) {
			CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n",
				POSTID(ostid), ost_idx);
			return -EBADF;
		}
		*fid = ostid->oi_fid;
	}

	return 0;
}

/* pack any OST FID into an ostid (id/seq) for the wire/disk */
static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
{
	if (unlikely(fid_seq_is_igif(fid->f_seq))) {
		CERROR("bad IGIF, "DFID"\n", PFID(fid));
		return -EBADF;
	}

	if (fid_is_idif(fid)) {
		ostid_set_seq_mdt0(ostid);
		ostid_set_id(ostid, fid_idif_id(fid_seq(fid), fid_oid(fid),
						fid_ver(fid)));
	} else {
		ostid->oi_fid = *fid;
	}

	return 0;
}
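
/*
 * Round-trip sketch (illustrative): a legacy "group 0" objid survives
 * conversion to a FID and back:
 *
 *	struct ost_id oi;
 *	struct lu_fid fid;
 *
 *	ostid_set_seq_mdt0(&oi);
 *	ostid_set_id(&oi, 0x5678);
 *	ostid_to_fid(&fid, &oi, ost_idx);	(maps into the IDIF namespace)
 *	fid_to_ostid(&fid, &oi);		(recovers MDT0 seq, id 0x5678)
 */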

/* Check whether the fid is for LAST_ID */
static inline int fid_is_last_id(const struct lu_fid *fid)
{
	return (fid_oid(fid) == 0);
}

/**
 * Get inode number from a igif.
 * \param fid a igif to get inode number from.
 * \return inode number for the igif.
 */
static inline ino_t lu_igif_ino(const struct lu_fid *fid)
{
	return fid_seq(fid);
}

extern void lustre_swab_ost_id(struct ost_id *oid);

/**
 * Get inode generation from a igif.
 * \param fid a igif to get inode generation from.
 * \return inode generation for the igif.
 */
static inline __u32 lu_igif_gen(const struct lu_fid *fid)
{
	return fid_oid(fid);
}

/**
 * Build igif from the inode number/generation.
 */
static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen)
{
	fid->f_seq = ino;
	fid->f_oid = gen;
	fid->f_ver = 0;
}
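
/*
 * IGIF round-trip sketch: a fid built from an inode number/generation
 * pair yields the same pair back:
 *
 *	struct lu_fid fid;
 *
 *	lu_igif_build(&fid, ino, gen);
 *	(lu_igif_ino(&fid) == ino && lu_igif_gen(&fid) == gen)
 */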

/*
 * Fids are transmitted across the network (in the sender's byte order)
 * and stored on disk in big-endian order.
 */
static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
{
	/* check that all fields are converted */
	CLASSERT(sizeof *src ==
		 sizeof fid_seq(src) +
		 sizeof fid_oid(src) + sizeof fid_ver(src));
	dst->f_seq = cpu_to_le64(fid_seq(src));
	dst->f_oid = cpu_to_le32(fid_oid(src));
	dst->f_ver = cpu_to_le32(fid_ver(src));
}

static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
	/* check that all fields are converted */
	CLASSERT(sizeof *src ==
		 sizeof fid_seq(src) +
		 sizeof fid_oid(src) + sizeof fid_ver(src));
	dst->f_seq = le64_to_cpu(fid_seq(src));
	dst->f_oid = le32_to_cpu(fid_oid(src));
	dst->f_ver = le32_to_cpu(fid_ver(src));
}

static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
{
	/* check that all fields are converted */
	CLASSERT(sizeof *src ==
		 sizeof fid_seq(src) +
		 sizeof fid_oid(src) + sizeof fid_ver(src));
	dst->f_seq = cpu_to_be64(fid_seq(src));
	dst->f_oid = cpu_to_be32(fid_oid(src));
	dst->f_ver = cpu_to_be32(fid_ver(src));
}

static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
	/* check that all fields are converted */
	CLASSERT(sizeof *src ==
		 sizeof fid_seq(src) +
		 sizeof fid_oid(src) + sizeof fid_ver(src));
	dst->f_seq = be64_to_cpu(fid_seq(src));
	dst->f_oid = be32_to_cpu(fid_oid(src));
	dst->f_ver = be32_to_cpu(fid_ver(src));
}

static inline int fid_is_sane(const struct lu_fid *fid)
{
	return fid != NULL &&
	       ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) ||
		fid_is_igif(fid) || fid_is_idif(fid) ||
		fid_seq_is_rsvd(fid_seq(fid)));
}

static inline int fid_is_zero(const struct lu_fid *fid)
{
	return fid_seq(fid) == 0 && fid_oid(fid) == 0;
}

extern void lustre_swab_lu_fid(struct lu_fid *fid);
extern void lustre_swab_lu_seq_range(struct lu_seq_range *range);

static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
{
	/* Check that there is no alignment padding. */
	CLASSERT(sizeof *f0 ==
		 sizeof f0->f_seq + sizeof f0->f_oid + sizeof f0->f_ver);
	return memcmp(f0, f1, sizeof *f0) == 0;
}

#define __diff_normalize(val0, val1)			    \
({							      \
	typeof(val0) __val0 = (val0);			   \
	typeof(val1) __val1 = (val1);			   \
								\
	(__val0 == __val1 ? 0 : __val0 > __val1 ? +1 : -1);     \
})

static inline int lu_fid_cmp(const struct lu_fid *f0,
			     const struct lu_fid *f1)
{
	return
		__diff_normalize(fid_seq(f0), fid_seq(f1)) ?:
		__diff_normalize(fid_oid(f0), fid_oid(f1)) ?:
		__diff_normalize(fid_ver(f0), fid_ver(f1));
}
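
/*
 * Ordering sketch: lu_fid_cmp() compares seq first, then oid, then ver,
 * so e.g. [0x200000400:0x2:0x0] sorts after [0x200000400:0x1:0x0]
 * (equal seq, larger oid), regardless of the version fields.
 */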

static inline void ostid_cpu_to_le(struct ost_id *src_oi,
				   struct ost_id *dst_oi)
{
	if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
		dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
		dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
	} else {
		fid_cpu_to_le(&dst_oi->oi_fid, &src_oi->oi_fid);
	}
}

static inline void ostid_le_to_cpu(struct ost_id *src_oi,
				   struct ost_id *dst_oi)
{
	if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
		dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
		dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
	} else {
		fid_le_to_cpu(&dst_oi->oi_fid, &src_oi->oi_fid);
	}
}

/** @} lu_fid */

/** \defgroup lu_dir lu_dir
 * @{ */

/**
 * Enumeration of possible directory entry attributes.
 *
 * Attributes follow directory entry header in the order they appear in this
 * enumeration.
 */
enum lu_dirent_attrs {
	LUDA_FID		= 0x0001,
	LUDA_TYPE		= 0x0002,
	LUDA_64BITHASH		= 0x0004,

	/* The following attrs are for MDT internal use only,
	 * not visible to the client */

	/* Verify the dirent consistency */
	LUDA_VERIFY		= 0x8000,
	/* Only check but not repair the dirent inconsistency */
	LUDA_VERIFY_DRYRUN	= 0x4000,
	/* The dirent has been repaired, or to be repaired (dryrun). */
	LUDA_REPAIR		= 0x2000,
	/* The system has been upgraded; the entry has been or is to be
	 * repaired (dryrun). */
	LUDA_UPGRADE		= 0x1000,
	/* Ignore this record, go directly to the next one. */
	LUDA_IGNORE		= 0x0800,
};

#define LU_DIRENT_ATTRS_MASK	0xf800

/**
 * Layout of readdir pages, as transmitted on wire.
 */
struct lu_dirent {
	/** valid if LUDA_FID is set. */
	struct lu_fid lde_fid;
	/** a unique entry identifier: a hash or an offset. */
	__u64	 lde_hash;
	/** total record length, including all attributes. */
	__u16	 lde_reclen;
	/** name length */
	__u16	 lde_namelen;
	/** optional variable size attributes following this entry.
	 *  taken from enum lu_dirent_attrs.
	 */
	__u32	 lde_attrs;
	/** name is followed by the attributes indicated in ->lde_attrs, in
	 *  their natural order. After the last attribute, padding bytes are
	 *  added to make ->lde_reclen a multiple of 8.
	 */
	char	  lde_name[0];
};

/*
 * Definitions of optional directory entry attributes formats.
 *
 * Individual attributes do not have their length encoded in a generic way. It
 * is assumed that the consumer of an attribute knows its format. This means that
 * it is impossible to skip over an unknown attribute, except by skipping over all
 * remaining attributes (by using ->lde_reclen), which is not too
 * constraining, because new server versions will append new attributes at
 * the end of an entry.
 */

/**
 * Fid directory attribute: a fid of an object referenced by the entry. This
 * will be almost always requested by the client and supplied by the server.
 *
 * Aligned to 8 bytes.
 */
/* To keep compatibility with 1.8, let's have the fid in the lu_dirent struct. */

/**
 * File type.
 *
 * Aligned to 2 bytes.
 */
struct luda_type {
	__u16 lt_type;
};

struct lu_dirpage {
	__u64	    ldp_hash_start;
	__u64	    ldp_hash_end;
	__u32	    ldp_flags;
	__u32	    ldp_pad0;
	struct lu_dirent ldp_entries[0];
};

enum lu_dirpage_flags {
	/**
	 * dirpage contains no entry.
	 */
	LDF_EMPTY   = 1 << 0,
	/**
	 * last entry's lde_hash equals ldp_hash_end.
	 */
	LDF_COLLIDE = 1 << 1
};

static inline struct lu_dirent *lu_dirent_start(struct lu_dirpage *dp)
{
	if (le32_to_cpu(dp->ldp_flags) & LDF_EMPTY)
		return NULL;
	else
		return dp->ldp_entries;
}

static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent)
{
	struct lu_dirent *next;

	if (le16_to_cpu(ent->lde_reclen) != 0)
		next = ((void *)ent) + le16_to_cpu(ent->lde_reclen);
	else
		next = NULL;

	return next;
}
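
/*
 * Iteration sketch: the last entry in a dirpage carries lde_reclen == 0,
 * so lu_dirent_next() returns NULL and terminates the walk:
 *
 *	struct lu_dirent *ent;
 *
 *	for (ent = lu_dirent_start(dp); ent != NULL;
 *	     ent = lu_dirent_next(ent))
 *		(process ent->lde_name / ent->lde_namelen here)
 */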

static inline int lu_dirent_calc_size(int namelen, __u16 attr)
{
	int size;

	if (attr & LUDA_TYPE) {
		const unsigned align = sizeof(struct luda_type) - 1;
		size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
		size += sizeof(struct luda_type);
	} else
		size = sizeof(struct lu_dirent) + namelen;

	return (size + 7) & ~7;
}
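
/*
 * Worked example (assuming a 32-byte struct lu_dirent): for namelen = 5
 * with LUDA_TYPE set, align = sizeof(struct luda_type) - 1 = 1, so
 * size = (32 + 5 + 1) & ~1 = 38, plus 2 bytes of luda_type gives 40,
 * which is already 8-byte aligned: lu_dirent_calc_size(5, LUDA_TYPE) == 40.
 */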

static inline int lu_dirent_size(struct lu_dirent *ent)
{
	if (le16_to_cpu(ent->lde_reclen) == 0) {
		return lu_dirent_calc_size(le16_to_cpu(ent->lde_namelen),
					   le32_to_cpu(ent->lde_attrs));
	}
	return le16_to_cpu(ent->lde_reclen);
}

#define MDS_DIR_END_OFF 0xfffffffffffffffeULL

/**
 * MDS_READPAGE page size
 *
 * This is the directory page size packed in the MDS_READPAGE RPC.
 * It's different from PAGE_CACHE_SIZE because the client needs to
 * access the struct lu_dirpage header packed at the beginning of
 * the "page"; without this there isn't any way to find where the
 * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ.
 */
#define LU_PAGE_SHIFT 12
#define LU_PAGE_SIZE  (1UL << LU_PAGE_SHIFT)
#define LU_PAGE_MASK  (~(LU_PAGE_SIZE - 1))

#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT))

/** @} lu_dir */

struct lustre_handle {
	__u64 cookie;
};
#define DEAD_HANDLE_MAGIC 0xdeadbeefcafebabeULL

static inline int lustre_handle_is_used(struct lustre_handle *lh)
{
	return lh->cookie != 0ull;
}

static inline int lustre_handle_equal(const struct lustre_handle *lh1,
				      const struct lustre_handle *lh2)
{
	return lh1->cookie == lh2->cookie;
}

static inline void lustre_handle_copy(struct lustre_handle *tgt,
				      struct lustre_handle *src)
{
	tgt->cookie = src->cookie;
}

/* flags for lm_flags */
#define MSGHDR_AT_SUPPORT	       0x1
#define MSGHDR_CKSUM_INCOMPAT18	 0x2

#define lustre_msg lustre_msg_v2
/* we depend on this structure to be 8-byte aligned */
/* this type is only endian-adjusted in lustre_unpack_msg() */
struct lustre_msg_v2 {
	__u32 lm_bufcount;
	__u32 lm_secflvr;
	__u32 lm_magic;
	__u32 lm_repsize;
	__u32 lm_cksum;
	__u32 lm_flags;
	__u32 lm_padding_2;
	__u32 lm_padding_3;
	__u32 lm_buflens[0];
};

/* without gss, ptlrpc_body is put at the first buffer. */
#define PTLRPC_NUM_VERSIONS     4
#define JOBSTATS_JOBID_SIZE     32  /* 32-byte string */
struct ptlrpc_body_v3 {
	struct lustre_handle pb_handle;
	__u32 pb_type;
	__u32 pb_version;
	__u32 pb_opc;
	__u32 pb_status;
	__u64 pb_last_xid;
	__u64 pb_last_seen;
	__u64 pb_last_committed;
	__u64 pb_transno;
	__u32 pb_flags;
	__u32 pb_op_flags;
	__u32 pb_conn_cnt;
	__u32 pb_timeout;  /* for req, the deadline, for rep, the service est */
	__u32 pb_service_time; /* for rep, actual service time */
	__u32 pb_limit;
	__u64 pb_slv;
	/* VBR: pre-versions */
	__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
	/* padding for future needs */
	__u64 pb_padding[4];
	char  pb_jobid[JOBSTATS_JOBID_SIZE];
};
#define ptlrpc_body     ptlrpc_body_v3

struct ptlrpc_body_v2 {
	struct lustre_handle pb_handle;
	__u32 pb_type;
	__u32 pb_version;
	__u32 pb_opc;
	__u32 pb_status;
	__u64 pb_last_xid;
	__u64 pb_last_seen;
	__u64 pb_last_committed;
	__u64 pb_transno;
	__u32 pb_flags;
	__u32 pb_op_flags;
	__u32 pb_conn_cnt;
	__u32 pb_timeout;  /* for req, the deadline, for rep, the service est */
	__u32 pb_service_time; /* for rep, actual service time, also used for
				  net_latency of req */
	__u32 pb_limit;
	__u64 pb_slv;
	/* VBR: pre-versions */
	__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
	/* padding for future needs */
	__u64 pb_padding[4];
};

extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);

/* message body offset for lustre_msg_v2 */
/* ptlrpc body offset in all request/reply messages */
#define MSG_PTLRPC_BODY_OFF	     0

/* normal request/reply message record offset */
#define REQ_REC_OFF		     1
#define REPLY_REC_OFF		   1

/* ldlm request message body offset */
#define DLM_LOCKREQ_OFF		 1 /* lockreq offset */
#define DLM_REQ_REC_OFF		 2 /* normal dlm request record offset */

/* ldlm intent lock message body offset */
#define DLM_INTENT_IT_OFF	       2 /* intent lock it offset */
#define DLM_INTENT_REC_OFF	      3 /* intent lock record offset */

/* ldlm reply message body offset */
#define DLM_LOCKREPLY_OFF	       1 /* lockrep offset */
#define DLM_REPLY_REC_OFF	       2 /* reply record offset */

/** only used in req->rq_{req,rep}_swab_mask */
#define MSG_PTLRPC_HEADER_OFF	   31

/* Flags that are operation-specific go in the top 16 bits. */
#define MSG_OP_FLAG_MASK   0xffff0000
#define MSG_OP_FLAG_SHIFT  16

/* Flags that apply to all requests are in the bottom 16 bits */
#define MSG_GEN_FLAG_MASK     0x0000ffff
#define MSG_LAST_REPLAY	   0x0001
#define MSG_RESENT		0x0002
#define MSG_REPLAY		0x0004
/* #define MSG_AT_SUPPORT	 0x0008
 * This was used in early prototypes of adaptive timeouts, and while there
 * shouldn't be any users of that code, there also isn't a need to reuse
 * this bit. Defer usage until at least 1.10 to avoid potential conflict. */
#define MSG_DELAY_REPLAY	  0x0010
#define MSG_VERSION_REPLAY	0x0020
#define MSG_REQ_REPLAY_DONE       0x0040
#define MSG_LOCK_REPLAY_DONE      0x0080

/*
 * Flags for all connect opcodes (MDS_CONNECT, OST_CONNECT)
 */

#define MSG_CONNECT_RECOVERING  0x00000001
#define MSG_CONNECT_RECONNECT   0x00000002
#define MSG_CONNECT_REPLAYABLE  0x00000004
//#define MSG_CONNECT_PEER	0x8
#define MSG_CONNECT_LIBCLIENT   0x00000010
#define MSG_CONNECT_INITIAL     0x00000020
#define MSG_CONNECT_ASYNC       0x00000040
#define MSG_CONNECT_NEXT_VER    0x00000080 /* use next version of lustre_msg */
#define MSG_CONNECT_TRANSNO     0x00000100 /* report transno */

/* Connect flags */
#define OBD_CONNECT_RDONLY		0x1ULL /*client has read-only access*/
#define OBD_CONNECT_INDEX		 0x2ULL /*connect specific LOV idx */
#define OBD_CONNECT_MDS		   0x4ULL /*connect from MDT to OST */
#define OBD_CONNECT_GRANT		 0x8ULL /*OSC gets grant at connect */
#define OBD_CONNECT_SRVLOCK	      0x10ULL /*server takes locks for cli */
#define OBD_CONNECT_VERSION	      0x20ULL /*Lustre versions in ocd */
#define OBD_CONNECT_REQPORTAL	    0x40ULL /*Separate non-IO req portal */
#define OBD_CONNECT_ACL		  0x80ULL /*access control lists */
#define OBD_CONNECT_XATTR	       0x100ULL /*client use extended attr */
#define OBD_CONNECT_CROW		0x200ULL /*MDS+OST create obj on write*/
#define OBD_CONNECT_TRUNCLOCK	   0x400ULL /*locks on server for punch */
#define OBD_CONNECT_TRANSNO	     0x800ULL /*replay sends init transno */
#define OBD_CONNECT_IBITS	      0x1000ULL /*support for inodebits locks*/
#define OBD_CONNECT_JOIN	       0x2000ULL /*files can be concatenated.
						  *JOIN FILE is no longer
						  *supported; this flag is
						  *reserved only to keep the
						  *bit from being reused.*/
#define OBD_CONNECT_ATTRFID	    0x4000ULL /*Server can GetAttr By Fid*/
#define OBD_CONNECT_NODEVOH	    0x8000ULL /*No open hndl on specl nodes*/
#define OBD_CONNECT_RMT_CLIENT	0x10000ULL /*Remote client */
#define OBD_CONNECT_RMT_CLIENT_FORCE  0x20000ULL /*Remote client by force */
#define OBD_CONNECT_BRW_SIZE	  0x40000ULL /*Max bytes per rpc */
#define OBD_CONNECT_QUOTA64	   0x80000ULL /*Not used since 2.4 */
#define OBD_CONNECT_MDS_CAPA	 0x100000ULL /*MDS capability */
#define OBD_CONNECT_OSS_CAPA	 0x200000ULL /*OSS capability */
#define OBD_CONNECT_CANCELSET	0x400000ULL /*Early batched cancels. */
#define OBD_CONNECT_SOM	      0x800000ULL /*Size on MDS */
#define OBD_CONNECT_AT	      0x1000000ULL /*client uses AT */
#define OBD_CONNECT_LRU_RESIZE      0x2000000ULL /*LRU resize feature. */
#define OBD_CONNECT_MDS_MDS	 0x4000000ULL /*MDS-MDS connection */
#define OBD_CONNECT_REAL	    0x8000000ULL /*real connection */
#define OBD_CONNECT_CHANGE_QS      0x10000000ULL /*Not used since 2.4 */
#define OBD_CONNECT_CKSUM	  0x20000000ULL /*support several cksum algos*/
#define OBD_CONNECT_FID	    0x40000000ULL /*FID is supported by server */
#define OBD_CONNECT_VBR	    0x80000000ULL /*version based recovery */
#define OBD_CONNECT_LOV_V3	0x100000000ULL /*client supports LOV v3 EA */
#define OBD_CONNECT_GRANT_SHRINK  0x200000000ULL /* support grant shrink */
#define OBD_CONNECT_SKIP_ORPHAN   0x400000000ULL /* don't reuse orphan objids */
#define OBD_CONNECT_MAX_EASIZE    0x800000000ULL /* preserved for large EA */
#define OBD_CONNECT_FULL20       0x1000000000ULL /* it is 2.0 client */
#define OBD_CONNECT_LAYOUTLOCK   0x2000000000ULL /* client uses layout lock */
#define OBD_CONNECT_64BITHASH    0x4000000000ULL /* client supports 64-bits
						  * directory hash */
#define OBD_CONNECT_MAXBYTES     0x8000000000ULL /* max stripe size */
#define OBD_CONNECT_IMP_RECOV   0x10000000000ULL /* imp recovery support */
#define OBD_CONNECT_JOBSTATS    0x20000000000ULL /* jobid in ptlrpc_body */
#define OBD_CONNECT_UMASK       0x40000000000ULL /* create uses client umask */
#define OBD_CONNECT_EINPROGRESS 0x80000000000ULL /* client handles -EINPROGRESS
						  * RPC error properly */
#define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL/* extra grant params used for
						  * finer space reservation */
#define OBD_CONNECT_FLOCK_OWNER 0x200000000000ULL /* for the fixed 1.8
						   * policy and 2.x server */
#define OBD_CONNECT_LVB_TYPE	0x400000000000ULL /* variable type of LVB */
#define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosecond timestamps */
#define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */
#define OBD_CONNECT_SHORTIO     0x2000000000000ULL/* short io */
#define OBD_CONNECT_PINGLESS	0x4000000000000ULL/* pings not required */
/* XXX README XXX:
 * Please DO NOT add flag values here before first ensuring that this same
 * flag value is not in use on some other branch.  Please clear any such
 * changes with senior engineers before starting to use a new flag.  Then,
 * submit a small patch against EVERY branch that ONLY adds the new flag,
 * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the
 * flag to check_obd_connect_data(), and updates wiretests accordingly, so it
 * can be approved and landed easily to reserve the flag for future use. */

/* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS
 * connection.  It is a temporary bug fix for Imperative Recovery interop
 * between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for
 * 2.2 clients/servers is no longer needed.  LU-1252/LU-1644. */
#define OBD_CONNECT_MNE_SWAB		 OBD_CONNECT_MDS_MDS

#define OCD_HAS_FLAG(ocd, flg)  \
	(!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg))
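
/* Usage sketch (illustrative only): OCD_HAS_FLAG() pastes the short flag
 * name onto the OBD_CONNECT_ prefix, so feature checks read naturally:
 *
 *	if (OCD_HAS_FLAG(ocd, GRANT) && OCD_HAS_FLAG(ocd, GRANT_SHRINK))
 *		enable_grant_shrinking();	(hypothetical helper)
 */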


#define LRU_RESIZE_CONNECT_FLAG OBD_CONNECT_LRU_RESIZE

#define MDT_CONNECT_SUPPORTED  (OBD_CONNECT_RDONLY | OBD_CONNECT_VERSION | \
				OBD_CONNECT_ACL | OBD_CONNECT_XATTR | \
				OBD_CONNECT_IBITS | \
				OBD_CONNECT_NODEVOH | OBD_CONNECT_ATTRFID | \
				OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
				OBD_CONNECT_RMT_CLIENT | \
				OBD_CONNECT_RMT_CLIENT_FORCE | \
				OBD_CONNECT_BRW_SIZE | OBD_CONNECT_MDS_CAPA | \
				OBD_CONNECT_OSS_CAPA | OBD_CONNECT_MDS_MDS | \
				OBD_CONNECT_FID | LRU_RESIZE_CONNECT_FLAG | \
				OBD_CONNECT_VBR | OBD_CONNECT_LOV_V3 | \
				OBD_CONNECT_SOM | OBD_CONNECT_FULL20 | \
				OBD_CONNECT_64BITHASH | OBD_CONNECT_JOBSTATS | \
				OBD_CONNECT_EINPROGRESS | \
				OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \
				OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\
				OBD_CONNECT_PINGLESS)
#define OST_CONNECT_SUPPORTED  (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \
				OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \
				OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \
				OBD_CONNECT_BRW_SIZE | OBD_CONNECT_OSS_CAPA | \
				OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
				LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_CKSUM | \
				OBD_CONNECT_RMT_CLIENT | \
				OBD_CONNECT_RMT_CLIENT_FORCE | OBD_CONNECT_VBR | \
				OBD_CONNECT_MDS | OBD_CONNECT_SKIP_ORPHAN | \
				OBD_CONNECT_GRANT_SHRINK | OBD_CONNECT_FULL20 | \
				OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES | \
				OBD_CONNECT_MAX_EASIZE | \
				OBD_CONNECT_EINPROGRESS | \
				OBD_CONNECT_JOBSTATS | \
				OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_LVB_TYPE|\
				OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_FID | \
				OBD_CONNECT_PINGLESS)
#define ECHO_CONNECT_SUPPORTED (0)
#define MGS_CONNECT_SUPPORTED  (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \
				OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | \
				OBD_CONNECT_MNE_SWAB | OBD_CONNECT_PINGLESS)

/* Features required for this version of the client to work with server */
#define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \
				 OBD_CONNECT_FULL20)

#define OBD_OCD_VERSION(major,minor,patch,fix) (((major)<<24) + ((minor)<<16) +\
						((patch)<<8) + (fix))
#define OBD_OCD_VERSION_MAJOR(version) ((int)((version)>>24)&255)
#define OBD_OCD_VERSION_MINOR(version) ((int)((version)>>16)&255)
#define OBD_OCD_VERSION_PATCH(version) ((int)((version)>>8)&255)
#define OBD_OCD_VERSION_FIX(version)   ((int)(version)&255)
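
/* Worked example (illustrative): each component occupies one byte of the
 * packed version, so:
 *
 *	OBD_OCD_VERSION(2, 4, 0, 0)		== 0x02040000
 *	OBD_OCD_VERSION_MAJOR(0x02040000)	== 2
 *	OBD_OCD_VERSION_MINOR(0x02040000)	== 4
 */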

/* This structure is used for both request and reply.
 *
 * If we eventually have separate connect data for different types, which we
 * almost certainly will, then perhaps we stick a union in here. */
struct obd_connect_data_v1 {
	__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
	__u32 ocd_version;	 /* lustre release version number */
	__u32 ocd_grant;	 /* initial cache grant amount (bytes) */
	__u32 ocd_index;	 /* LOV index to connect to */
	__u32 ocd_brw_size;	 /* Maximum BRW size in bytes, must be 2^n */
	__u64 ocd_ibits_known;   /* inode bits this client understands */
	__u8  ocd_blocksize;     /* log2 of the backend filesystem blocksize */
	__u8  ocd_inodespace;    /* log2 of the per-inode space consumption */
	__u16 ocd_grant_extent;  /* per-extent grant overhead, in 1K blocks */
	__u32 ocd_unused;	/* also fix lustre_swab_connect */
	__u64 ocd_transno;       /* first transno from client to be replayed */
	__u32 ocd_group;	 /* MDS group on OST */
	__u32 ocd_cksum_types;   /* supported checksum algorithms */
	__u32 ocd_max_easize;    /* How big LOV EA can be on MDS */
	__u32 ocd_instance;      /* also fix lustre_swab_connect */
	__u64 ocd_maxbytes;      /* Maximum stripe size in bytes */
};

struct obd_connect_data {
	__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
	__u32 ocd_version;	 /* lustre release version number */
	__u32 ocd_grant;	 /* initial cache grant amount (bytes) */
	__u32 ocd_index;	 /* LOV index to connect to */
	__u32 ocd_brw_size;	 /* Maximum BRW size in bytes */
	__u64 ocd_ibits_known;   /* inode bits this client understands */
	__u8  ocd_blocksize;     /* log2 of the backend filesystem blocksize */
	__u8  ocd_inodespace;    /* log2 of the per-inode space consumption */
	__u16 ocd_grant_extent;  /* per-extent grant overhead, in 1K blocks */
	__u32 ocd_unused;	/* also fix lustre_swab_connect */
	__u64 ocd_transno;       /* first transno from client to be replayed */
	__u32 ocd_group;	 /* MDS group on OST */
	__u32 ocd_cksum_types;   /* supported checksum algorithms */
	__u32 ocd_max_easize;    /* How big LOV EA can be on MDS */
	__u32 ocd_instance;      /* instance # of this target */
	__u64 ocd_maxbytes;      /* Maximum stripe size in bytes */
	/* Fields after ocd_maxbytes are only accessible by the receiver
	 * if the corresponding flag in ocd_connect_flags is set. Accessing
	 * any field after ocd_maxbytes on the receiver without a valid flag
	 * may result in an out-of-bounds memory access and a kernel oops. */
	__u64 padding1;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding2;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding3;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding4;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding5;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding6;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding7;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding8;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding9;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingA;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingB;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingC;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingD;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingE;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingF;	  /* added 2.1.0. also fix lustre_swab_connect */
};
/* XXX README XXX:
 * Please DO NOT use any fields here before first ensuring that this same
 * field is not in use on some other branch.  Please clear any such changes
 * with senior engineers before starting to use a new field.  Then, submit
 * a small patch against EVERY branch that ONLY adds the new field along with
 * the matching OBD_CONNECT flag, so that it can be approved and landed easily
 * to reserve the flag for future use. */


extern void lustre_swab_connect(struct obd_connect_data *ocd);

/*
 * Supported checksum algorithms. Up to 32 checksum types are supported.
 * (32-bit mask stored in obd_connect_data::ocd_cksum_types)
 * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new
 * algorithm and also the OBD_FL_CKSUM* flags.
 */
typedef enum {
	OBD_CKSUM_CRC32 = 0x00000001,
	OBD_CKSUM_ADLER = 0x00000002,
	OBD_CKSUM_CRC32C= 0x00000004,
} cksum_type_t;

/*
 *   OST requests: OBDO & OBD request records
 */

/* opcodes */
typedef enum {
	OST_REPLY      =  0,       /* reply ? */
	OST_GETATTR    =  1,
	OST_SETATTR    =  2,
	OST_READ       =  3,
	OST_WRITE      =  4,
	OST_CREATE     =  5,
	OST_DESTROY    =  6,
	OST_GET_INFO   =  7,
	OST_CONNECT    =  8,
	OST_DISCONNECT =  9,
	OST_PUNCH      = 10,
	OST_OPEN       = 11,
	OST_CLOSE      = 12,
	OST_STATFS     = 13,
	OST_SYNC       = 16,
	OST_SET_INFO   = 17,
	OST_QUOTACHECK = 18,
	OST_QUOTACTL   = 19,
	OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
	OST_LAST_OPC
} ost_cmd_t;
#define OST_FIRST_OPC  OST_REPLY

enum obdo_flags {
	OBD_FL_INLINEDATA   = 0x00000001,
	OBD_FL_OBDMDEXISTS  = 0x00000002,
	OBD_FL_DELORPHAN    = 0x00000004, /* if set in o_flags delete orphans */
	OBD_FL_NORPC	= 0x00000008, /* if set in o_flags, do in OSC not OST */
	OBD_FL_IDONLY       = 0x00000010, /* if set in o_flags, only adjust obj id */
	OBD_FL_RECREATE_OBJS= 0x00000020, /* recreate missing obj */
	OBD_FL_DEBUG_CHECK  = 0x00000040, /* echo client/server debug check */
	OBD_FL_NO_USRQUOTA  = 0x00000100, /* the object's owner is over quota */
	OBD_FL_NO_GRPQUOTA  = 0x00000200, /* the object's group is over quota */
	OBD_FL_CREATE_CROW  = 0x00000400, /* object should be created on write */
	OBD_FL_SRVLOCK      = 0x00000800, /* delegate DLM locking to server */
	OBD_FL_CKSUM_CRC32  = 0x00001000, /* CRC32 checksum type */
	OBD_FL_CKSUM_ADLER  = 0x00002000, /* ADLER checksum type */
	OBD_FL_CKSUM_CRC32C = 0x00004000, /* CRC32C checksum type */
	OBD_FL_CKSUM_RSVD2  = 0x00008000, /* for future cksum types */
	OBD_FL_CKSUM_RSVD3  = 0x00010000, /* for future cksum types */
	OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */
	OBD_FL_MMAP	 = 0x00040000, /* object is mmapped on the client.
					   * XXX: obsolete - reserved for old
					   * clients prior to 2.2 */
	OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
	OBD_FL_NOSPC_BLK    = 0x00100000, /* no more block space on OST */

	/* Note that while these checksum values are currently separate bits,
	 * in 2.x we can actually allow all values from 1-31 if we wanted. */
	OBD_FL_CKSUM_ALL    = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER |
			      OBD_FL_CKSUM_CRC32C,

	/* mask for local-only flag, which won't be sent over network */
	OBD_FL_LOCAL_MASK   = 0xF0000000,
};

#define LOV_MAGIC_V1      0x0BD10BD0
#define LOV_MAGIC	 LOV_MAGIC_V1
#define LOV_MAGIC_JOIN_V1 0x0BD20BD0
#define LOV_MAGIC_V3      0x0BD30BD0

/*
 * magic for fully defined striping
 * the idea is that we should have different magics for striping "hints"
 * (struct lov_user_md_v[13]) and for defined, ready-to-use striping (struct
 * lov_mds_md_v[13]). at the moment the magics are used in the wire protocol,
 * so we can't just change them without lengthy preparation, but we still
 * need a mechanism to allow LOD to differentiate hint versus ready striping.
 * so, at the moment we use a trick: MDT knows what to expect from a request
 * depending on the case (replay uses ready striping, a non-replay req uses
 * hints), so MDT replaces the magic with the appropriate one and now LOD can
 * easily understand what's inside -bzzz
 */
#define LOV_MAGIC_V1_DEF  0x0CD10BD0
#define LOV_MAGIC_V3_DEF  0x0CD30BD0

#define LOV_PATTERN_RAID0 0x001   /* stripes are used round-robin */
#define LOV_PATTERN_RAID1 0x002   /* stripes are mirrors of each other */
#define LOV_PATTERN_FIRST 0x100   /* first stripe is not in round-robin */
#define LOV_PATTERN_CMOBD 0x200

#define lov_ost_data lov_ost_data_v1
struct lov_ost_data_v1 {	  /* per-stripe data structure (little-endian)*/
	struct ost_id l_ost_oi;	  /* OST object ID */
	__u32 l_ost_gen;	  /* generation of this l_ost_idx */
	__u32 l_ost_idx;	  /* OST index in LOV (lov_tgt_desc->tgts) */
};

#define lov_mds_md lov_mds_md_v1
struct lov_mds_md_v1 {	    /* LOV EA mds/wire data (little-endian) */
	__u32 lmm_magic;	  /* magic number = LOV_MAGIC_V1 */
	__u32 lmm_pattern;	/* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
	struct ost_id	lmm_oi;	  /* LOV object ID */
	__u32 lmm_stripe_size;    /* size of stripe in bytes */
	/* lmm_stripe_count used to be __u32 */
	__u16 lmm_stripe_count;   /* num stripes in use for this object */
	__u16 lmm_layout_gen;     /* layout generation number */
	struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
};

/**
 * Sigh, because pre-2.4 uses
 * struct lov_mds_md_v1 {
 *	........
 *	__u64 lmm_object_id;
 *	__u64 lmm_object_seq;
 *      ......
 *      }
 * to identify the LOV(MDT) object, and lmm_object_seq will
 * be normal_fid, which makes it hard to fold these conversions
 * into ostid_to_fid(), so we do the lmm_oi/FID conversion separately.
 *
 * We can tell the lmm_oi apart as follows:
 * 1.8: lmm_object_id = {inode}, lmm_object_gr = 0
 * 2.1: lmm_object_id = {oid < 128k}, lmm_object_seq = FID_SEQ_NORMAL
 * 2.4: lmm_oi.f_seq = FID_SEQ_NORMAL, lmm_oi.f_oid = {oid < 128k},
 *      lmm_oi.f_ver = 0
 *
 * But currently lmm_oi/lsm_oi does not have any "real" usages,
 * except for printing some information, and the user can always
 * get the real FID from the LMA; besides, this multiple-case check
 * would make swabbing more complicated. So we keep using id/seq for lmm_oi.
 */

static inline void fid_to_lmm_oi(const struct lu_fid *fid,
				 struct ost_id *oi)
{
	oi->oi.oi_id = fid_oid(fid);
	oi->oi.oi_seq = fid_seq(fid);
}

static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq)
{
	oi->oi.oi_seq = seq;
}

static inline __u64 lmm_oi_id(struct ost_id *oi)
{
	return oi->oi.oi_id;
}

static inline __u64 lmm_oi_seq(struct ost_id *oi)
{
	return oi->oi.oi_seq;
}

static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi,
				    struct ost_id *src_oi)
{
	dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
	dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
}

static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
				    struct ost_id *src_oi)
{
	dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
	dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
}

/* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */

#define MAX_MD_SIZE (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
#define MIN_MD_SIZE (sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data))

#define XATTR_NAME_ACL_ACCESS   "system.posix_acl_access"
#define XATTR_NAME_ACL_DEFAULT  "system.posix_acl_default"
#define XATTR_USER_PREFIX       "user."
#define XATTR_TRUSTED_PREFIX    "trusted."
#define XATTR_SECURITY_PREFIX   "security."
#define XATTR_LUSTRE_PREFIX     "lustre."

#define XATTR_NAME_LOV	  "trusted.lov"
#define XATTR_NAME_LMA	  "trusted.lma"
#define XATTR_NAME_LMV	  "trusted.lmv"
#define XATTR_NAME_LINK	 "trusted.link"
#define XATTR_NAME_FID	  "trusted.fid"
#define XATTR_NAME_VERSION      "trusted.version"
#define XATTR_NAME_SOM		"trusted.som"
#define XATTR_NAME_HSM		"trusted.hsm"
#define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_namespace"

struct lov_mds_md_v3 {	    /* LOV EA mds/wire data (little-endian) */
	__u32 lmm_magic;	  /* magic number = LOV_MAGIC_V3 */
	__u32 lmm_pattern;	/* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
	struct ost_id	lmm_oi;	  /* LOV object ID */
	__u32 lmm_stripe_size;    /* size of stripe in bytes */
	/* lmm_stripe_count used to be __u32 */
	__u16 lmm_stripe_count;   /* num stripes in use for this object */
	__u16 lmm_layout_gen;     /* layout generation number */
	char  lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */
	struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
};

#define OBD_MD_FLID	(0x00000001ULL) /* object ID */
#define OBD_MD_FLATIME     (0x00000002ULL) /* access time */
#define OBD_MD_FLMTIME     (0x00000004ULL) /* data modification time */
#define OBD_MD_FLCTIME     (0x00000008ULL) /* change time */
#define OBD_MD_FLSIZE      (0x00000010ULL) /* size */
#define OBD_MD_FLBLOCKS    (0x00000020ULL) /* allocated blocks count */
#define OBD_MD_FLBLKSZ     (0x00000040ULL) /* block size */
#define OBD_MD_FLMODE      (0x00000080ULL) /* access bits (mode & ~S_IFMT) */
#define OBD_MD_FLTYPE      (0x00000100ULL) /* object type (mode & S_IFMT) */
#define OBD_MD_FLUID       (0x00000200ULL) /* user ID */
#define OBD_MD_FLGID       (0x00000400ULL) /* group ID */
#define OBD_MD_FLFLAGS     (0x00000800ULL) /* flags word */
#define OBD_MD_FLNLINK     (0x00002000ULL) /* link count */
#define OBD_MD_FLGENER     (0x00004000ULL) /* generation number */
/*#define OBD_MD_FLINLINE    (0x00008000ULL)  inline data. used until 1.6.5 */
#define OBD_MD_FLRDEV      (0x00010000ULL) /* device number */
#define OBD_MD_FLEASIZE    (0x00020000ULL) /* extended attribute data */
#define OBD_MD_LINKNAME    (0x00040000ULL) /* symbolic link target */
#define OBD_MD_FLHANDLE    (0x00080000ULL) /* file/lock handle */
#define OBD_MD_FLCKSUM     (0x00100000ULL) /* bulk data checksum */
#define OBD_MD_FLQOS       (0x00200000ULL) /* quality of service stats */
/*#define OBD_MD_FLOSCOPQ    (0x00400000ULL) osc opaque data, never used */
#define OBD_MD_FLCOOKIE    (0x00800000ULL) /* log cancellation cookie */
#define OBD_MD_FLGROUP     (0x01000000ULL) /* group */
#define OBD_MD_FLFID       (0x02000000ULL) /* ->ost write inline fid */
#define OBD_MD_FLEPOCH     (0x04000000ULL) /* ->ost write with ioepoch */
					   /* ->mds if epoch opens or closes */
#define OBD_MD_FLGRANT     (0x08000000ULL) /* ost preallocation space grant */
#define OBD_MD_FLDIREA     (0x10000000ULL) /* dir's extended attribute data */
#define OBD_MD_FLUSRQUOTA  (0x20000000ULL) /* over quota flags sent from ost */
#define OBD_MD_FLGRPQUOTA  (0x40000000ULL) /* over quota flags sent from ost */
#define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */

#define OBD_MD_MDS	 (0x0000000100000000ULL) /* where an inode lives */
#define OBD_MD_REINT       (0x0000000200000000ULL) /* reintegrate oa */
#define OBD_MD_MEA	 (0x0000000400000000ULL) /* CMD split EA  */

/* OBD_MD_MDTIDX is used to get the MDT index, but it has never been used over
 * the wire, and it has been obsolete since 2.3 */
/* #define OBD_MD_MDTIDX      (0x0000000800000000ULL) */

#define OBD_MD_FLXATTR       (0x0000001000000000ULL) /* xattr */
#define OBD_MD_FLXATTRLS     (0x0000002000000000ULL) /* xattr list */
#define OBD_MD_FLXATTRRM     (0x0000004000000000ULL) /* xattr remove */
#define OBD_MD_FLACL	 (0x0000008000000000ULL) /* ACL */
#define OBD_MD_FLRMTPERM     (0x0000010000000000ULL) /* remote permission */
#define OBD_MD_FLMDSCAPA     (0x0000020000000000ULL) /* MDS capability */
#define OBD_MD_FLOSSCAPA     (0x0000040000000000ULL) /* OSS capability */
#define OBD_MD_FLCKSPLIT     (0x0000080000000000ULL) /* Check split on server */
#define OBD_MD_FLCROSSREF    (0x0000100000000000ULL) /* Cross-ref case */
#define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
						      * under lock */
#define OBD_MD_FLOBJCOUNT    (0x0000400000000000ULL) /* for multiple destroy */

#define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */
#define OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) /* lfs lgetfacl case */
#define OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) /* lfs rsetfacl case */
#define OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) /* lfs rgetfacl case */

#define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */

#define OBD_MD_FLGETATTR (OBD_MD_FLID    | OBD_MD_FLATIME | OBD_MD_FLMTIME | \
			  OBD_MD_FLCTIME | OBD_MD_FLSIZE  | OBD_MD_FLBLKSZ | \
			  OBD_MD_FLMODE  | OBD_MD_FLTYPE  | OBD_MD_FLUID   | \
			  OBD_MD_FLGID   | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \
			  OBD_MD_FLGENER | OBD_MD_FLRDEV  | OBD_MD_FLGROUP)

/* don't forget obdo_fid which is way down at the bottom so it can
 * come after the definition of llog_cookie */

enum hss_valid {
	HSS_SETMASK	= 0x01,
	HSS_CLEARMASK	= 0x02,
	HSS_ARCHIVE_ID	= 0x04,
};

struct hsm_state_set {
	__u32	hss_valid;
	__u32	hss_archive_id;
	__u64	hss_setmask;
	__u64	hss_clearmask;
};

extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
extern void lustre_swab_hsm_state_set(struct hsm_state_set *hss);

extern void lustre_swab_obd_statfs (struct obd_statfs *os);

/* ost_body.data values for OST_BRW */

#define OBD_BRW_READ	    0x01
#define OBD_BRW_WRITE	   0x02
#define OBD_BRW_RWMASK	  (OBD_BRW_READ | OBD_BRW_WRITE)
#define OBD_BRW_SYNC	    0x08 /* this page is a part of synchronous
				      * transfer and is not accounted in
				      * the grant. */
#define OBD_BRW_CHECK	   0x10
#define OBD_BRW_FROM_GRANT      0x20 /* the osc manages this under llite */
#define OBD_BRW_GRANTED	 0x40 /* the ost manages this */
#define OBD_BRW_NOCACHE	 0x80 /* this page is a part of non-cached IO */
#define OBD_BRW_NOQUOTA	0x100
#define OBD_BRW_SRVLOCK	0x200 /* Client holds no lock over this page */
#define OBD_BRW_ASYNC	  0x400 /* Server may delay commit to disk */
#define OBD_BRW_MEMALLOC       0x800 /* Client runs in the "kswapd" context */
#define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */
#define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */

#define OBD_OBJECT_EOF 0xffffffffffffffffULL

#define OST_MIN_PRECREATE 32
#define OST_MAX_PRECREATE 20000

struct obd_ioobj {
	struct ost_id	ioo_oid;	/* object ID, if multi-obj BRW */
	__u32		ioo_max_brw;	/* low 16 bits were o_mode before 2.4,
					 * now (PTLRPC_BULK_OPS_COUNT - 1) in
					 * high 16 bits in 2.4 and later */
	__u32		ioo_bufcnt;	/* number of niobufs for this object */
};

#define IOOBJ_MAX_BRW_BITS	16
#define IOOBJ_TYPE_MASK		((1U << IOOBJ_MAX_BRW_BITS) - 1)
#define ioobj_max_brw_get(ioo)	(((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1)
#define ioobj_max_brw_set(ioo, num)					\
do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
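
/* Worked example (illustrative): the BRW count is stored biased by one in
 * the high 16 bits of ioo_max_brw, so ioobj_max_brw_set(ioo, 4) stores
 * 3 << 16 and ioobj_max_brw_get(ioo) recovers 4.  A pre-2.4 peer that
 * leaves the high bits zero is read back as a maximum of 1 BRW per RPC. */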

extern void lustre_swab_obd_ioobj (struct obd_ioobj *ioo);

/* multiple of 8 bytes => can be packed into arrays */
struct niobuf_remote {
	__u64 offset;
	__u32 len;
	__u32 flags;
};

extern void lustre_swab_niobuf_remote (struct niobuf_remote *nbr);

/* lock value block communicated between the filter and llite */

/* OST_LVB_ERR_INIT is needed because the return code in rc is
 * negative, i.e. because ((MASK + rc) & MASK) != MASK. */
#define OST_LVB_ERR_INIT 0xffbadbad80000000ULL
#define OST_LVB_ERR_MASK 0xffbadbad00000000ULL
#define OST_LVB_IS_ERR(blocks)					  \
	((blocks & OST_LVB_ERR_MASK) == OST_LVB_ERR_MASK)
#define OST_LVB_SET_ERR(blocks, rc)				     \
	do { blocks = OST_LVB_ERR_INIT + rc; } while (0)
#define OST_LVB_GET_ERR(blocks)    (int)(blocks - OST_LVB_ERR_INIT)
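
/* Worked example (illustrative): OST_LVB_SET_ERR(blocks, -ENOENT) stores
 * 0xffbadbad80000000 + (-2) == 0xffbadbad7ffffffe.  OST_LVB_IS_ERR() still
 * matches because the 0xffbadbad00000000 mask bits survive the negative
 * offset, and OST_LVB_GET_ERR() recovers -2 by subtracting the base. */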

struct ost_lvb_v1 {
	__u64		lvb_size;
	obd_time	lvb_mtime;
	obd_time	lvb_atime;
	obd_time	lvb_ctime;
	__u64		lvb_blocks;
};

extern void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);

struct ost_lvb {
	__u64		lvb_size;
	obd_time	lvb_mtime;
	obd_time	lvb_atime;
	obd_time	lvb_ctime;
	__u64		lvb_blocks;
	__u32		lvb_mtime_ns;
	__u32		lvb_atime_ns;
	__u32		lvb_ctime_ns;
	__u32		lvb_padding;
};

extern void lustre_swab_ost_lvb(struct ost_lvb *lvb);

/*
 *   lquota data structures
 */

#ifndef QUOTABLOCK_BITS
#define QUOTABLOCK_BITS 10
#endif

#ifndef QUOTABLOCK_SIZE
#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
#endif

#ifndef toqb
#define toqb(x) (((x) + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS)
#endif
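
/* Worked example (illustrative): toqb() rounds a byte count up to 1KiB
 * quota blocks, so toqb(0) == 0, toqb(1) == 1 and toqb(4096) == 4. */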

/* The lquota_id structure is a union of all the possible identifier types
 * that can be used with quota; this includes:
 * - 64-bit user ID
 * - 64-bit group ID
 * - a FID which can be used for per-directory quota in the future */
union lquota_id {
	struct lu_fid	qid_fid; /* FID for per-directory quota */
	__u64		qid_uid; /* user identifier */
	__u64		qid_gid; /* group identifier */
};

/* quotactl management */
struct obd_quotactl {
	__u32			qc_cmd;
	__u32			qc_type; /* see Q_* flag below */
	__u32			qc_id;
	__u32			qc_stat;
	struct obd_dqinfo	qc_dqinfo;
	struct obd_dqblk	qc_dqblk;
};

extern void lustre_swab_obd_quotactl(struct obd_quotactl *q);

#define Q_QUOTACHECK	0x800100 /* deprecated as of 2.4 */
#define Q_INITQUOTA	0x800101 /* deprecated as of 2.4  */
#define Q_GETOINFO	0x800102 /* get obd quota info */
#define Q_GETOQUOTA	0x800103 /* get obd quotas */
#define Q_FINVALIDATE	0x800104 /* deprecated as of 2.4 */

#define Q_COPY(out, in, member) (out)->member = (in)->member

#define QCTL_COPY(out, in)		\
do {					\
	Q_COPY(out, in, qc_cmd);	\
	Q_COPY(out, in, qc_type);	\
	Q_COPY(out, in, qc_id);		\
	Q_COPY(out, in, qc_stat);	\
	Q_COPY(out, in, qc_dqinfo);	\
	Q_COPY(out, in, qc_dqblk);	\
} while (0)

/* Body of the quota request used for quota acquire/release RPCs between the
 * quota master (aka QMT) and the slaves (aka QSD). */
struct quota_body {
	struct lu_fid	qb_fid;     /* FID of global index packing the pool ID
				      * and type (data or metadata) as well as
				      * the quota type (user or group). */
	union lquota_id	qb_id;      /* uid or gid or directory FID */
	__u32		qb_flags;   /* see below */
	__u32		qb_padding;
	__u64		qb_count;   /* acquire/release count (kbytes/inodes) */
	__u64		qb_usage;   /* current slave usage (kbytes/inodes) */
	__u64		qb_slv_ver; /* slave index file version */
	struct lustre_handle	qb_lockh;     /* per-ID lock handle */
	struct lustre_handle	qb_glb_lockh; /* global lock handle */
	__u64		qb_padding1[4];
};

/* When the quota_body is used in the reply of quota global intent
 * lock (IT_QUOTA_CONN) reply, qb_fid contains slave index file FID. */
#define qb_slv_fid	qb_fid
/* qb_usage is the current qunit (in kbytes/inodes) when quota_body is used in
 * quota reply */
#define qb_qunit	qb_usage

#define QUOTA_DQACQ_FL_ACQ	0x1  /* acquire quota */
#define QUOTA_DQACQ_FL_PREACQ	0x2  /* pre-acquire */
#define QUOTA_DQACQ_FL_REL	0x4  /* release quota */
#define QUOTA_DQACQ_FL_REPORT	0x8  /* report usage */

extern void lustre_swab_quota_body(struct quota_body *b);

/* Quota types currently supported */
enum {
	LQUOTA_TYPE_USR	= 0x00, /* maps to USRQUOTA */
	LQUOTA_TYPE_GRP	= 0x01, /* maps to GRPQUOTA */
	LQUOTA_TYPE_MAX
};

/* There are 2 different resource types on which a quota limit can be enforced:
 * - inodes on the MDTs
 * - blocks on the OSTs */
enum {
	LQUOTA_RES_MD		= 0x01, /* skip 0 to avoid null oid in FID */
	LQUOTA_RES_DT		= 0x02,
	LQUOTA_LAST_RES,
	LQUOTA_FIRST_RES	= LQUOTA_RES_MD
};
#define LQUOTA_NR_RES (LQUOTA_LAST_RES - LQUOTA_FIRST_RES + 1)

/*
 * Space accounting support
 * Format of an accounting record, providing disk usage information for a given
 * user or group
 */
struct lquota_acct_rec { /* 16 bytes */
	__u64 bspace;  /* current space in use */
	__u64 ispace;  /* current # inodes in use */
};

/*
 * Global quota index support
 * Format of a global record, providing global quota settings for a given quota
 * identifier
 */
struct lquota_glb_rec { /* 32 bytes */
	__u64 qbr_hardlimit; /* quota hard limit, in #inodes or kbytes */
	__u64 qbr_softlimit; /* quota soft limit, in #inodes or kbytes */
	__u64 qbr_time;      /* grace time, in seconds */
	__u64 qbr_granted;   /* how much is granted to slaves, in #inodes or
			      * kbytes */
};

/*
 * Slave index support
 * Format of a slave record, recording how much space is granted to a given
 * slave
 */
struct lquota_slv_rec { /* 8 bytes */
	__u64 qsr_granted; /* space granted to the slave for the key=ID,
			    * in #inodes or kbytes */
};

/* Data structures associated with the quota locks */

/* Glimpse descriptor used for the index & per-ID quota locks */
struct ldlm_gl_lquota_desc {
	union lquota_id	gl_id;    /* quota ID subject to the glimpse */
	__u64		gl_flags; /* see LQUOTA_FL* below */
	__u64		gl_ver;   /* new index version */
	__u64		gl_hardlimit; /* new hardlimit or qunit value */
	__u64		gl_softlimit; /* new softlimit */
	__u64		gl_time;
	__u64		gl_pad2;
};
#define gl_qunit	gl_hardlimit /* current qunit value used when
				      * glimpsing per-ID quota locks */

/* quota glimpse flags */
#define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */

/* LVB used with quota (global and per-ID) locks */
struct lquota_lvb {
	__u64	lvb_flags;	/* see LQUOTA_FL* above */
	__u64	lvb_id_may_rel; /* space that might be released later */
	__u64	lvb_id_rel;     /* space released by the slave for this ID */
	__u64	lvb_id_qunit;   /* current qunit value */
	__u64	lvb_pad1;
};

extern void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);

/* LVB used with global quota lock */
#define lvb_glb_ver  lvb_id_may_rel /* current version of the global index */

/* op codes */
typedef enum {
	QUOTA_DQACQ	= 601,
	QUOTA_DQREL	= 602,
	QUOTA_LAST_OPC
} quota_cmd_t;
#define QUOTA_FIRST_OPC	QUOTA_DQACQ

/*
 *   MDS REQ RECORDS
 */

/* opcodes */
typedef enum {
	MDS_GETATTR		= 33,
	MDS_GETATTR_NAME	= 34,
	MDS_CLOSE		= 35,
	MDS_REINT		= 36,
	MDS_READPAGE		= 37,
	MDS_CONNECT		= 38,
	MDS_DISCONNECT		= 39,
	MDS_GETSTATUS		= 40,
	MDS_STATFS		= 41,
	MDS_PIN			= 42,
	MDS_UNPIN		= 43,
	MDS_SYNC		= 44,
	MDS_DONE_WRITING	= 45,
	MDS_SET_INFO		= 46,
	MDS_QUOTACHECK		= 47,
	MDS_QUOTACTL		= 48,
	MDS_GETXATTR		= 49,
	MDS_SETXATTR		= 50, /* obsolete, now it's MDS_REINT op */
	MDS_WRITEPAGE		= 51,
	MDS_IS_SUBDIR		= 52,
	MDS_GET_INFO		= 53,
	MDS_HSM_STATE_GET	= 54,
	MDS_HSM_STATE_SET	= 55,
	MDS_HSM_ACTION		= 56,
	MDS_HSM_PROGRESS	= 57,
	MDS_HSM_REQUEST		= 58,
	MDS_HSM_CT_REGISTER	= 59,
	MDS_HSM_CT_UNREGISTER	= 60,
	MDS_SWAP_LAYOUTS	= 61,
	MDS_LAST_OPC
} mds_cmd_t;

#define MDS_FIRST_OPC    MDS_GETATTR


/* opcodes for object update */
typedef enum {
	UPDATE_OBJ	= 1000,
	UPDATE_LAST_OPC
} update_cmd_t;

#define UPDATE_FIRST_OPC    UPDATE_OBJ

/*
 * Do not exceed 63
 */

typedef enum {
	REINT_SETATTR  = 1,
	REINT_CREATE   = 2,
	REINT_LINK     = 3,
	REINT_UNLINK   = 4,
	REINT_RENAME   = 5,
	REINT_OPEN     = 6,
	REINT_SETXATTR = 7,
	REINT_RMENTRY  = 8,
//      REINT_WRITE    = 9,
	REINT_MAX
} mds_reint_t, mdt_reint_t;

extern void lustre_swab_generic_32s (__u32 *val);

/* the disposition of the intent outlines what was executed */
#define DISP_IT_EXECD	0x00000001
#define DISP_LOOKUP_EXECD    0x00000002
#define DISP_LOOKUP_NEG      0x00000004
#define DISP_LOOKUP_POS      0x00000008
#define DISP_OPEN_CREATE     0x00000010
#define DISP_OPEN_OPEN       0x00000020
#define DISP_ENQ_COMPLETE    0x00400000
#define DISP_ENQ_OPEN_REF    0x00800000
#define DISP_ENQ_CREATE_REF  0x01000000
#define DISP_OPEN_LOCK       0x02000000

/* INODE LOCK PARTS */
#define MDS_INODELOCK_LOOKUP 0x000001       /* dentry, mode, owner, group */
#define MDS_INODELOCK_UPDATE 0x000002       /* size, links, timestamps */
#define MDS_INODELOCK_OPEN   0x000004       /* For opened files */
#define MDS_INODELOCK_LAYOUT 0x000008       /* for layout */
#define MDS_INODELOCK_PERM   0x000010       /* for permission */

#define MDS_INODELOCK_MAXSHIFT 4
/* This FULL lock is useful to take on unlink sort of operations */
#define MDS_INODELOCK_FULL ((1<<(MDS_INODELOCK_MAXSHIFT+1))-1)
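
/* Worked example (illustrative): with MDS_INODELOCK_MAXSHIFT == 4 the FULL
 * mask is (1 << 5) - 1 == 0x1f, i.e. LOOKUP|UPDATE|OPEN|LAYOUT|PERM. */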

extern void lustre_swab_ll_fid (struct ll_fid *fid);

/* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
 * but was moved into name[1] along with the OID to avoid consuming the
 * name[2,3] fields that need to be used for the quota id (also a FID). */
enum {
	LUSTRE_RES_ID_SEQ_OFF = 0,
	LUSTRE_RES_ID_VER_OID_OFF = 1,
	LUSTRE_RES_ID_WAS_VER_OFF = 2, /* see note above */
	LUSTRE_RES_ID_QUOTA_SEQ_OFF = 2,
	LUSTRE_RES_ID_QUOTA_VER_OID_OFF = 3,
	LUSTRE_RES_ID_HSH_OFF = 3
};

#define MDS_STATUS_CONN 1
#define MDS_STATUS_LOV 2

/* mdt_thread_info.mti_flags. */
enum md_op_flags {
	/* This flag indicates that Size-on-MDS attributes have changed. */
	MF_SOM_CHANGE	   = (1 << 0),
	/* These flags indicate that an epoch opens or closes. */
	MF_EPOCH_OPEN	   = (1 << 1),
	MF_EPOCH_CLOSE	  = (1 << 2),
	MF_MDC_CANCEL_FID1      = (1 << 3),
	MF_MDC_CANCEL_FID2      = (1 << 4),
	MF_MDC_CANCEL_FID3      = (1 << 5),
	MF_MDC_CANCEL_FID4      = (1 << 6),
	/* There is a pending attribute update. */
	MF_SOM_AU	       = (1 << 7),
	/* Cancel OST locks while getting OST attributes. */
	MF_GETATTR_LOCK	 = (1 << 8),
	MF_GET_MDT_IDX	  = (1 << 9),
};

#define MF_SOM_LOCAL_FLAGS (MF_SOM_CHANGE | MF_EPOCH_OPEN | MF_EPOCH_CLOSE)

#define LUSTRE_BFLAG_UNCOMMITTED_WRITES   0x1

/* these should be identical to their EXT4_*_FL counterparts, they are
 * redefined here only to avoid dragging in fs/ext4/ext4.h */
#define LUSTRE_SYNC_FL	 0x00000008 /* Synchronous updates */
#define LUSTRE_IMMUTABLE_FL    0x00000010 /* Immutable file */
#define LUSTRE_APPEND_FL       0x00000020 /* writes to file may only append */
#define LUSTRE_NOATIME_FL      0x00000080 /* do not update atime */
#define LUSTRE_DIRSYNC_FL      0x00010000 /* dirsync behaviour (dir only) */

/* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values
 * for the client inode i_flags.  The LUSTRE_*_FL are the Lustre wire
 * protocol equivalents of LDISKFS_*_FL values stored on disk, while
 * the S_* flags are kernel-internal values that change between kernel
 * versions.  These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS.
 * See b=16526 for a full history. */
static inline int ll_ext_to_inode_flags(int flags)
{
	return (((flags & LUSTRE_SYNC_FL)      ? S_SYNC      : 0) |
		((flags & LUSTRE_NOATIME_FL)   ? S_NOATIME   : 0) |
		((flags & LUSTRE_APPEND_FL)    ? S_APPEND    : 0) |
#if defined(S_DIRSYNC)
		((flags & LUSTRE_DIRSYNC_FL)   ? S_DIRSYNC   : 0) |
#endif
		((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0));
}

static inline int ll_inode_to_ext_flags(int iflags)
{
	return (((iflags & S_SYNC)      ? LUSTRE_SYNC_FL      : 0) |
		((iflags & S_NOATIME)   ? LUSTRE_NOATIME_FL   : 0) |
		((iflags & S_APPEND)    ? LUSTRE_APPEND_FL    : 0) |
#if defined(S_DIRSYNC)
		((iflags & S_DIRSYNC)   ? LUSTRE_DIRSYNC_FL   : 0) |
#endif
		((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0));
}

struct mdt_body {
	struct lu_fid  fid1;
	struct lu_fid  fid2;
	struct lustre_handle handle;
	__u64	  valid;
	__u64	  size;   /* Offset, in the case of MDS_READPAGE */
	obd_time	mtime;
	obd_time	atime;
	obd_time	ctime;
	__u64	  blocks; /* XID, in the case of MDS_READPAGE */
	__u64	  ioepoch;
	__u64	       unused1; /* was "ino" until 2.4.0 */
	__u32	  fsuid;
	__u32	  fsgid;
	__u32	  capability;
	__u32	  mode;
	__u32	  uid;
	__u32	  gid;
	__u32	  flags; /* from vfs for pin/unpin, LUSTRE_BFLAG close */
	__u32	  rdev;
	__u32	  nlink; /* #bytes to read in the case of MDS_READPAGE */
	__u32	       unused2; /* was "generation" until 2.4.0 */
	__u32	  suppgid;
	__u32	  eadatasize;
	__u32	  aclsize;
	__u32	  max_mdsize;
	__u32	  max_cookiesize;
	__u32	  uid_h; /* high 32-bits of uid, for FUID */
	__u32	  gid_h; /* high 32-bits of gid, for FUID */
	__u32	  padding_5; /* also fix lustre_swab_mdt_body */
	__u64	  padding_6;
	__u64	  padding_7;
	__u64	  padding_8;
	__u64	  padding_9;
	__u64	  padding_10;
}; /* 216 */

extern void lustre_swab_mdt_body (struct mdt_body *b);

struct mdt_ioepoch {
	struct lustre_handle handle;
	__u64  ioepoch;
	__u32  flags;
	__u32  padding;
};

extern void lustre_swab_mdt_ioepoch (struct mdt_ioepoch *b);

/* permissions for md_perm.mp_perm */
enum {
	CFS_SETUID_PERM = 0x01,
	CFS_SETGID_PERM = 0x02,
	CFS_SETGRP_PERM = 0x04,
	CFS_RMTACL_PERM = 0x08,
	CFS_RMTOWN_PERM = 0x10
};

/* inode access permission for a remote user; the inode info is omitted
 * since the client already knows it. */
struct mdt_remote_perm {
	__u32	   rp_uid;
	__u32	   rp_gid;
	__u32	   rp_fsuid;
	__u32	   rp_fsuid_h;
	__u32	   rp_fsgid;
	__u32	   rp_fsgid_h;
	__u32	   rp_access_perm; /* MAY_READ/WRITE/EXEC */
	__u32	   rp_padding;
};

extern void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p);

struct mdt_rec_setattr {
	__u32	   sa_opcode;
	__u32	   sa_cap;
	__u32	   sa_fsuid;
	__u32	   sa_fsuid_h;
	__u32	   sa_fsgid;
	__u32	   sa_fsgid_h;
	__u32	   sa_suppgid;
	__u32	   sa_suppgid_h;
	__u32	   sa_padding_1;
	__u32	   sa_padding_1_h;
	struct lu_fid   sa_fid;
	__u64	   sa_valid;
	__u32	   sa_uid;
	__u32	   sa_gid;
	__u64	   sa_size;
	__u64	   sa_blocks;
	obd_time	sa_mtime;
	obd_time	sa_atime;
	obd_time	sa_ctime;
	__u32	   sa_attr_flags;
	__u32	   sa_mode;
	__u32	   sa_bias;      /* some operation flags */
	__u32	   sa_padding_3;
	__u32	   sa_padding_4;
	__u32	   sa_padding_5;
};

extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa);

/*
 * Attribute flags used in mdt_rec_setattr::sa_valid.
 * The kernel's #defines for ATTR_* should not be used over the network
 * since the client and MDS may run different kernels (see bug 13828)
 * Therefore, we should only use MDS_ATTR_* attributes for sa_valid.
 */
#define MDS_ATTR_MODE	  0x1ULL /* = 1 */
#define MDS_ATTR_UID	   0x2ULL /* = 2 */
#define MDS_ATTR_GID	   0x4ULL /* = 4 */
#define MDS_ATTR_SIZE	  0x8ULL /* = 8 */
#define MDS_ATTR_ATIME	0x10ULL /* = 16 */
#define MDS_ATTR_MTIME	0x20ULL /* = 32 */
#define MDS_ATTR_CTIME	0x40ULL /* = 64 */
#define MDS_ATTR_ATIME_SET    0x80ULL /* = 128 */
#define MDS_ATTR_MTIME_SET   0x100ULL /* = 256 */
#define MDS_ATTR_FORCE       0x200ULL /* = 512, not a change, but force it */
#define MDS_ATTR_ATTR_FLAG   0x400ULL /* = 1024 */
#define MDS_ATTR_KILL_SUID   0x800ULL /* = 2048 */
#define MDS_ATTR_KILL_SGID  0x1000ULL /* = 4096 */
#define MDS_ATTR_CTIME_SET  0x2000ULL /* = 8192 */
#define MDS_ATTR_FROM_OPEN  0x4000ULL /* = 16384, called from open path, ie O_TRUNC */
#define MDS_ATTR_BLOCKS     0x8000ULL /* = 32768 */

#ifndef FMODE_READ
#define FMODE_READ	       00000001
#define FMODE_WRITE	      00000002
#endif

#define MDS_FMODE_CLOSED	 00000000
#define MDS_FMODE_EXEC	   00000004
/* IO Epoch is opened on a closed file. */
#define MDS_FMODE_EPOCH	  01000000
/* IO Epoch is opened on a file truncate. */
#define MDS_FMODE_TRUNC	  02000000
/* Size-on-MDS Attribute Update is pending. */
#define MDS_FMODE_SOM	    04000000

#define MDS_OPEN_CREATED	 00000010
#define MDS_OPEN_CROSS	   00000020

#define MDS_OPEN_CREAT	   00000100
#define MDS_OPEN_EXCL	    00000200
#define MDS_OPEN_TRUNC	   00001000
#define MDS_OPEN_APPEND	  00002000
#define MDS_OPEN_SYNC	    00010000
#define MDS_OPEN_DIRECTORY       00200000

#define MDS_OPEN_BY_FID		040000000 /* open_by_fid for known object */
#define MDS_OPEN_DELAY_CREATE  0100000000 /* delay initial object create */
#define MDS_OPEN_OWNEROVERRIDE 0200000000 /* NFSD rw-reopen ro file for owner */
#define MDS_OPEN_JOIN_FILE     0400000000 /* open for join file.
					   * We no longer support JOIN FILE;
					   * this flag is reserved only to
					   * prevent the bit from being
					   * reused. */

#define MDS_OPEN_LOCK	 04000000000 /* This open requires open lock */
#define MDS_OPEN_HAS_EA      010000000000 /* specify object create pattern */
#define MDS_OPEN_HAS_OBJS    020000000000 /* just set the EA, the objects exist */
#define MDS_OPEN_NORESTORE  0100000000000ULL /* Do not restore file at open */
#define MDS_OPEN_NEWSTRIPE  0200000000000ULL /* New stripe needed (restripe or
					      * hsm restore) */
#define MDS_OPEN_VOLATILE   0400000000000ULL /* File is volatile = created
						unlinked */

/* permission to create a non-directory file */
#define MAY_CREATE      (1 << 7)
/* permission to create a directory file */
#define MAY_LINK	(1 << 8)
/* permission to delete from the directory */
#define MAY_UNLINK      (1 << 9)
/* source's permission for rename */
#define MAY_RENAME_SRC  (1 << 10)
/* target's permission for rename */
#define MAY_RENAME_TAR  (1 << 11)
/* partial (parent's) VTX permission check */
#define MAY_VTX_PART    (1 << 12)
/* full VTX permission check */
#define MAY_VTX_FULL    (1 << 13)
/* lfs rgetfacl permission check */
#define MAY_RGETFACL    (1 << 14)

enum {
	MDS_CHECK_SPLIT		= 1 << 0,
	MDS_CROSS_REF		= 1 << 1,
	MDS_VTX_BYPASS		= 1 << 2,
	MDS_PERM_BYPASS		= 1 << 3,
	MDS_SOM			= 1 << 4,
	MDS_QUOTA_IGNORE	= 1 << 5,
	MDS_CLOSE_CLEANUP	= 1 << 6,
	MDS_KEEP_ORPHAN		= 1 << 7,
	MDS_RECOV_OPEN		= 1 << 8,
	MDS_DATA_MODIFIED	= 1 << 9,
	MDS_CREATE_VOLATILE	= 1 << 10,
	MDS_OWNEROVERRIDE	= 1 << 11,
};

/* instance of mdt_reint_rec */
struct mdt_rec_create {
	__u32	   cr_opcode;
	__u32	   cr_cap;
	__u32	   cr_fsuid;
	__u32	   cr_fsuid_h;
	__u32	   cr_fsgid;
	__u32	   cr_fsgid_h;
	__u32	   cr_suppgid1;
	__u32	   cr_suppgid1_h;
	__u32	   cr_suppgid2;
	__u32	   cr_suppgid2_h;
	struct lu_fid   cr_fid1;
	struct lu_fid   cr_fid2;
	struct lustre_handle cr_old_handle; /* handle in case of open replay */
	obd_time	cr_time;
	__u64	   cr_rdev;
	__u64	   cr_ioepoch;
	__u64	   cr_padding_1;   /* rr_blocks */
	__u32	   cr_mode;
	__u32	   cr_bias;
	/* the set/get_mrc_cr_flags() helpers must be used to access the
	 * 64-bit cr_flags [cr_flags_l, cr_flags_h]; this is done to extend
	 * the cr_flags size without breaking 1.8 compatibility */
	__u32	   cr_flags_l;     /* for use with open, low  32 bits  */
	__u32	   cr_flags_h;     /* for use with open, high 32 bits */
	__u32	   cr_umask;       /* umask for create */
	__u32	   cr_padding_4;   /* rr_padding_4 */
};

static inline void set_mrc_cr_flags(struct mdt_rec_create *mrc, __u64 flags)
{
	mrc->cr_flags_l = (__u32)(flags & 0xFFFFFFFFUll);
	mrc->cr_flags_h = (__u32)(flags >> 32);
}

static inline __u64 get_mrc_cr_flags(struct mdt_rec_create *mrc)
{
	return ((__u64)(mrc->cr_flags_l) | ((__u64)mrc->cr_flags_h << 32));
}
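
/* Usage sketch (illustrative): the two halves round-trip a 64-bit value,
 * e.g. set_mrc_cr_flags(mrc, MDS_OPEN_LOCK | MDS_OPEN_VOLATILE) followed by
 * get_mrc_cr_flags(mrc) returns the same combined flags, with the low word
 * in cr_flags_l and the high word (here MDS_OPEN_VOLATILE) in cr_flags_h. */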

/* instance of mdt_reint_rec */
struct mdt_rec_link {
	__u32	   lk_opcode;
	__u32	   lk_cap;
	__u32	   lk_fsuid;
	__u32	   lk_fsuid_h;
	__u32	   lk_fsgid;
	__u32	   lk_fsgid_h;
	__u32	   lk_suppgid1;
	__u32	   lk_suppgid1_h;
	__u32	   lk_suppgid2;
	__u32	   lk_suppgid2_h;
	struct lu_fid   lk_fid1;
	struct lu_fid   lk_fid2;
	obd_time	lk_time;
	__u64	   lk_padding_1;   /* rr_atime */
	__u64	   lk_padding_2;   /* rr_ctime */
	__u64	   lk_padding_3;   /* rr_size */
	__u64	   lk_padding_4;   /* rr_blocks */
	__u32	   lk_bias;
	__u32	   lk_padding_5;   /* rr_mode */
	__u32	   lk_padding_6;   /* rr_flags */
	__u32	   lk_padding_7;   /* rr_padding_2 */
	__u32	   lk_padding_8;   /* rr_padding_3 */
	__u32	   lk_padding_9;   /* rr_padding_4 */
};

/* instance of mdt_reint_rec */
struct mdt_rec_unlink {
	__u32	   ul_opcode;
	__u32	   ul_cap;
	__u32	   ul_fsuid;
	__u32	   ul_fsuid_h;
	__u32	   ul_fsgid;
	__u32	   ul_fsgid_h;
	__u32	   ul_suppgid1;
	__u32	   ul_suppgid1_h;
	__u32	   ul_suppgid2;
	__u32	   ul_suppgid2_h;
	struct lu_fid   ul_fid1;
	struct lu_fid   ul_fid2;
	obd_time	ul_time;
	__u64	   ul_padding_2;   /* rr_atime */
	__u64	   ul_padding_3;   /* rr_ctime */
	__u64	   ul_padding_4;   /* rr_size */
	__u64	   ul_padding_5;   /* rr_blocks */
	__u32	   ul_bias;
	__u32	   ul_mode;
	__u32	   ul_padding_6;   /* rr_flags */
	__u32	   ul_padding_7;   /* rr_padding_2 */
	__u32	   ul_padding_8;   /* rr_padding_3 */
	__u32	   ul_padding_9;   /* rr_padding_4 */
};

/* instance of mdt_reint_rec */
struct mdt_rec_rename {
	__u32	   rn_opcode;
	__u32	   rn_cap;
	__u32	   rn_fsuid;
	__u32	   rn_fsuid_h;
	__u32	   rn_fsgid;
	__u32	   rn_fsgid_h;
	__u32	   rn_suppgid1;
	__u32	   rn_suppgid1_h;
	__u32	   rn_suppgid2;
	__u32	   rn_suppgid2_h;
	struct lu_fid   rn_fid1;
	struct lu_fid   rn_fid2;
	obd_time	rn_time;
	__u64	   rn_padding_1;   /* rr_atime */
	__u64	   rn_padding_2;   /* rr_ctime */
	__u64	   rn_padding_3;   /* rr_size */
	__u64	   rn_padding_4;   /* rr_blocks */
	__u32	   rn_bias;	/* some operation flags */
	__u32	   rn_mode;	/* cross-ref rename has mode */
	__u32	   rn_padding_5;   /* rr_flags */
	__u32	   rn_padding_6;   /* rr_padding_2 */
	__u32	   rn_padding_7;   /* rr_padding_3 */
	__u32	   rn_padding_8;   /* rr_padding_4 */
};

/* instance of mdt_reint_rec */
struct mdt_rec_setxattr {
	__u32	   sx_opcode;
	__u32	   sx_cap;
	__u32	   sx_fsuid;
	__u32	   sx_fsuid_h;
	__u32	   sx_fsgid;
	__u32	   sx_fsgid_h;
	__u32	   sx_suppgid1;
	__u32	   sx_suppgid1_h;
	__u32	   sx_suppgid2;
	__u32	   sx_suppgid2_h;
	struct lu_fid   sx_fid;
	__u64	   sx_padding_1;   /* These three are rr_fid2 */
	__u32	   sx_padding_2;
	__u32	   sx_padding_3;
	__u64	   sx_valid;
	obd_time	sx_time;
	__u64	   sx_padding_5;   /* rr_ctime */
	__u64	   sx_padding_6;   /* rr_size */
	__u64	   sx_padding_7;   /* rr_blocks */
	__u32	   sx_size;
	__u32	   sx_flags;
	__u32	   sx_padding_8;   /* rr_flags */
	__u32	   sx_padding_9;   /* rr_padding_2 */
	__u32	   sx_padding_10;  /* rr_padding_3 */
	__u32	   sx_padding_11;  /* rr_padding_4 */
};

/*
 * mdt_rec_reint is the template for all mdt_reint_xxx structures.
 * Do NOT change the size of the various members, otherwise swabbing will be
 * broken in lustre_swab_mdt_rec_reint().
 *
 * If you add new members in the other mdt_reint_xxx structures and need to
 * use the rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also.
 */
struct mdt_rec_reint {
	__u32	   rr_opcode;
	__u32	   rr_cap;
	__u32	   rr_fsuid;
	__u32	   rr_fsuid_h;
	__u32	   rr_fsgid;
	__u32	   rr_fsgid_h;
	__u32	   rr_suppgid1;
	__u32	   rr_suppgid1_h;
	__u32	   rr_suppgid2;
	__u32	   rr_suppgid2_h;
	struct lu_fid   rr_fid1;
	struct lu_fid   rr_fid2;
	obd_time	rr_mtime;
	obd_time	rr_atime;
	obd_time	rr_ctime;
	__u64	   rr_size;
	__u64	   rr_blocks;
	__u32	   rr_bias;
	__u32	   rr_mode;
	__u32	   rr_flags;
	__u32	   rr_flags_h;
	__u32	   rr_umask;
	__u32	   rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
};

extern void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);

struct lmv_desc {
	__u32 ld_tgt_count;		/* how many MDS's */
	__u32 ld_active_tgt_count;	 /* how many active */
	__u32 ld_default_stripe_count;     /* how many objects are used */
	__u32 ld_pattern;		  /* default MEA_MAGIC_* */
	__u64 ld_default_hash_size;
	__u64 ld_padding_1;		/* also fix lustre_swab_lmv_desc */
	__u32 ld_padding_2;		/* also fix lustre_swab_lmv_desc */
	__u32 ld_qos_maxage;	       /* in seconds */
	__u32 ld_padding_3;		/* also fix lustre_swab_lmv_desc */
	__u32 ld_padding_4;		/* also fix lustre_swab_lmv_desc */
	struct obd_uuid ld_uuid;
};

extern void lustre_swab_lmv_desc (struct lmv_desc *ld);

/* TODO: lmv_stripe_md should contain mds capabilities for all slave fids */
struct lmv_stripe_md {
	__u32	 mea_magic;
	__u32	 mea_count;
	__u32	 mea_master;
	__u32	 mea_padding;
	char	  mea_pool_name[LOV_MAXPOOLNAME];
	struct lu_fid mea_ids[0];
};

extern void lustre_swab_lmv_stripe_md(struct lmv_stripe_md *mea);

/* lmv structures */
#define MEA_MAGIC_LAST_CHAR      0xb2221ca1
#define MEA_MAGIC_ALL_CHARS      0xb222a11c
#define MEA_MAGIC_HASH_SEGMENT   0xb222a11b

#define MAX_HASH_SIZE_32	 0x7fffffffUL
#define MAX_HASH_SIZE	    0x7fffffffffffffffULL
#define MAX_HASH_HIGHEST_BIT     0x1000000000000000ULL

enum fld_rpc_opc {
	FLD_QUERY		       = 900,
	FLD_LAST_OPC,
	FLD_FIRST_OPC		   = FLD_QUERY
};

enum seq_rpc_opc {
	SEQ_QUERY		       = 700,
	SEQ_LAST_OPC,
	SEQ_FIRST_OPC		   = SEQ_QUERY
};

enum seq_op {
	SEQ_ALLOC_SUPER = 0,
	SEQ_ALLOC_META = 1
};

/*
 *  LOV data structures
 */

#define LOV_MAX_UUID_BUFFER_SIZE  8192
/* The size of the buffer the lov/mdc reserves for the
 * array of UUIDs returned by the MDS.  With the current
 * protocol, this will limit the max number of OSTs per LOV */

#define LOV_DESC_MAGIC 0xB0CCDE5C

/* LOV settings descriptor (should only contain static info) */
struct lov_desc {
	__u32 ld_tgt_count;		/* how many OBD's */
	__u32 ld_active_tgt_count;	 /* how many active */
	__u32 ld_default_stripe_count;     /* how many objects are used */
	__u32 ld_pattern;		  /* default PATTERN_RAID0 */
	__u64 ld_default_stripe_size;      /* in bytes */
	__u64 ld_default_stripe_offset;    /* in bytes */
	__u32 ld_padding_0;		/* unused */
	__u32 ld_qos_maxage;	       /* in seconds */
	__u32 ld_padding_1;		/* also fix lustre_swab_lov_desc */
	__u32 ld_padding_2;		/* also fix lustre_swab_lov_desc */
	struct obd_uuid ld_uuid;
};

#define ld_magic ld_active_tgt_count       /* for swabbing from llogs */

extern void lustre_swab_lov_desc (struct lov_desc *ld);

/*
 *   LDLM requests:
 */
/* opcodes -- MUST be distinct from OST/MDS opcodes */
typedef enum {
	LDLM_ENQUEUE     = 101,
	LDLM_CONVERT     = 102,
	LDLM_CANCEL      = 103,
	LDLM_BL_CALLBACK = 104,
	LDLM_CP_CALLBACK = 105,
	LDLM_GL_CALLBACK = 106,
	LDLM_SET_INFO    = 107,
	LDLM_LAST_OPC
} ldlm_cmd_t;
#define LDLM_FIRST_OPC LDLM_ENQUEUE

#define RES_NAME_SIZE 4
struct ldlm_res_id {
	__u64 name[RES_NAME_SIZE];
};

extern void lustre_swab_ldlm_res_id (struct ldlm_res_id *id);

static inline int ldlm_res_eq(const struct ldlm_res_id *res0,
			      const struct ldlm_res_id *res1)
{
	return !memcmp(res0, res1, sizeof(*res0));
}

/* lock types */
typedef enum {
	LCK_MINMODE = 0,
	LCK_EX      = 1,
	LCK_PW      = 2,
	LCK_PR      = 4,
	LCK_CW      = 8,
	LCK_CR      = 16,
	LCK_NL      = 32,
	LCK_GROUP   = 64,
	LCK_COS     = 128,
	LCK_MAXMODE
} ldlm_mode_t;

#define LCK_MODE_NUM    8

typedef enum {
	LDLM_PLAIN     = 10,
	LDLM_EXTENT    = 11,
	LDLM_FLOCK     = 12,
	LDLM_IBITS     = 13,
	LDLM_MAX_TYPE
} ldlm_type_t;

#define LDLM_MIN_TYPE LDLM_PLAIN

struct ldlm_extent {
	__u64 start;
	__u64 end;
	__u64 gid;
};

static inline int ldlm_extent_overlap(struct ldlm_extent *ex1,
				      struct ldlm_extent *ex2)
{
	return (ex1->start <= ex2->end) && (ex2->start <= ex1->end);
}

/* check if @ex1 contains @ex2 */
static inline int ldlm_extent_contain(struct ldlm_extent *ex1,
				      struct ldlm_extent *ex2)
{
	return (ex1->start <= ex2->start) && (ex1->end >= ex2->end);
}

struct ldlm_inodebits {
	__u64 bits;
};

struct ldlm_flock_wire {
	__u64 lfw_start;
	__u64 lfw_end;
	__u64 lfw_owner;
	__u32 lfw_padding;
	__u32 lfw_pid;
};

/* it's important that the fields of the ldlm_extent structure match
 * the first fields of the ldlm_flock structure because there is only
 * one ldlm_swab routine to process the ldlm_policy_data_t union. if
 * this ever changes we will need to swab the union differently based
 * on the resource type. */

typedef union {
	struct ldlm_extent l_extent;
	struct ldlm_flock_wire l_flock;
	struct ldlm_inodebits l_inodebits;
} ldlm_wire_policy_data_t;

extern void lustre_swab_ldlm_policy_data (ldlm_wire_policy_data_t *d);

union ldlm_gl_desc {
	struct ldlm_gl_lquota_desc	lquota_desc;
};

extern void lustre_swab_gl_desc(union ldlm_gl_desc *);

struct ldlm_intent {
	__u64 opc;
};

extern void lustre_swab_ldlm_intent (struct ldlm_intent *i);

struct ldlm_resource_desc {
	ldlm_type_t lr_type;
	__u32 lr_padding;       /* also fix lustre_swab_ldlm_resource_desc */
	struct ldlm_res_id lr_name;
};

extern void lustre_swab_ldlm_resource_desc (struct ldlm_resource_desc *r);

struct ldlm_lock_desc {
	struct ldlm_resource_desc l_resource;
	ldlm_mode_t l_req_mode;
	ldlm_mode_t l_granted_mode;
	ldlm_wire_policy_data_t l_policy_data;
};

extern void lustre_swab_ldlm_lock_desc (struct ldlm_lock_desc *l);

#define LDLM_LOCKREQ_HANDLES 2
#define LDLM_ENQUEUE_CANCEL_OFF 1

struct ldlm_request {
	__u32 lock_flags;
	__u32 lock_count;
	struct ldlm_lock_desc lock_desc;
	struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
};

extern void lustre_swab_ldlm_request (struct ldlm_request *rq);

/* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
 * Otherwise, 2 are available. */
#define ldlm_request_bufsize(count,type)				\
({								      \
	int _avail = LDLM_LOCKREQ_HANDLES;			      \
	_avail -= (type == LDLM_ENQUEUE ? LDLM_ENQUEUE_CANCEL_OFF : 0); \
	sizeof(struct ldlm_request) +				   \
	(count > _avail ? count - _avail : 0) *			 \
	sizeof(struct lustre_handle);				   \
})
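
/* Worked example (illustrative): for LDLM_ENQUEUE one of the two embedded
 * handles is already occupied, so ldlm_request_bufsize(3, LDLM_ENQUEUE)
 * is sizeof(struct ldlm_request) plus 2 * sizeof(struct lustre_handle)
 * for the two cancel handles that do not fit in the request itself. */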

struct ldlm_reply {
	__u32 lock_flags;
	__u32 lock_padding;     /* also fix lustre_swab_ldlm_reply */
	struct ldlm_lock_desc lock_desc;
	struct lustre_handle lock_handle;
	__u64  lock_policy_res1;
	__u64  lock_policy_res2;
};

extern void lustre_swab_ldlm_reply (struct ldlm_reply *r);

#define ldlm_flags_to_wire(flags)    ((__u32)(flags))
#define ldlm_flags_from_wire(flags)  ((__u64)(flags))

/*
 * Opcodes for mountconf (mgs and mgc)
 */
typedef enum {
	MGS_CONNECT = 250,
	MGS_DISCONNECT,
	MGS_EXCEPTION,	 /* node died, etc. */
	MGS_TARGET_REG,	/* whenever target starts up */
	MGS_TARGET_DEL,
	MGS_SET_INFO,
	MGS_CONFIG_READ,
	MGS_LAST_OPC
} mgs_cmd_t;
#define MGS_FIRST_OPC MGS_CONNECT

#define MGS_PARAM_MAXLEN 1024
#define KEY_SET_INFO "set_info"

struct mgs_send_param {
	char	     mgs_param[MGS_PARAM_MAXLEN];
};

/* We pass this info to the MGS so it can write config logs */
#define MTI_NAME_MAXLEN  64
#define MTI_PARAM_MAXLEN 4096
#define MTI_NIDS_MAX     32
struct mgs_target_info {
	__u32	    mti_lustre_ver;
	__u32	    mti_stripe_index;
	__u32	    mti_config_ver;
	__u32	    mti_flags;
	__u32	    mti_nid_count;
	__u32	    mti_instance; /* Running instance of target */
	char	     mti_fsname[MTI_NAME_MAXLEN];
	char	     mti_svname[MTI_NAME_MAXLEN];
	char	     mti_uuid[sizeof(struct obd_uuid)];
	__u64	    mti_nids[MTI_NIDS_MAX];     /* host nids (lnet_nid_t)*/
	char	     mti_params[MTI_PARAM_MAXLEN];
};
extern void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);

struct mgs_nidtbl_entry {
	__u64	   mne_version;    /* table version of this entry */
	__u32	   mne_instance;   /* target instance # */
	__u32	   mne_index;      /* target index */
	__u32	   mne_length;     /* length of this entry, in bytes */
	__u8	    mne_type;       /* target type LDD_F_SV_TYPE_OST/MDT */
	__u8	    mne_nid_type;   /* type of NID (must be zero); for IPv6 */
	__u8	    mne_nid_size;   /* size of each NID, in bytes */
	__u8	    mne_nid_count;  /* # of NIDs in buffer */
	union {
		lnet_nid_t nids[0];     /* variable size buffer for NIDs. */
	} u;
};
extern void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);

struct mgs_config_body {
	char     mcb_name[MTI_NAME_MAXLEN]; /* logname */
	__u64    mcb_offset;    /* next index of config log to request */
	__u16    mcb_type;      /* type of log: CONFIG_T_[CONFIG|RECOVER] */
	__u8     mcb_reserved;
	__u8     mcb_bits;      /* bits unit size of config log */
	__u32    mcb_units;     /* # of units for bulk transfer */
};
extern void lustre_swab_mgs_config_body(struct mgs_config_body *body);

struct mgs_config_res {
	__u64    mcr_offset;    /* index of last config log */
	__u64    mcr_size;      /* size of the log */
};
extern void lustre_swab_mgs_config_res(struct mgs_config_res *body);

/* Config marker flags (in config log) */
#define CM_START       0x01
#define CM_END	 0x02
#define CM_SKIP	0x04
#define CM_UPGRADE146  0x08
#define CM_EXCLUDE     0x10
#define CM_START_SKIP (CM_START | CM_SKIP)

struct cfg_marker {
	__u32	     cm_step;       /* aka config version */
	__u32	     cm_flags;
	__u32	     cm_vers;       /* lustre release version number */
	__u32	     cm_padding;    /* 64 bit align */
	obd_time	  cm_createtime; /* when this record was first created */
	obd_time	  cm_canceltime; /* when this record is no longer valid */
	char	      cm_tgtname[MTI_NAME_MAXLEN];
	char	      cm_comment[MTI_NAME_MAXLEN];
};

extern void lustre_swab_cfg_marker(struct cfg_marker *marker,
				   int swab, int size);

/*
 * Opcodes for multiple servers.
 */

typedef enum {
	OBD_PING = 400,
	OBD_LOG_CANCEL,
	OBD_QC_CALLBACK,
	OBD_IDX_READ,
	OBD_LAST_OPC
} obd_cmd_t;
#define OBD_FIRST_OPC OBD_PING

/* catalog of log objects */

/** Identifier for a single log object */
struct llog_logid {
	struct ost_id		lgl_oi;
	__u32		   lgl_ogen;
} __attribute__((packed));

/** Records written to the CATALOGS list */
#define CATLIST "CATALOGS"
struct llog_catid {
	struct llog_logid       lci_logid;
	__u32		   lci_padding1;
	__u32		   lci_padding2;
	__u32		   lci_padding3;
} __attribute__((packed));

/* Log data record types - there is no specific reason that these need to
 * be related to the RPC opcodes, but no reason not to (may be handy later?)
 */
#define LLOG_OP_MAGIC 0x10600000
#define LLOG_OP_MASK  0xfff00000

typedef enum {
	LLOG_PAD_MAGIC		= LLOG_OP_MAGIC | 0x00000,
	OST_SZ_REC		= LLOG_OP_MAGIC | 0x00f00,
	/* OST_RAID1_REC	= LLOG_OP_MAGIC | 0x01000, never used */
	MDS_UNLINK_REC		= LLOG_OP_MAGIC | 0x10000 | (MDS_REINT << 8) |
				  REINT_UNLINK, /* obsolete after 2.5.0 */
	MDS_UNLINK64_REC	= LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
				  REINT_UNLINK,
	/* MDS_SETATTR_REC	= LLOG_OP_MAGIC | 0x12401, obsolete 1.8.0 */
	MDS_SETATTR64_REC	= LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
				  REINT_SETATTR,
	OBD_CFG_REC		= LLOG_OP_MAGIC | 0x20000,
	/* PTL_CFG_REC		= LLOG_OP_MAGIC | 0x30000, obsolete 1.4.0 */
	LLOG_GEN_REC		= LLOG_OP_MAGIC | 0x40000,
	/* LLOG_JOIN_REC	= LLOG_OP_MAGIC | 0x50000, obsolete  1.8.0 */
	CHANGELOG_REC		= LLOG_OP_MAGIC | 0x60000,
	CHANGELOG_USER_REC	= LLOG_OP_MAGIC | 0x70000,
	LLOG_HDR_MAGIC		= LLOG_OP_MAGIC | 0x45539,
	LLOG_LOGID_MAGIC	= LLOG_OP_MAGIC | 0x4553b,
} llog_op_type;

#define LLOG_REC_HDR_NEEDS_SWABBING(r) \
	(((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC))

/** Log record header - stored in little endian order.
 * Each record must start with this struct, end with a llog_rec_tail,
 * and be a multiple of 256 bits in size.
 */
struct llog_rec_hdr {
	__u32	lrh_len;
	__u32	lrh_index;
	__u32	lrh_type;
	__u32	lrh_id;
};

struct llog_rec_tail {
	__u32	lrt_len;
	__u32	lrt_index;
};

/* Where data follow just after header */
#define REC_DATA(ptr)						\
	((void *)((char *)ptr + sizeof(struct llog_rec_hdr)))

#define REC_DATA_LEN(rec)					\
	(rec->lrh_len - sizeof(struct llog_rec_hdr) -		\
	 sizeof(struct llog_rec_tail))
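
/* Layout sketch (illustrative): a record is laid out as
 * [llog_rec_hdr][data][llog_rec_tail], with lrh_len covering all three
 * parts; REC_DATA() skips the 16-byte header and REC_DATA_LEN() subtracts
 * both the 16-byte header and the 8-byte tail from lrh_len. */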

struct llog_logid_rec {
	struct llog_rec_hdr	lid_hdr;
	struct llog_logid	lid_id;
	__u32			lid_padding1;
	__u64			lid_padding2;
	__u64			lid_padding3;
	struct llog_rec_tail	lid_tail;
} __attribute__((packed));

struct llog_unlink_rec {
	struct llog_rec_hdr	lur_hdr;
	obd_id			lur_oid;
	obd_count		lur_oseq;
	obd_count		lur_count;
	struct llog_rec_tail	lur_tail;
} __attribute__((packed));

struct llog_unlink64_rec {
	struct llog_rec_hdr	lur_hdr;
	struct lu_fid		lur_fid;
	obd_count		lur_count; /* to destroy the lost precreated objects */
	__u32			lur_padding1;
	__u64			lur_padding2;
	__u64			lur_padding3;
	struct llog_rec_tail    lur_tail;
} __attribute__((packed));

struct llog_setattr64_rec {
	struct llog_rec_hdr	lsr_hdr;
	struct ost_id		lsr_oi;
	__u32			lsr_uid;
	__u32			lsr_uid_h;
	__u32			lsr_gid;
	__u32			lsr_gid_h;
	__u64			lsr_padding;
	struct llog_rec_tail    lsr_tail;
} __attribute__((packed));

struct llog_size_change_rec {
	struct llog_rec_hdr	lsc_hdr;
	struct ll_fid		lsc_fid;
	__u32			lsc_ioepoch;
	__u32			lsc_padding1;
	__u64			lsc_padding2;
	__u64			lsc_padding3;
	struct llog_rec_tail	lsc_tail;
} __attribute__((packed));

#define CHANGELOG_MAGIC 0xca103000

/** \a changelog_rec_type's that can't be masked */
#define CHANGELOG_MINMASK (1 << CL_MARK)
/** bits covering all \a changelog_rec_type's */
#define CHANGELOG_ALLMASK 0xffffffff
/** default \a changelog_rec_type mask */
#define CHANGELOG_DEFMASK \
	(CHANGELOG_ALLMASK & ~((1 << CL_ATIME) | (1 << CL_CLOSE)))
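
/*
 * Illustrative sketch (hypothetical helper): a consumer filters records
 * by testing the bit for the record type against one of the masks
 * above.
 */
static inline int changelog_example_type_in_mask(__u32 mask,
						 unsigned int rec_type)
{
	return (mask & (1 << rec_type)) != 0;
}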

/* changelog llog name, needed by client replicators */
#define CHANGELOG_CATALOG "changelog_catalog"

struct changelog_setinfo {
	__u64 cs_recno;
	__u32 cs_id;
} __attribute__((packed));

/** changelog record */
struct llog_changelog_rec {
	struct llog_rec_hdr  cr_hdr;
	struct changelog_rec cr;
	struct llog_rec_tail cr_tail; /**< present only for sizeof */
} __attribute__((packed));

struct llog_changelog_ext_rec {
	struct llog_rec_hdr      cr_hdr;
	struct changelog_ext_rec cr;
	struct llog_rec_tail     cr_tail; /**< present only for sizeof */
} __attribute__((packed));

#define CHANGELOG_USER_PREFIX "cl"

struct llog_changelog_user_rec {
	struct llog_rec_hdr	cur_hdr;
	__u32			cur_id;
	__u32			cur_padding;
	__u64			cur_endrec;
	struct llog_rec_tail	cur_tail;
} __attribute__((packed));

/* Old llog gen for compatibility */
struct llog_gen {
	__u64 mnt_cnt;
	__u64 conn_cnt;
} __attribute__((packed));

struct llog_gen_rec {
	struct llog_rec_hdr	lgr_hdr;
	struct llog_gen		lgr_gen;
	__u64			padding1;
	__u64			padding2;
	__u64			padding3;
	struct llog_rec_tail	lgr_tail;
};

/* On-disk header structure of each log object, stored in little endian order */
#define LLOG_CHUNK_SIZE	 8192
#define LLOG_HEADER_SIZE	(96)
#define LLOG_BITMAP_BYTES       (LLOG_CHUNK_SIZE - LLOG_HEADER_SIZE)

#define LLOG_MIN_REC_SIZE       (24) /* round(llog_rec_hdr + llog_rec_tail) */

/* flags for the logs */
enum llog_flag {
	LLOG_F_ZAP_WHEN_EMPTY	= 0x1,
	LLOG_F_IS_CAT		= 0x2,
	LLOG_F_IS_PLAIN		= 0x4,
};

struct llog_log_hdr {
	struct llog_rec_hdr	llh_hdr;
	obd_time		llh_timestamp;
	__u32			llh_count;
	__u32			llh_bitmap_offset;
	__u32			llh_size;
	__u32			llh_flags;
	__u32			llh_cat_idx;
	/* for a catalog the first plain slot is next to it */
	struct obd_uuid		llh_tgtuuid;
	__u32			llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32) - 23];
	__u32			llh_bitmap[LLOG_BITMAP_BYTES/sizeof(__u32)];
	struct llog_rec_tail	llh_tail;
} __attribute__((packed));

#define LLOG_BITMAP_SIZE(llh)  (__u32)(((llh)->llh_hdr.lrh_len -	\
					(llh)->llh_bitmap_offset -	\
					sizeof((llh)->llh_tail)) * 8)
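
/*
 * Illustrative sketch (hypothetical helper): a record at index "index"
 * is live when its bit is set in llh_bitmap (bit 0 covers the header
 * itself).  The on-disk bitmap is little-endian, so a big-endian host
 * must byte-swap it first; this naive test assumes that has already
 * been done.
 */
static inline int llog_example_bit_is_set(struct llog_log_hdr *llh,
					  unsigned int index)
{
	return (llh->llh_bitmap[index / 32] >> (index % 32)) & 1;
}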

/** log cookies are used to reference a specific log file and a record therein */
struct llog_cookie {
	struct llog_logid	lgc_lgl;
	__u32			lgc_subsys;
	__u32			lgc_index;
	__u32			lgc_padding;
} __attribute__((packed));

/** llog protocol */
enum llogd_rpc_ops {
	LLOG_ORIGIN_HANDLE_CREATE	= 501,
	LLOG_ORIGIN_HANDLE_NEXT_BLOCK	= 502,
	LLOG_ORIGIN_HANDLE_READ_HEADER	= 503,
	LLOG_ORIGIN_HANDLE_WRITE_REC	= 504,
	LLOG_ORIGIN_HANDLE_CLOSE	= 505,
	LLOG_ORIGIN_CONNECT		= 506,
	LLOG_CATINFO			= 507,	/* deprecated */
	LLOG_ORIGIN_HANDLE_PREV_BLOCK	= 508,
	LLOG_ORIGIN_HANDLE_DESTROY	= 509,	/* for destroying llog objects */
	LLOG_LAST_OPC,
	LLOG_FIRST_OPC			= LLOG_ORIGIN_HANDLE_CREATE
};

struct llogd_body {
	struct llog_logid  lgd_logid;
	__u32 lgd_ctxt_idx;
	__u32 lgd_llh_flags;
	__u32 lgd_index;
	__u32 lgd_saved_index;
	__u32 lgd_len;
	__u64 lgd_cur_offset;
} __attribute__((packed));

struct llogd_conn_body {
	struct llog_gen		lgdc_gen;
	struct llog_logid	lgdc_logid;
	__u32			lgdc_ctxt_idx;
} __attribute__((packed));

/* Note: 64-bit types are 64-bit aligned in structure */
struct obdo {
	obd_valid		o_valid;	/* hot fields in this obdo */
	struct ost_id		o_oi;
	obd_id			o_parent_seq;
	obd_size		o_size;		/* o_size-o_blocks == ost_lvb */
	obd_time		o_mtime;
	obd_time		o_atime;
	obd_time		o_ctime;
	obd_blocks		o_blocks;	/* brw: cli sent cached bytes */
	obd_size		o_grant;

	/* 32-bit fields start here: keep an even number of them via padding */
	obd_blksize		o_blksize;	/* optimal IO blocksize */
	obd_mode		o_mode;		/* brw: cli sent cache remain */
	obd_uid			o_uid;
	obd_gid			o_gid;
	obd_flag		o_flags;
	obd_count		o_nlink;	/* brw: checksum */
	obd_count		o_parent_oid;
	obd_count		o_misc;		/* brw: o_dropped */

	__u64			o_ioepoch;	/* epoch in ost writes */
	__u32			o_stripe_idx;	/* holds stripe idx */
	__u32			o_parent_ver;
	struct lustre_handle	o_handle;	/* brw: lock handle to prolong
						 * locks */
	struct llog_cookie	o_lcookie;	/* destroy: unlink cookie from
						 * MDS */
	__u32			o_uid_h;
	__u32			o_gid_h;

	__u64			o_data_version;	/* getattr: sum of iversion for
						 * each stripe.
						 * brw: grant space consumed on
						 * the client for the write */
	__u64			o_padding_4;
	__u64			o_padding_5;
	__u64			o_padding_6;
};

#define o_dirty   o_blocks
#define o_undirty o_mode
#define o_dropped o_misc
#define o_cksum   o_nlink
#define o_grant_used o_data_version
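
/*
 * Illustrative sketch (hypothetical helper): during a bulk write the
 * client reuses otherwise idle obdo slots for cache accounting through
 * the aliases above; o_dirty really lands in o_blocks ("cli sent
 * cached bytes") and o_undirty in o_mode ("cli sent cache remain").
 */
static inline void obdo_example_set_cache_fields(struct obdo *oa,
						 __u64 dirty, __u32 undirty)
{
	oa->o_dirty = dirty;
	oa->o_undirty = undirty;
}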

static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd,
					struct obdo *wobdo, struct obdo *lobdo)
{
	memcpy(wobdo, lobdo, sizeof(*lobdo));
	wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
	if (ocd == NULL)
		return;

	if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
	    fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) {
		/* Currently OBD_FL_OSTID will only be used when a 2.4 echo
		 * client communicates with a pre-2.4 server */
		wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid);
		wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid);
	}
}

static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd,
					struct obdo *lobdo, struct obdo *wobdo)
{
	obd_flag local_flags = 0;

	if (lobdo->o_valid & OBD_MD_FLFLAGS)
		 local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;

	LASSERT(!(wobdo->o_flags & OBD_FL_LOCAL_MASK));

	memcpy(lobdo, wobdo, sizeof(*lobdo));
	if (local_flags != 0) {
		lobdo->o_valid |= OBD_MD_FLFLAGS;
		lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
		lobdo->o_flags |= local_flags;
	}
	if (ocd == NULL)
		return;

	if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
	    fid_seq_is_echo(wobdo->o_oi.oi.oi_seq)) {
		/* see above */
		lobdo->o_oi.oi_fid.f_seq = wobdo->o_oi.oi.oi_seq;
		lobdo->o_oi.oi_fid.f_oid = wobdo->o_oi.oi.oi_id;
		lobdo->o_oi.oi_fid.f_ver = 0;
	}
}

extern void lustre_swab_obdo(struct obdo *o);

/* request structure for OSTs */
struct ost_body {
	struct  obdo oa;
};

/* Key for FIEMAP to be used in get_info calls */
struct ll_fiemap_info_key {
	char    name[8];
	struct  obdo oa;
	struct  ll_user_fiemap fiemap;
};

extern void lustre_swab_ost_body(struct ost_body *b);
extern void lustre_swab_ost_last_id(obd_id *id);
extern void lustre_swab_fiemap(struct ll_user_fiemap *fiemap);

extern void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
extern void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
extern void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
					    int stripe_count);
extern void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);

/* llog_swab.c */
extern void lustre_swab_llogd_body(struct llogd_body *d);
extern void lustre_swab_llog_hdr(struct llog_log_hdr *h);
extern void lustre_swab_llogd_conn_body(struct llogd_conn_body *d);
extern void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
extern void lustre_swab_llog_id(struct llog_logid *lid);

struct lustre_cfg;
extern void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);

/* Functions for dumping PTLRPC fields */
void dump_rniobuf(struct niobuf_remote *rnb);
void dump_ioo(struct obd_ioobj *nb);
void dump_obdo(struct obdo *oa);
void dump_ost_body(struct ost_body *ob);
void dump_rcs(__u32 *rc);

#define IDX_INFO_MAGIC 0x3D37CC37

/* Index file transfer through the network. The server serializes the index into
 * a byte stream which is sent to the client via a bulk transfer */
struct idx_info {
	__u32		ii_magic;

	/* reply: see idx_info_flags below */
	__u32		ii_flags;

	/* request & reply: number of lu_idxpage (to be) transferred */
	__u16		ii_count;
	__u16		ii_pad0;

	/* request: requested attributes passed down to the iterator API */
	__u32		ii_attrs;

	/* request & reply: index file identifier (FID) */
	struct lu_fid	ii_fid;

	/* reply: version of the index file before starting to walk the index.
	 * Please note that the version can be modified at any time during the
	 * transfer */
	__u64		ii_version;

	/* request: hash to start with;
	 * reply: hash of the first entry of the first lu_idxpage and hash
	 *	of the entry to read next, if any */
	__u64		ii_hash_start;
	__u64		ii_hash_end;

	/* reply: size of keys in lu_idxpages, the minimal size if II_FL_VARKEY
	 * is set */
	__u16		ii_keysize;

	/* reply: size of records in lu_idxpages, the minimal size if
	 * II_FL_VARREC is set */
	__u16		ii_recsize;

	__u32		ii_pad1;
	__u64		ii_pad2;
	__u64		ii_pad3;
};
extern void lustre_swab_idx_info(struct idx_info *ii);

#define II_END_OFF	MDS_DIR_END_OFF /* all entries have been read */

/* List of flags used in idx_info::ii_flags */
enum idx_info_flags {
	II_FL_NOHASH	= 1 << 0, /* client doesn't care about hash value */
	II_FL_VARKEY	= 1 << 1, /* keys can be of variable size */
	II_FL_VARREC	= 1 << 2, /* records can be of variable size */
	II_FL_NONUNQ	= 1 << 3, /* index supports non-unique keys */
};

#define LIP_MAGIC 0x8A6D6B6C

/* 4KB (= LU_PAGE_SIZE) container gathering key/record pairs */
struct lu_idxpage {
	/* 16-byte header */
	__u32	lip_magic;
	__u16	lip_flags;
	__u16	lip_nr;   /* number of entries in the container */
	__u64	lip_pad0; /* additional padding for future use */

	/* key/record pairs are stored in the remaining 4080 bytes.
	 * depending upon the flags in idx_info::ii_flags, each key/record
	 * pair might be preceded by:
	 * - a hash value
	 * - the key size (II_FL_VARKEY is set)
	 * - the record size (II_FL_VARREC is set)
	 *
	 * For the time being, we only support fixed-size key & record. */
	char	lip_entries[0];
};
extern void lustre_swab_lip_header(struct lu_idxpage *lip);

#define LIP_HDR_SIZE (offsetof(struct lu_idxpage, lip_entries))
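
/*
 * Illustrative sketch (hypothetical helper): walking the fixed-size
 * key/record pairs of one lu_idxpage, with key and record sizes taken
 * from idx_info::ii_keysize/ii_recsize.  This assumes fixed sizes and
 * no per-entry hash; II_FL_VARKEY/II_FL_VARREC entries carry per-entry
 * sizes instead, as described above.
 */
static inline void lip_example_walk(struct lu_idxpage *lip,
				    __u16 keysize, __u16 recsize,
				    void (*cb)(void *key, void *rec))
{
	char *entry = lip->lip_entries;
	int i;

	for (i = 0; i < lip->lip_nr; i++) {
		cb(entry, entry + keysize);
		entry += keysize + recsize;
	}
}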

/* Gathers all possible types associated with a 4KB container */
union lu_page {
	struct lu_dirpage	lp_dir; /* for MDS_READPAGE */
	struct lu_idxpage	lp_idx; /* for OBD_IDX_READ */
	char			lp_array[LU_PAGE_SIZE];
};

/* security opcodes */
typedef enum {
	SEC_CTX_INIT		= 801,
	SEC_CTX_INIT_CONT	= 802,
	SEC_CTX_FINI		= 803,
	SEC_LAST_OPC,
	SEC_FIRST_OPC		= SEC_CTX_INIT
} sec_cmd_t;

/*
 * capa related definitions
 */
#define CAPA_HMAC_MAX_LEN       64
#define CAPA_HMAC_KEY_MAX_LEN   56

/* NB: take care when changing the sequence of elements in this struct,
 * because the offset info is used in find_capa() */
struct lustre_capa {
	struct lu_fid	lc_fid;		/**< fid */
	__u64		lc_opc;		/**< operations allowed */
	__u64		lc_uid;		/**< file owner */
	__u64		lc_gid;		/**< file group */
	__u32		lc_flags;	/**< HMAC algorithm & flags */
	__u32		lc_keyid;	/**< key# used for the capability */
	__u32		lc_timeout;	/**< capa timeout value (sec) */
	__u32		lc_expiry;	/**< expiry time (sec) */
	__u8		lc_hmac[CAPA_HMAC_MAX_LEN];	/**< HMAC */
} __attribute__((packed));

extern void lustre_swab_lustre_capa(struct lustre_capa *c);

/** lustre_capa::lc_opc */
enum {
	CAPA_OPC_BODY_WRITE   = 1<<0,  /**< write object data */
	CAPA_OPC_BODY_READ    = 1<<1,  /**< read object data */
	CAPA_OPC_INDEX_LOOKUP = 1<<2,  /**< lookup object fid */
	CAPA_OPC_INDEX_INSERT = 1<<3,  /**< insert object fid */
	CAPA_OPC_INDEX_DELETE = 1<<4,  /**< delete object fid */
	CAPA_OPC_OSS_WRITE    = 1<<5,  /**< write oss object data */
	CAPA_OPC_OSS_READ     = 1<<6,  /**< read oss object data */
	CAPA_OPC_OSS_TRUNC    = 1<<7,  /**< truncate oss object */
	CAPA_OPC_OSS_DESTROY  = 1<<8,  /**< destroy oss object */
	CAPA_OPC_META_WRITE   = 1<<9,  /**< write object meta data */
	CAPA_OPC_META_READ    = 1<<10, /**< read object meta data */
};

#define CAPA_OPC_OSS_RW (CAPA_OPC_OSS_READ | CAPA_OPC_OSS_WRITE)
#define CAPA_OPC_MDS_ONLY						   \
	(CAPA_OPC_BODY_WRITE | CAPA_OPC_BODY_READ | CAPA_OPC_INDEX_LOOKUP | \
	 CAPA_OPC_INDEX_INSERT | CAPA_OPC_INDEX_DELETE)
#define CAPA_OPC_OSS_ONLY						   \
	(CAPA_OPC_OSS_WRITE | CAPA_OPC_OSS_READ | CAPA_OPC_OSS_TRUNC |      \
	 CAPA_OPC_OSS_DESTROY)
#define CAPA_OPC_MDS_DEFAULT ~CAPA_OPC_OSS_ONLY
#define CAPA_OPC_OSS_DEFAULT ~(CAPA_OPC_MDS_ONLY | CAPA_OPC_OSS_ONLY)

/* An MDS capability covers the object capability for operations of body r/w
 * (dir readpage/sendpage), index lookup/insert/delete and metadata r/w,
 * while an OSS capability only covers the object capability for operations
 * of OSS data (file content) r/w/truncate.
 */
static inline int capa_for_mds(struct lustre_capa *c)
{
	return (c->lc_opc & CAPA_OPC_INDEX_LOOKUP) != 0;
}

static inline int capa_for_oss(struct lustre_capa *c)
{
	return (c->lc_opc & CAPA_OPC_INDEX_LOOKUP) == 0;
}

/* HMAC algorithm for lustre_capa, carried in the top byte of lc_flags */
enum {
	CAPA_HMAC_ALG_SHA1 = 1, /**< sha1 algorithm */
	CAPA_HMAC_ALG_MAX,
};

#define CAPA_FL_MASK	    0x00ffffff
#define CAPA_HMAC_ALG_MASK      0xff000000
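
/*
 * Illustrative sketch (hypothetical helper): the top byte of lc_flags
 * carries the HMAC algorithm, so it is extracted with the mask above.
 */
static inline __u32 capa_example_alg(const struct lustre_capa *c)
{
	return (c->lc_flags & CAPA_HMAC_ALG_MASK) >> 24;
}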

struct lustre_capa_key {
	__u64	lk_seq;		/**< mds# */
	__u32	lk_keyid;	/**< key# */
	__u32	lk_padding;
	__u8	lk_key[CAPA_HMAC_KEY_MAX_LEN];	/**< key */
} __attribute__((packed));

extern void lustre_swab_lustre_capa_key(struct lustre_capa_key *k);

/** The link ea holds 1 \a link_ea_entry for each hardlink */
#define LINK_EA_MAGIC 0x11EAF1DFUL
struct link_ea_header {
	__u32 leh_magic;
	__u32 leh_reccount;
	__u64 leh_len;      /* total size */
	/* future use */
	__u32 padding1;
	__u32 padding2;
};

/** Hardlink data is name and parent fid.
 * Stored in this crazy struct for maximum packing and endian-neutrality
 */
struct link_ea_entry {
	/** __u16 stored big-endian, unaligned */
	unsigned char	lee_reclen[2];
	unsigned char	lee_parent_fid[sizeof(struct lu_fid)];
	char		lee_name[0];
} __attribute__((packed));
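
/*
 * Illustrative sketch (hypothetical helper): lee_reclen is an unaligned
 * big-endian __u16, so it is assembled byte by byte; the value covers
 * the whole entry (reclen, parent fid and name).
 */
static inline int lee_example_reclen(const struct link_ea_entry *lee)
{
	return (lee->lee_reclen[0] << 8) | lee->lee_reclen[1];
}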

/** fid2path request/reply structure */
struct getinfo_fid2path {
	struct lu_fid	gf_fid;
	__u64		gf_recno;
	__u32		gf_linkno;
	__u32		gf_pathlen;
	char		gf_path[0];
} __attribute__((packed));
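
/*
 * Illustrative sketch (hypothetical helper): the structure is variable
 * length, so a buffer holding a path of up to "pathlen" bytes is sized
 * as below (gf_path is a zero-length trailing array and contributes
 * nothing to sizeof).
 */
static inline size_t fid2path_example_size(__u32 pathlen)
{
	return sizeof(struct getinfo_fid2path) + pathlen;
}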

void lustre_swab_fid2path(struct getinfo_fid2path *gf);

enum {
	LAYOUT_INTENT_ACCESS    = 0,
	LAYOUT_INTENT_READ      = 1,
	LAYOUT_INTENT_WRITE     = 2,
	LAYOUT_INTENT_GLIMPSE   = 3,
	LAYOUT_INTENT_TRUNC     = 4,
	LAYOUT_INTENT_RELEASE   = 5,
	LAYOUT_INTENT_RESTORE   = 6
};

/* enqueue layout lock with intent */
struct layout_intent {
	__u32 li_opc; /* intent operation for enqueue, read, write etc */
	__u32 li_flags;
	__u64 li_start;
	__u64 li_end;
};

void lustre_swab_layout_intent(struct layout_intent *li);

/**
 * On the wire version of hsm_progress structure.
 *
 * Contains the userspace hsm_progress and some internal fields.
 */
struct hsm_progress_kernel {
	/* Field taken from struct hsm_progress */
	lustre_fid		hpk_fid;
	__u64			hpk_cookie;
	struct hsm_extent	hpk_extent;
	__u16			hpk_flags;
	__u16			hpk_errval; /* positive val */
	__u32			hpk_padding1;
	/* Additional fields */
	__u64			hpk_data_version;
	__u64			hpk_padding2;
} __attribute__((packed));

extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
extern void lustre_swab_hsm_current_action(struct hsm_current_action *action);
extern void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
extern void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
extern void lustre_swab_hsm_request(struct hsm_request *hr);

/**
 * These are the object update opcodes under UPDATE_OBJ, currently used
 * by cross-ref operations between MDTs.
 *
 * During a cross-ref operation, the master MDT, to which the client
 * sends the request, disassembles the operation into object updates;
 * the OSP then sends these updates to the remote MDT to be executed.
 *
 *   Update request format
 *   magic:  UPDATE_BUFFER_MAGIC_V1
 *   count:  how many updates are in the request
 *   bufs[0]: the packed updates follow
 *   update[0]:
 *		type: object_update_op, the opcode of the update
 *		fid: the object fid of the update
 *		lens/bufs: other parameters of the update
 *   update[1]:
 *		type: object_update_op, the opcode of the update
 *		fid: the object fid of the update
 *		lens/bufs: other parameters of the update
 *   ..........
 *   update[7]:	type: object_update_op, the opcode of the update
 *		fid: the object fid of the update
 *		lens/bufs: other parameters of the update
 *   Currently at most 8 updates are packed per object update request.
 *
 *******************************************************************
 *   update reply format:
 *
 *   ur_version: UPDATE_REPLY_V1
 *   ur_count:   the number of replies, usually equal to the number of
 *		 updates in the request.
 *   ur_lens:    the reply length of each object update.
 *
 *   replies:    1st update reply  [4bytes_ret: other body]
 *		 2nd update reply  [4bytes_ret: other body]
 *		 .....
 *		 nth update reply  [4bytes_ret: other body]
 *
 *   So each update reply is formatted as
 *	 result (4 bytes) : other body
 */

#define UPDATE_MAX_OPS		10
#define UPDATE_BUFFER_MAGIC_V1	0xBDDE0001
#define UPDATE_BUFFER_MAGIC	UPDATE_BUFFER_MAGIC_V1
#define UPDATE_BUF_COUNT	8
enum object_update_op {
	OBJ_CREATE		= 1,
	OBJ_DESTROY		= 2,
	OBJ_REF_ADD		= 3,
	OBJ_REF_DEL		= 4,
	OBJ_ATTR_SET		= 5,
	OBJ_ATTR_GET		= 6,
	OBJ_XATTR_SET		= 7,
	OBJ_XATTR_GET		= 8,
	OBJ_INDEX_LOOKUP	= 9,
	OBJ_INDEX_INSERT	= 10,
	OBJ_INDEX_DELETE	= 11,
	OBJ_LAST
};

struct update {
	__u32		u_type;
	__u32		u_batchid;
	struct lu_fid	u_fid;
	__u32		u_lens[UPDATE_BUF_COUNT];
	__u32		u_bufs[0];
};

struct update_buf {
	__u32	ub_magic;
	__u32	ub_count;
	__u32	ub_bufs[0];
};
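
/*
 * Illustrative sketch (hypothetical helper): the on-wire size of one
 * struct update is the fixed part plus every parameter buffer rounded
 * up to 8 bytes, matching the 8-byte rounding used when the lens/bufs
 * parameters are packed.
 */
static inline int update_example_size(const struct update *update)
{
	int size = offsetof(struct update, u_bufs[0]);
	int i;

	for (i = 0; i < UPDATE_BUF_COUNT; i++)
		size += (update->u_lens[i] + 7) & ~7;

	return size;
}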

#define UPDATE_REPLY_V1		0x00BD0001
struct update_reply {
	__u32	ur_version;
	__u32	ur_count;
	__u32	ur_lens[0];
};
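
/*
 * Illustrative sketch (hypothetical helper, assuming the reply buffers
 * use the same 8-byte rounding as the request side): locating the body
 * of reply "index" means skipping the rounded ur_lens array and then
 * each earlier reply, itself rounded up to 8 bytes.
 */
static inline void *update_reply_example_buf(struct update_reply *ur,
					     unsigned int index)
{
	char *ptr = (char *)ur +
		((sizeof(struct update_reply) +
		  ur->ur_count * sizeof(__u32) + 7) & ~7UL);
	unsigned int i;

	for (i = 0; i < index; i++)
		ptr += (ur->ur_lens[i] + 7) & ~7;

	return ptr;
}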

void lustre_swab_update_buf(struct update_buf *ub);
void lustre_swab_update_reply_buf(struct update_reply *ur);

/** layout swap request structure
 * fid1 and fid2 are in mdt_body
 */
struct mdc_swap_layouts {
	__u64	msl_flags;
} __packed;

void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);

#endif
/** @} lustreidl */