HardenedBSD src tree https://hardenedbsd.org/
  1. /*-
  2. * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions
  6. * are met:
  7. * 1. Redistributions of source code must retain the above copyright
  8. * notice, this list of conditions and the following disclaimer.
  9. * 2. Redistributions in binary form must reproduce the above copyright
  10. * notice, this list of conditions and the following disclaimer in the
  11. * documentation and/or other materials provided with the distribution.
  12. *
  13. * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
  14. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  15. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  16. * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
  17. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  18. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  19. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  20. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  21. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  22. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  23. * SUCH DAMAGE.
  24. *
  25. * $FreeBSD$
  26. */
  27. #include "en.h"
  28. #include <sys/sockio.h>
  29. #include <machine/atomic.h>
  30. #ifndef ETH_DRIVER_VERSION
  31. #define ETH_DRIVER_VERSION "3.5.2"
  32. #endif
  33. #define DRIVER_RELDATE "September 2019"
  34. static const char mlx5e_version[] = "mlx5en: Mellanox Ethernet driver "
  35. ETH_DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
  36. static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs);
  37. struct mlx5e_channel_param {
  38. struct mlx5e_rq_param rq;
  39. struct mlx5e_sq_param sq;
  40. struct mlx5e_cq_param rx_cq;
  41. struct mlx5e_cq_param tx_cq;
  42. };
  43. struct media {
  44. u32 subtype;
  45. u64 baudrate;
  46. };
  47. static const struct media mlx5e_mode_table[MLX5E_LINK_SPEEDS_NUMBER][MLX5E_LINK_MODES_NUMBER] = {
  48. [MLX5E_1000BASE_CX_SGMII][MLX5E_SGMII] = {
  49. .subtype = IFM_1000_CX_SGMII,
  50. .baudrate = IF_Mbps(1000ULL),
  51. },
  52. [MLX5E_1000BASE_KX][MLX5E_KX] = {
  53. .subtype = IFM_1000_KX,
  54. .baudrate = IF_Mbps(1000ULL),
  55. },
  56. [MLX5E_10GBASE_CX4][MLX5E_CX4] = {
  57. .subtype = IFM_10G_CX4,
  58. .baudrate = IF_Gbps(10ULL),
  59. },
  60. [MLX5E_10GBASE_KX4][MLX5E_KX4] = {
  61. .subtype = IFM_10G_KX4,
  62. .baudrate = IF_Gbps(10ULL),
  63. },
  64. [MLX5E_10GBASE_KR][MLX5E_KR] = {
  65. .subtype = IFM_10G_KR,
  66. .baudrate = IF_Gbps(10ULL),
  67. },
  68. [MLX5E_20GBASE_KR2][MLX5E_KR2] = {
  69. .subtype = IFM_20G_KR2,
  70. .baudrate = IF_Gbps(20ULL),
  71. },
  72. [MLX5E_40GBASE_CR4][MLX5E_CR4] = {
  73. .subtype = IFM_40G_CR4,
  74. .baudrate = IF_Gbps(40ULL),
  75. },
  76. [MLX5E_40GBASE_KR4][MLX5E_KR4] = {
  77. .subtype = IFM_40G_KR4,
  78. .baudrate = IF_Gbps(40ULL),
  79. },
  80. [MLX5E_56GBASE_R4][MLX5E_R] = {
  81. .subtype = IFM_56G_R4,
  82. .baudrate = IF_Gbps(56ULL),
  83. },
  84. [MLX5E_10GBASE_CR][MLX5E_CR1] = {
  85. .subtype = IFM_10G_CR1,
  86. .baudrate = IF_Gbps(10ULL),
  87. },
  88. [MLX5E_10GBASE_SR][MLX5E_SR] = {
  89. .subtype = IFM_10G_SR,
  90. .baudrate = IF_Gbps(10ULL),
  91. },
  92. [MLX5E_10GBASE_ER_LR][MLX5E_ER] = {
  93. .subtype = IFM_10G_ER,
  94. .baudrate = IF_Gbps(10ULL),
  95. },
  96. [MLX5E_10GBASE_ER_LR][MLX5E_LR] = {
  97. .subtype = IFM_10G_LR,
  98. .baudrate = IF_Gbps(10ULL),
  99. },
  100. [MLX5E_40GBASE_SR4][MLX5E_SR4] = {
  101. .subtype = IFM_40G_SR4,
  102. .baudrate = IF_Gbps(40ULL),
  103. },
  104. [MLX5E_40GBASE_LR4_ER4][MLX5E_LR4] = {
  105. .subtype = IFM_40G_LR4,
  106. .baudrate = IF_Gbps(40ULL),
  107. },
  108. [MLX5E_40GBASE_LR4_ER4][MLX5E_ER4] = {
  109. .subtype = IFM_40G_ER4,
  110. .baudrate = IF_Gbps(40ULL),
  111. },
  112. [MLX5E_100GBASE_CR4][MLX5E_CR4] = {
  113. .subtype = IFM_100G_CR4,
  114. .baudrate = IF_Gbps(100ULL),
  115. },
  116. [MLX5E_100GBASE_SR4][MLX5E_SR4] = {
  117. .subtype = IFM_100G_SR4,
  118. .baudrate = IF_Gbps(100ULL),
  119. },
  120. [MLX5E_100GBASE_KR4][MLX5E_KR4] = {
  121. .subtype = IFM_100G_KR4,
  122. .baudrate = IF_Gbps(100ULL),
  123. },
  124. [MLX5E_100GBASE_LR4][MLX5E_LR4] = {
  125. .subtype = IFM_100G_LR4,
  126. .baudrate = IF_Gbps(100ULL),
  127. },
  128. [MLX5E_100BASE_TX][MLX5E_TX] = {
  129. .subtype = IFM_100_TX,
  130. .baudrate = IF_Mbps(100ULL),
  131. },
  132. [MLX5E_1000BASE_T][MLX5E_T] = {
  133. .subtype = IFM_1000_T,
  134. .baudrate = IF_Mbps(1000ULL),
  135. },
  136. [MLX5E_10GBASE_T][MLX5E_T] = {
  137. .subtype = IFM_10G_T,
  138. .baudrate = IF_Gbps(10ULL),
  139. },
  140. [MLX5E_25GBASE_CR][MLX5E_CR] = {
  141. .subtype = IFM_25G_CR,
  142. .baudrate = IF_Gbps(25ULL),
  143. },
  144. [MLX5E_25GBASE_KR][MLX5E_KR] = {
  145. .subtype = IFM_25G_KR,
  146. .baudrate = IF_Gbps(25ULL),
  147. },
  148. [MLX5E_25GBASE_SR][MLX5E_SR] = {
  149. .subtype = IFM_25G_SR,
  150. .baudrate = IF_Gbps(25ULL),
  151. },
  152. [MLX5E_50GBASE_CR2][MLX5E_CR2] = {
  153. .subtype = IFM_50G_CR2,
  154. .baudrate = IF_Gbps(50ULL),
  155. },
  156. [MLX5E_50GBASE_KR2][MLX5E_KR2] = {
  157. .subtype = IFM_50G_KR2,
  158. .baudrate = IF_Gbps(50ULL),
  159. },
  160. [MLX5E_50GBASE_KR4][MLX5E_KR4] = {
  161. .subtype = IFM_50G_KR4,
  162. .baudrate = IF_Gbps(50ULL),
  163. },
  164. };
  165. static const struct media mlx5e_ext_mode_table[MLX5E_EXT_LINK_SPEEDS_NUMBER][MLX5E_LINK_MODES_NUMBER] = {
  166. [MLX5E_SGMII_100M][MLX5E_SGMII] = {
  167. .subtype = IFM_100_SGMII,
  168. .baudrate = IF_Mbps(100),
  169. },
  170. [MLX5E_1000BASE_X_SGMII][MLX5E_KX] = {
  171. .subtype = IFM_1000_KX,
  172. .baudrate = IF_Mbps(1000),
  173. },
  174. [MLX5E_1000BASE_X_SGMII][MLX5E_CX_SGMII] = {
  175. .subtype = IFM_1000_CX_SGMII,
  176. .baudrate = IF_Mbps(1000),
  177. },
  178. [MLX5E_1000BASE_X_SGMII][MLX5E_CX] = {
  179. .subtype = IFM_1000_CX,
  180. .baudrate = IF_Mbps(1000),
  181. },
  182. [MLX5E_1000BASE_X_SGMII][MLX5E_LX] = {
  183. .subtype = IFM_1000_LX,
  184. .baudrate = IF_Mbps(1000),
  185. },
  186. [MLX5E_1000BASE_X_SGMII][MLX5E_SX] = {
  187. .subtype = IFM_1000_SX,
  188. .baudrate = IF_Mbps(1000),
  189. },
  190. [MLX5E_1000BASE_X_SGMII][MLX5E_T] = {
  191. .subtype = IFM_1000_T,
  192. .baudrate = IF_Mbps(1000),
  193. },
  194. [MLX5E_5GBASE_R][MLX5E_T] = {
  195. .subtype = IFM_5000_T,
  196. .baudrate = IF_Mbps(5000),
  197. },
  198. [MLX5E_5GBASE_R][MLX5E_KR] = {
  199. .subtype = IFM_5000_KR,
  200. .baudrate = IF_Mbps(5000),
  201. },
  202. [MLX5E_5GBASE_R][MLX5E_KR1] = {
  203. .subtype = IFM_5000_KR1,
  204. .baudrate = IF_Mbps(5000),
  205. },
  206. [MLX5E_5GBASE_R][MLX5E_KR_S] = {
  207. .subtype = IFM_5000_KR_S,
  208. .baudrate = IF_Mbps(5000),
  209. },
  210. [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_ER] = {
  211. .subtype = IFM_10G_ER,
  212. .baudrate = IF_Gbps(10ULL),
  213. },
  214. [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_KR] = {
  215. .subtype = IFM_10G_KR,
  216. .baudrate = IF_Gbps(10ULL),
  217. },
  218. [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_LR] = {
  219. .subtype = IFM_10G_LR,
  220. .baudrate = IF_Gbps(10ULL),
  221. },
  222. [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_SR] = {
  223. .subtype = IFM_10G_SR,
  224. .baudrate = IF_Gbps(10ULL),
  225. },
  226. [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_T] = {
  227. .subtype = IFM_10G_T,
  228. .baudrate = IF_Gbps(10ULL),
  229. },
  230. [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_AOC] = {
  231. .subtype = IFM_10G_AOC,
  232. .baudrate = IF_Gbps(10ULL),
  233. },
  234. [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_CR1] = {
  235. .subtype = IFM_10G_CR1,
  236. .baudrate = IF_Gbps(10ULL),
  237. },
  238. [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_CR4] = {
  239. .subtype = IFM_40G_CR4,
  240. .baudrate = IF_Gbps(40ULL),
  241. },
  242. [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_KR4] = {
  243. .subtype = IFM_40G_KR4,
  244. .baudrate = IF_Gbps(40ULL),
  245. },
  246. [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_LR4] = {
  247. .subtype = IFM_40G_LR4,
  248. .baudrate = IF_Gbps(40ULL),
  249. },
  250. [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_SR4] = {
  251. .subtype = IFM_40G_SR4,
  252. .baudrate = IF_Gbps(40ULL),
  253. },
  254. [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_ER4] = {
  255. .subtype = IFM_40G_ER4,
  256. .baudrate = IF_Gbps(40ULL),
  257. },
  258. [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR] = {
  259. .subtype = IFM_25G_CR,
  260. .baudrate = IF_Gbps(25ULL),
  261. },
  262. [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR] = {
  263. .subtype = IFM_25G_KR,
  264. .baudrate = IF_Gbps(25ULL),
  265. },
  266. [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_SR] = {
  267. .subtype = IFM_25G_SR,
  268. .baudrate = IF_Gbps(25ULL),
  269. },
  270. [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_ACC] = {
  271. .subtype = IFM_25G_ACC,
  272. .baudrate = IF_Gbps(25ULL),
  273. },
  274. [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_AOC] = {
  275. .subtype = IFM_25G_AOC,
  276. .baudrate = IF_Gbps(25ULL),
  277. },
  278. [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR1] = {
  279. .subtype = IFM_25G_CR1,
  280. .baudrate = IF_Gbps(25ULL),
  281. },
  282. [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR_S] = {
  283. .subtype = IFM_25G_CR_S,
  284. .baudrate = IF_Gbps(25ULL),
  285. },
  286. [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR1] = {
  287. .subtype = IFM_25G_KR1,
  288. .baudrate = IF_Gbps(25ULL),
  289. },
  290. [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR_S] = {
  291. .subtype = IFM_25G_KR_S,
  292. .baudrate = IF_Gbps(25ULL),
  293. },
  294. [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_LR] = {
  295. .subtype = IFM_25G_LR,
  296. .baudrate = IF_Gbps(25ULL),
  297. },
  298. [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_T] = {
  299. .subtype = IFM_25G_T,
  300. .baudrate = IF_Gbps(25ULL),
  301. },
  302. [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_CR2] = {
  303. .subtype = IFM_50G_CR2,
  304. .baudrate = IF_Gbps(50ULL),
  305. },
  306. [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_KR2] = {
  307. .subtype = IFM_50G_KR2,
  308. .baudrate = IF_Gbps(50ULL),
  309. },
  310. [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_KR4] = {
  311. .subtype = IFM_50G_KR4,
  312. .baudrate = IF_Gbps(50ULL),
  313. },
  314. [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_SR2] = {
  315. .subtype = IFM_50G_SR2,
  316. .baudrate = IF_Gbps(50ULL),
  317. },
  318. [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_LR2] = {
  319. .subtype = IFM_50G_LR2,
  320. .baudrate = IF_Gbps(50ULL),
  321. },
  322. [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_LR] = {
  323. .subtype = IFM_50G_LR,
  324. .baudrate = IF_Gbps(50ULL),
  325. },
  326. [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_SR] = {
  327. .subtype = IFM_50G_SR,
  328. .baudrate = IF_Gbps(50ULL),
  329. },
  330. [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_CP] = {
  331. .subtype = IFM_50G_CP,
  332. .baudrate = IF_Gbps(50ULL),
  333. },
  334. [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_FR] = {
  335. .subtype = IFM_50G_FR,
  336. .baudrate = IF_Gbps(50ULL),
  337. },
  338. [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_KR_PAM4] = {
  339. .subtype = IFM_50G_KR_PAM4,
  340. .baudrate = IF_Gbps(50ULL),
  341. },
  342. [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_CR4] = {
  343. .subtype = IFM_100G_CR4,
  344. .baudrate = IF_Gbps(100ULL),
  345. },
  346. [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_KR4] = {
  347. .subtype = IFM_100G_KR4,
  348. .baudrate = IF_Gbps(100ULL),
  349. },
  350. [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_LR4] = {
  351. .subtype = IFM_100G_LR4,
  352. .baudrate = IF_Gbps(100ULL),
  353. },
  354. [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_SR4] = {
  355. .subtype = IFM_100G_SR4,
  356. .baudrate = IF_Gbps(100ULL),
  357. },
  358. [MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_SR2] = {
  359. .subtype = IFM_100G_SR2,
  360. .baudrate = IF_Gbps(100ULL),
  361. },
  362. [MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_CP2] = {
  363. .subtype = IFM_100G_CP2,
  364. .baudrate = IF_Gbps(100ULL),
  365. },
  366. [MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_KR2_PAM4] = {
  367. .subtype = IFM_100G_KR2_PAM4,
  368. .baudrate = IF_Gbps(100ULL),
  369. },
  370. [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_DR4] = {
  371. .subtype = IFM_200G_DR4,
  372. .baudrate = IF_Gbps(200ULL),
  373. },
  374. [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_LR4] = {
  375. .subtype = IFM_200G_LR4,
  376. .baudrate = IF_Gbps(200ULL),
  377. },
  378. [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_SR4] = {
  379. .subtype = IFM_200G_SR4,
  380. .baudrate = IF_Gbps(200ULL),
  381. },
  382. [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_FR4] = {
  383. .subtype = IFM_200G_FR4,
  384. .baudrate = IF_Gbps(200ULL),
  385. },
  386. [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_CR4_PAM4] = {
  387. .subtype = IFM_200G_CR4_PAM4,
  388. .baudrate = IF_Gbps(200ULL),
  389. },
  390. [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_KR4_PAM4] = {
  391. .subtype = IFM_200G_KR4_PAM4,
  392. .baudrate = IF_Gbps(200ULL),
  393. },
  394. };
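/*
 * Note on the two tables above: the first index is the PTYS protocol
 * bit number reported by firmware (the legacy table for the plain
 * eth_proto fields, the extended table when ptys_extended_ethernet is
 * supported), and the second index is the cable/module sub-type.  The
 * carrier update code below takes ilog2() of the operational protocol
 * mask to get the first index and then picks the first sub-type entry
 * with a non-zero baudrate; all-zero entries simply mean that the
 * combination does not exist.
 */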
  395. MALLOC_DEFINE(M_MLX5EN, "MLX5EN", "MLX5 Ethernet");
  396. static void
  397. mlx5e_update_carrier(struct mlx5e_priv *priv)
  398. {
  399. struct mlx5_core_dev *mdev = priv->mdev;
  400. u32 out[MLX5_ST_SZ_DW(ptys_reg)];
  401. u32 eth_proto_oper;
  402. int error;
  403. u8 port_state;
  404. u8 is_er_type;
  405. u8 i, j;
  406. bool ext;
  407. struct media media_entry = {};
  408. port_state = mlx5_query_vport_state(mdev,
  409. MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
  410. if (port_state == VPORT_STATE_UP) {
  411. priv->media_status_last |= IFM_ACTIVE;
  412. } else {
  413. priv->media_status_last &= ~IFM_ACTIVE;
  414. priv->media_active_last = IFM_ETHER;
  415. if_link_state_change(priv->ifp, LINK_STATE_DOWN);
  416. return;
  417. }
  418. error = mlx5_query_port_ptys(mdev, out, sizeof(out),
  419. MLX5_PTYS_EN, 1);
  420. if (error) {
  421. priv->media_active_last = IFM_ETHER;
  422. priv->ifp->if_baudrate = 1;
  423. mlx5_en_err(priv->ifp, "query port ptys failed: 0x%x\n",
  424. error);
  425. return;
  426. }
  427. ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
  428. eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
  429. eth_proto_oper);
  430. i = ilog2(eth_proto_oper);
  431. for (j = 0; j != MLX5E_LINK_MODES_NUMBER; j++) {
  432. media_entry = ext ? mlx5e_ext_mode_table[i][j] :
  433. mlx5e_mode_table[i][j];
  434. if (media_entry.baudrate != 0)
  435. break;
  436. }
  437. if (media_entry.subtype == 0) {
  438. mlx5_en_err(priv->ifp,
  439. "Could not find operational media subtype\n");
  440. return;
  441. }
  442. switch (media_entry.subtype) {
  443. case IFM_10G_ER:
  444. error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type);
  445. if (error != 0) {
  446. mlx5_en_err(priv->ifp,
  447. "query port pddr failed: %d\n", error);
  448. }
  449. if (error != 0 || is_er_type == 0)
  450. media_entry.subtype = IFM_10G_LR;
  451. break;
  452. case IFM_40G_LR4:
  453. error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type);
  454. if (error != 0) {
  455. mlx5_en_err(priv->ifp,
  456. "query port pddr failed: %d\n", error);
  457. }
  458. if (error == 0 && is_er_type != 0)
  459. media_entry.subtype = IFM_40G_ER4;
  460. break;
  461. }
  462. priv->media_active_last = media_entry.subtype | IFM_ETHER | IFM_FDX;
  463. priv->ifp->if_baudrate = media_entry.baudrate;
  464. if_link_state_change(priv->ifp, LINK_STATE_UP);
  465. }
  466. static void
  467. mlx5e_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
  468. {
  469. struct mlx5e_priv *priv = dev->if_softc;
  470. ifmr->ifm_status = priv->media_status_last;
  471. ifmr->ifm_active = priv->media_active_last |
  472. (priv->params.rx_pauseframe_control ? IFM_ETH_RXPAUSE : 0) |
  473. (priv->params.tx_pauseframe_control ? IFM_ETH_TXPAUSE : 0);
  474. }
  475. static u32
  476. mlx5e_find_link_mode(u32 subtype, bool ext)
  477. {
  478. u32 i;
  479. u32 j;
  480. u32 link_mode = 0;
  481. u32 speeds_num = 0;
  482. struct media media_entry = {};
  483. switch (subtype) {
  484. case IFM_10G_LR:
  485. subtype = IFM_10G_ER;
  486. break;
  487. case IFM_40G_ER4:
  488. subtype = IFM_40G_LR4;
  489. break;
  490. }
  491. speeds_num = ext ? MLX5E_EXT_LINK_SPEEDS_NUMBER :
  492. MLX5E_LINK_SPEEDS_NUMBER;
  493. for (i = 0; i != speeds_num; i++) {
  494. for (j = 0; j < MLX5E_LINK_MODES_NUMBER ; ++j) {
  495. media_entry = ext ? mlx5e_ext_mode_table[i][j] :
  496. mlx5e_mode_table[i][j];
  497. if (media_entry.baudrate == 0)
  498. continue;
  499. if (media_entry.subtype == subtype) {
  500. link_mode |= MLX5E_PROT_MASK(i);
  501. }
  502. }
  503. }
  504. return (link_mode);
  505. }
  506. static int
  507. mlx5e_set_port_pause_and_pfc(struct mlx5e_priv *priv)
  508. {
  509. return (mlx5_set_port_pause_and_pfc(priv->mdev, 1,
  510. priv->params.rx_pauseframe_control,
  511. priv->params.tx_pauseframe_control,
  512. priv->params.rx_priority_flow_control,
  513. priv->params.tx_priority_flow_control));
  514. }
  515. static int
  516. mlx5e_set_port_pfc(struct mlx5e_priv *priv)
  517. {
  518. int error;
  519. if (priv->gone != 0) {
  520. error = -ENXIO;
  521. } else if (priv->params.rx_pauseframe_control ||
  522. priv->params.tx_pauseframe_control) {
  523. mlx5_en_err(priv->ifp,
  524. "Global pauseframes must be disabled before enabling PFC.\n");
  525. error = -EINVAL;
  526. } else {
  527. error = mlx5e_set_port_pause_and_pfc(priv);
  528. }
  529. return (error);
  530. }
  531. static int
  532. mlx5e_media_change(struct ifnet *dev)
  533. {
  534. struct mlx5e_priv *priv = dev->if_softc;
  535. struct mlx5_core_dev *mdev = priv->mdev;
  536. u32 eth_proto_cap;
  537. u32 link_mode;
  538. u32 out[MLX5_ST_SZ_DW(ptys_reg)];
  539. int was_opened;
  540. int locked;
  541. int error;
  542. bool ext;
  543. locked = PRIV_LOCKED(priv);
  544. if (!locked)
  545. PRIV_LOCK(priv);
  546. if (IFM_TYPE(priv->media.ifm_media) != IFM_ETHER) {
  547. error = EINVAL;
  548. goto done;
  549. }
  550. error = mlx5_query_port_ptys(mdev, out, sizeof(out),
  551. MLX5_PTYS_EN, 1);
  552. if (error != 0) {
  553. mlx5_en_err(dev, "Query port media capability failed\n");
  554. goto done;
  555. }
  556. ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
  557. link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media), ext);
  558. /* query supported capabilities */
  559. eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
  560. eth_proto_capability);
  561. /* check for autoselect */
  562. if (IFM_SUBTYPE(priv->media.ifm_media) == IFM_AUTO) {
  563. link_mode = eth_proto_cap;
  564. if (link_mode == 0) {
  565. mlx5_en_err(dev, "Port media capability is zero\n");
  566. error = EINVAL;
  567. goto done;
  568. }
  569. } else {
  570. link_mode = link_mode & eth_proto_cap;
  571. if (link_mode == 0) {
  572. mlx5_en_err(dev, "Not supported link mode requested\n");
  573. error = EINVAL;
  574. goto done;
  575. }
  576. }
  577. if (priv->media.ifm_media & (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
  578. /* check if PFC is enabled */
  579. if (priv->params.rx_priority_flow_control ||
  580. priv->params.tx_priority_flow_control) {
  581. mlx5_en_err(dev, "PFC must be disabled before enabling global pauseframes.\n");
  582. error = EINVAL;
  583. goto done;
  584. }
  585. }
  586. /* update pauseframe control bits */
  587. priv->params.rx_pauseframe_control =
  588. (priv->media.ifm_media & IFM_ETH_RXPAUSE) ? 1 : 0;
  589. priv->params.tx_pauseframe_control =
  590. (priv->media.ifm_media & IFM_ETH_TXPAUSE) ? 1 : 0;
  591. /* check if device is opened */
  592. was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
  593. /* reconfigure the hardware */
  594. mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
  595. mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN, ext);
  596. error = -mlx5e_set_port_pause_and_pfc(priv);
  597. if (was_opened)
  598. mlx5_set_port_status(mdev, MLX5_PORT_UP);
  599. done:
  600. if (!locked)
  601. PRIV_UNLOCK(priv);
  602. return (error);
  603. }
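/*
 * Note: global pause frames and priority flow control (PFC) are kept
 * mutually exclusive.  mlx5e_set_port_pfc() above refuses to enable
 * PFC while global pauseframes are on, and mlx5e_media_change()
 * refuses to enable global pauseframes while PFC is configured.
 */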
  604. static void
  605. mlx5e_update_carrier_work(struct work_struct *work)
  606. {
  607. struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
  608. update_carrier_work);
  609. PRIV_LOCK(priv);
  610. if (test_bit(MLX5E_STATE_OPENED, &priv->state))
  611. mlx5e_update_carrier(priv);
  612. PRIV_UNLOCK(priv);
  613. }
  614. #define MLX5E_PCIE_PERF_GET_64(a,b,c,d,e,f) \
  615. s_debug->c = MLX5_GET64(mpcnt_reg, out, counter_set.f.c);
  616. #define MLX5E_PCIE_PERF_GET_32(a,b,c,d,e,f) \
  617. s_debug->c = MLX5_GET(mpcnt_reg, out, counter_set.f.c);
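/*
 * The two accessor macros above are expanded through the
 * MLX5E_PCIE_*_COUNTERS_*() X-macro lists used below; every list entry
 * copies one counter field out of the MPCNT register layout into the
 * debug statistics, i.e. each expansion becomes an assignment of the
 * form:
 *
 *	s_debug-><counter> = MLX5_GET(mpcnt_reg, out, counter_set.<layout>.<counter>);
 */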
  618. static void
  619. mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
  620. {
  621. struct mlx5_core_dev *mdev = priv->mdev;
  622. struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
  623. const unsigned sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
  624. void *out;
  625. void *in;
  626. int err;
  627. /* allocate firmware request structures */
  628. in = mlx5_vzalloc(sz);
  629. out = mlx5_vzalloc(sz);
  630. if (in == NULL || out == NULL)
  631. goto free_out;
  632. MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
  633. err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
  634. if (err != 0)
  635. goto free_out;
  636. MLX5E_PCIE_PERFORMANCE_COUNTERS_64(MLX5E_PCIE_PERF_GET_64)
  637. MLX5E_PCIE_PERFORMANCE_COUNTERS_32(MLX5E_PCIE_PERF_GET_32)
  638. MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP);
  639. err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
  640. if (err != 0)
  641. goto free_out;
  642. MLX5E_PCIE_TIMERS_AND_STATES_COUNTERS_32(MLX5E_PCIE_PERF_GET_32)
  643. MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_LANE_COUNTERS_GROUP);
  644. err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
  645. if (err != 0)
  646. goto free_out;
  647. MLX5E_PCIE_LANE_COUNTERS_32(MLX5E_PCIE_PERF_GET_32)
  648. free_out:
  649. /* free firmware request structures */
  650. kvfree(in);
  651. kvfree(out);
  652. }
  653. /*
  654. * This function reads the physical port counters from the firmware
  655. * using a pre-defined layout defined by various MLX5E_PPORT_XXX()
  656. * macros. The output is converted from big-endian 64-bit values into
  657. * host endian ones and stored in the "priv->stats.pport" structure.
  658. */
  659. static void
  660. mlx5e_update_pport_counters(struct mlx5e_priv *priv)
  661. {
  662. struct mlx5_core_dev *mdev = priv->mdev;
  663. struct mlx5e_pport_stats *s = &priv->stats.pport;
  664. struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
  665. u32 *in;
  666. u32 *out;
  667. const u64 *ptr;
  668. unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
  669. unsigned x;
  670. unsigned y;
  671. unsigned z;
  672. /* allocate firmware request structures */
  673. in = mlx5_vzalloc(sz);
  674. out = mlx5_vzalloc(sz);
  675. if (in == NULL || out == NULL)
  676. goto free_out;
  677. /*
  678. * Get pointer to the 64-bit counter set which is located at a
  679. * fixed offset in the output firmware request structure:
  680. */
  681. ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set);
  682. MLX5_SET(ppcnt_reg, in, local_port, 1);
  683. /* read IEEE802_3 counter group using predefined counter layout */
  684. MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
  685. mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
  686. for (x = 0, y = MLX5E_PPORT_PER_PRIO_STATS_NUM;
  687. x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++)
  688. s->arg[y] = be64toh(ptr[x]);
  689. /* read RFC2819 counter group using predefined counter layout */
  690. MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
  691. mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
  692. for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++)
  693. s->arg[y] = be64toh(ptr[x]);
  694. for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM +
  695. MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++)
  696. s_debug->arg[y] = be64toh(ptr[x]);
  697. /* read RFC2863 counter group using predefined counter layout */
  698. MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
  699. mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
  700. for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++)
  701. s_debug->arg[y] = be64toh(ptr[x]);
  702. /* read physical layer stats counter group using predefined counter layout */
  703. MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
  704. mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
  705. for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++)
  706. s_debug->arg[y] = be64toh(ptr[x]);
  707. /* read Extended Ethernet counter group using predefined counter layout */
  708. MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
  709. mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
  710. for (x = 0; x != MLX5E_PPORT_ETHERNET_EXTENDED_STATS_DEBUG_NUM; x++, y++)
  711. s_debug->arg[y] = be64toh(ptr[x]);
  712. /* read Extended Statistical Group */
  713. if (MLX5_CAP_GEN(mdev, pcam_reg) &&
  714. MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) &&
  715. MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters)) {
  716. /* read Extended Statistical counter group using predefined counter layout */
  717. MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
  718. mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
  719. for (x = 0; x != MLX5E_PPORT_STATISTICAL_DEBUG_NUM; x++, y++)
  720. s_debug->arg[y] = be64toh(ptr[x]);
  721. }
  722. /* read PCIE counters */
  723. mlx5e_update_pcie_counters(priv);
  724. /* read per-priority counters */
  725. MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
  726. /* iterate all the priorities */
  727. for (y = z = 0; z != MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO; z++) {
  728. MLX5_SET(ppcnt_reg, in, prio_tc, z);
  729. mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
  730. /* read per priority stats counter group using predefined counter layout */
  731. for (x = 0; x != (MLX5E_PPORT_PER_PRIO_STATS_NUM /
  732. MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO); x++, y++)
  733. s->arg[y] = be64toh(ptr[x]);
  734. }
  735. free_out:
  736. /* free firmware request structures */
  737. kvfree(in);
  738. kvfree(out);
  739. }
  740. static void
  741. mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv)
  742. {
  743. u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
  744. u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
  745. if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
  746. return;
  747. MLX5_SET(query_vnic_env_in, in, opcode,
  748. MLX5_CMD_OP_QUERY_VNIC_ENV);
  749. MLX5_SET(query_vnic_env_in, in, op_mod, 0);
  750. MLX5_SET(query_vnic_env_in, in, other_vport, 0);
  751. if (mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out)) != 0)
  752. return;
  753. priv->stats.vport.rx_steer_missed_packets =
  754. MLX5_GET64(query_vnic_env_out, out,
  755. vport_env.nic_receive_steering_discard);
  756. }
  757. /*
  758. * This function is called regularly to collect all statistics
  759. * counters from the firmware. The values can be viewed through the
  760. * sysctl interface. Execution is serialized using the priv's global
  761. * configuration lock.
  762. */
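/*
 * Collection order in mlx5e_update_stats_locked() below: the software
 * per-channel RQ/SQ counters are summed first, then the vnic
 * environment and vport counters are queried from firmware, then the
 * physical port counters (PPCNT/MPCNT), and finally the optional
 * diagnostics, FEC and temperature readings are refreshed.
 */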
  763. static void
  764. mlx5e_update_stats_locked(struct mlx5e_priv *priv)
  765. {
  766. struct mlx5_core_dev *mdev = priv->mdev;
  767. struct mlx5e_vport_stats *s = &priv->stats.vport;
  768. struct mlx5e_sq_stats *sq_stats;
  769. #if (__FreeBSD_version < 1100000)
  770. struct ifnet *ifp = priv->ifp;
  771. #endif
  772. u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
  773. u32 *out;
  774. int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
  775. u64 tso_packets = 0;
  776. u64 tso_bytes = 0;
  777. u64 tx_queue_dropped = 0;
  778. u64 tx_defragged = 0;
  779. u64 tx_offload_none = 0;
  780. u64 lro_packets = 0;
  781. u64 lro_bytes = 0;
  782. u64 sw_lro_queued = 0;
  783. u64 sw_lro_flushed = 0;
  784. u64 rx_csum_none = 0;
  785. u64 rx_wqe_err = 0;
  786. u64 rx_packets = 0;
  787. u64 rx_bytes = 0;
  788. u32 rx_out_of_buffer = 0;
  789. int error;
  790. int i;
  791. int j;
  792. out = mlx5_vzalloc(outlen);
  793. if (out == NULL)
  794. goto free_out;
  795. /* Collect the SW counters first and then the HW counters, for consistency */
  796. for (i = 0; i < priv->params.num_channels; i++) {
  797. struct mlx5e_channel *pch = priv->channel + i;
  798. struct mlx5e_rq *rq = &pch->rq;
  799. struct mlx5e_rq_stats *rq_stats = &pch->rq.stats;
  800. /* collect stats from LRO */
  801. rq_stats->sw_lro_queued = rq->lro.lro_queued;
  802. rq_stats->sw_lro_flushed = rq->lro.lro_flushed;
  803. sw_lro_queued += rq_stats->sw_lro_queued;
  804. sw_lro_flushed += rq_stats->sw_lro_flushed;
  805. lro_packets += rq_stats->lro_packets;
  806. lro_bytes += rq_stats->lro_bytes;
  807. rx_csum_none += rq_stats->csum_none;
  808. rx_wqe_err += rq_stats->wqe_err;
  809. rx_packets += rq_stats->packets;
  810. rx_bytes += rq_stats->bytes;
  811. for (j = 0; j < priv->num_tc; j++) {
  812. sq_stats = &pch->sq[j].stats;
  813. tso_packets += sq_stats->tso_packets;
  814. tso_bytes += sq_stats->tso_bytes;
  815. tx_queue_dropped += sq_stats->dropped;
  816. tx_queue_dropped += sq_stats->enobuf;
  817. tx_defragged += sq_stats->defragged;
  818. tx_offload_none += sq_stats->csum_offload_none;
  819. }
  820. }
  821. /* update counters */
  822. s->tso_packets = tso_packets;
  823. s->tso_bytes = tso_bytes;
  824. s->tx_queue_dropped = tx_queue_dropped;
  825. s->tx_defragged = tx_defragged;
  826. s->lro_packets = lro_packets;
  827. s->lro_bytes = lro_bytes;
  828. s->sw_lro_queued = sw_lro_queued;
  829. s->sw_lro_flushed = sw_lro_flushed;
  830. s->rx_csum_none = rx_csum_none;
  831. s->rx_wqe_err = rx_wqe_err;
  832. s->rx_packets = rx_packets;
  833. s->rx_bytes = rx_bytes;
  834. mlx5e_grp_vnic_env_update_stats(priv);
  835. /* HW counters */
  836. memset(in, 0, sizeof(in));
  837. MLX5_SET(query_vport_counter_in, in, opcode,
  838. MLX5_CMD_OP_QUERY_VPORT_COUNTER);
  839. MLX5_SET(query_vport_counter_in, in, op_mod, 0);
  840. MLX5_SET(query_vport_counter_in, in, other_vport, 0);
  841. memset(out, 0, outlen);
  842. /* get number of out-of-buffer drops first */
  843. if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0 &&
  844. mlx5_vport_query_out_of_rx_buffer(mdev, priv->counter_set_id,
  845. &rx_out_of_buffer) == 0) {
  846. s->rx_out_of_buffer = rx_out_of_buffer;
  847. }
  848. /* get port statistics */
  849. if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen) == 0) {
  850. #define MLX5_GET_CTR(out, x) \
  851. MLX5_GET64(query_vport_counter_out, out, x)
  852. s->rx_error_packets =
  853. MLX5_GET_CTR(out, received_errors.packets);
  854. s->rx_error_bytes =
  855. MLX5_GET_CTR(out, received_errors.octets);
  856. s->tx_error_packets =
  857. MLX5_GET_CTR(out, transmit_errors.packets);
  858. s->tx_error_bytes =
  859. MLX5_GET_CTR(out, transmit_errors.octets);
  860. s->rx_unicast_packets =
  861. MLX5_GET_CTR(out, received_eth_unicast.packets);
  862. s->rx_unicast_bytes =
  863. MLX5_GET_CTR(out, received_eth_unicast.octets);
  864. s->tx_unicast_packets =
  865. MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
  866. s->tx_unicast_bytes =
  867. MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
  868. s->rx_multicast_packets =
  869. MLX5_GET_CTR(out, received_eth_multicast.packets);
  870. s->rx_multicast_bytes =
  871. MLX5_GET_CTR(out, received_eth_multicast.octets);
  872. s->tx_multicast_packets =
  873. MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
  874. s->tx_multicast_bytes =
  875. MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
  876. s->rx_broadcast_packets =
  877. MLX5_GET_CTR(out, received_eth_broadcast.packets);
  878. s->rx_broadcast_bytes =
  879. MLX5_GET_CTR(out, received_eth_broadcast.octets);
  880. s->tx_broadcast_packets =
  881. MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
  882. s->tx_broadcast_bytes =
  883. MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
  884. s->tx_packets = s->tx_unicast_packets +
  885. s->tx_multicast_packets + s->tx_broadcast_packets;
  886. s->tx_bytes = s->tx_unicast_bytes + s->tx_multicast_bytes +
  887. s->tx_broadcast_bytes;
  888. /* Update calculated offload counters */
  889. s->tx_csum_offload = s->tx_packets - tx_offload_none;
  890. s->rx_csum_good = s->rx_packets - s->rx_csum_none;
  891. }
  892. /* Get physical port counters */
  893. mlx5e_update_pport_counters(priv);
  894. s->tx_jumbo_packets =
  895. priv->stats.port_stats_debug.tx_stat_p1519to2047octets +
  896. priv->stats.port_stats_debug.tx_stat_p2048to4095octets +
  897. priv->stats.port_stats_debug.tx_stat_p4096to8191octets +
  898. priv->stats.port_stats_debug.tx_stat_p8192to10239octets;
  899. #if (__FreeBSD_version < 1100000)
  900. /* no get_counters interface in fbsd 10 */
  901. ifp->if_ipackets = s->rx_packets;
  902. ifp->if_ierrors = priv->stats.pport.in_range_len_errors +
  903. priv->stats.pport.out_of_range_len +
  904. priv->stats.pport.too_long_errors +
  905. priv->stats.pport.check_seq_err +
  906. priv->stats.pport.alignment_err;
  907. ifp->if_iqdrops = s->rx_out_of_buffer;
  908. ifp->if_opackets = s->tx_packets;
  909. ifp->if_oerrors = priv->stats.port_stats_debug.out_discards;
  910. ifp->if_snd.ifq_drops = s->tx_queue_dropped;
  911. ifp->if_ibytes = s->rx_bytes;
  912. ifp->if_obytes = s->tx_bytes;
  913. ifp->if_collisions =
  914. priv->stats.pport.collisions;
  915. #endif
  916. free_out:
  917. kvfree(out);
  918. /* Update diagnostics, if any */
  919. if (priv->params_ethtool.diag_pci_enable ||
  920. priv->params_ethtool.diag_general_enable) {
  921. error = mlx5_core_get_diagnostics_full(mdev,
  922. priv->params_ethtool.diag_pci_enable ? &priv->params_pci : NULL,
  923. priv->params_ethtool.diag_general_enable ? &priv->params_general : NULL);
  924. if (error != 0)
  925. mlx5_en_err(priv->ifp,
  926. "Failed reading diagnostics: %d\n", error);
  927. }
  928. /* Update FEC, if any */
  929. error = mlx5e_fec_update(priv);
  930. if (error != 0 && error != EOPNOTSUPP) {
  931. mlx5_en_err(priv->ifp,
  932. "Updating FEC failed: %d\n", error);
  933. }
  934. /* Update temperature, if any */
  935. if (priv->params_ethtool.hw_num_temp != 0) {
  936. error = mlx5e_hw_temperature_update(priv);
  937. if (error != 0 && error != EOPNOTSUPP) {
  938. mlx5_en_err(priv->ifp,
  939. "Updating temperature failed: %d\n", error);
  940. }
  941. }
  942. }
  943. static void
  944. mlx5e_update_stats_work(struct work_struct *work)
  945. {
  946. struct mlx5e_priv *priv;
  947. priv = container_of(work, struct mlx5e_priv, update_stats_work);
  948. PRIV_LOCK(priv);
  949. if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0 &&
  950. !test_bit(MLX5_INTERFACE_STATE_TEARDOWN, &priv->mdev->intf_state))
  951. mlx5e_update_stats_locked(priv);
  952. PRIV_UNLOCK(priv);
  953. }
  954. static void
  955. mlx5e_update_stats(void *arg)
  956. {
  957. struct mlx5e_priv *priv = arg;
  958. queue_work(priv->wq, &priv->update_stats_work);
  959. callout_reset(&priv->watchdog, hz, &mlx5e_update_stats, priv);
  960. }
  961. static void
  962. mlx5e_async_event_sub(struct mlx5e_priv *priv,
  963. enum mlx5_dev_event event)
  964. {
  965. switch (event) {
  966. case MLX5_DEV_EVENT_PORT_UP:
  967. case MLX5_DEV_EVENT_PORT_DOWN:
  968. queue_work(priv->wq, &priv->update_carrier_work);
  969. break;
  970. default:
  971. break;
  972. }
  973. }
  974. static void
  975. mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
  976. enum mlx5_dev_event event, unsigned long param)
  977. {
  978. struct mlx5e_priv *priv = vpriv;
  979. mtx_lock(&priv->async_events_mtx);
  980. if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
  981. mlx5e_async_event_sub(priv, event);
  982. mtx_unlock(&priv->async_events_mtx);
  983. }
  984. static void
  985. mlx5e_enable_async_events(struct mlx5e_priv *priv)
  986. {
  987. set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
  988. }
  989. static void
  990. mlx5e_disable_async_events(struct mlx5e_priv *priv)
  991. {
  992. mtx_lock(&priv->async_events_mtx);
  993. clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
  994. mtx_unlock(&priv->async_events_mtx);
  995. }
  996. static void mlx5e_calibration_callout(void *arg);
  997. static int mlx5e_calibration_duration = 20;
  998. static int mlx5e_fast_calibration = 1;
  999. static int mlx5e_normal_calibration = 30;
  1000. static SYSCTL_NODE(_hw_mlx5, OID_AUTO, calibr, CTLFLAG_RW, 0,
  1001. "MLX5 timestamp calibration parameteres");
  1002. SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, duration, CTLFLAG_RWTUN,
  1003. &mlx5e_calibration_duration, 0,
  1004. "Duration of initial calibration");
  1005. SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, fast, CTLFLAG_RWTUN,
  1006. &mlx5e_fast_calibration, 0,
  1007. "Recalibration interval during initial calibration");
  1008. SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, normal, CTLFLAG_RWTUN,
  1009. &mlx5e_normal_calibration, 0,
  1010. "Recalibration interval during normal operations");
  1011. /*
  1012. * Starts (or re-arms) the timestamp calibration process.
  1013. */
  1014. static void
  1015. mlx5e_reset_calibration_callout(struct mlx5e_priv *priv)
  1016. {
  1017. if (priv->clbr_done == 0)
  1018. mlx5e_calibration_callout(priv);
  1019. else
  1020. callout_reset_curcpu(&priv->tstmp_clbr, (priv->clbr_done <
  1021. mlx5e_calibration_duration ? mlx5e_fast_calibration :
  1022. mlx5e_normal_calibration) * hz, mlx5e_calibration_callout,
  1023. priv);
  1024. }
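/*
 * Note: despite the "usec" in its name, the helper below returns the
 * timestamp in nanoseconds (tv_sec * 1000000000 + tv_nsec).
 */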
  1025. static uint64_t
  1026. mlx5e_timespec2usec(const struct timespec *ts)
  1027. {
  1028. return ((uint64_t)ts->tv_sec * 1000000000 + ts->tv_nsec);
  1029. }
  1030. static uint64_t
  1031. mlx5e_hw_clock(struct mlx5e_priv *priv)
  1032. {
  1033. struct mlx5_init_seg *iseg;
  1034. uint32_t hw_h, hw_h1, hw_l;
  1035. iseg = priv->mdev->iseg;
  1036. do {
  1037. hw_h = ioread32be(&iseg->internal_timer_h);
  1038. hw_l = ioread32be(&iseg->internal_timer_l);
  1039. hw_h1 = ioread32be(&iseg->internal_timer_h);
  1040. } while (hw_h1 != hw_h);
  1041. return (((uint64_t)hw_h << 32) | hw_l);
  1042. }
  1043. /*
  1044. * The calibration callout, it runs either in the context of the
  1045. * thread which enables calibration, or in callout. It takes the
  1046. * snapshot of system and adapter clocks, then advances the pointers to
  1047. * the calibration point to allow rx path to read the consistent data
  1048. * lockless.
  1049. */
  1050. static void
  1051. mlx5e_calibration_callout(void *arg)
  1052. {
  1053. struct mlx5e_priv *priv;
  1054. struct mlx5e_clbr_point *next, *curr;
  1055. struct timespec ts;
  1056. int clbr_curr_next;
  1057. priv = arg;
  1058. curr = &priv->clbr_points[priv->clbr_curr];
  1059. clbr_curr_next = priv->clbr_curr + 1;
  1060. if (clbr_curr_next >= nitems(priv->clbr_points))
  1061. clbr_curr_next = 0;
  1062. next = &priv->clbr_points[clbr_curr_next];
  1063. next->base_prev = curr->base_curr;
  1064. next->clbr_hw_prev = curr->clbr_hw_curr;
  1065. next->clbr_hw_curr = mlx5e_hw_clock(priv);
  1066. if (((next->clbr_hw_curr - curr->clbr_hw_curr) >> MLX5E_TSTMP_PREC) ==
  1067. 0) {
  1068. if (priv->clbr_done != 0) {
  1069. mlx5_en_err(priv->ifp,
  1070. "HW failed tstmp frozen %#jx %#jx, disabling\n",
  1071. next->clbr_hw_curr, curr->clbr_hw_prev);
  1072. priv->clbr_done = 0;
  1073. }
  1074. atomic_store_rel_int(&curr->clbr_gen, 0);
  1075. return;
  1076. }
  1077. nanouptime(&ts);
  1078. next->base_curr = mlx5e_timespec2usec(&ts);
  1079. curr->clbr_gen = 0;
  1080. atomic_thread_fence_rel();
  1081. priv->clbr_curr = clbr_curr_next;
  1082. atomic_store_rel_int(&next->clbr_gen, ++(priv->clbr_gen));
  1083. if (priv->clbr_done < mlx5e_calibration_duration)
  1084. priv->clbr_done++;
  1085. mlx5e_reset_calibration_callout(priv);
  1086. }
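/*
 * The generation protocol above (clear the old point's clbr_gen, issue
 * a release fence, publish the new point, then store the incremented
 * generation with release semantics) is what lets the RX path read a
 * calibration point without locks.  A minimal reader sketch, assuming
 * this hypothetical helper shape rather than the driver's actual RX
 * code:
 *
 *	static bool
 *	mlx5e_read_clbr_point(struct mlx5e_priv *priv,
 *	    struct mlx5e_clbr_point *cp_out)
 *	{
 *		struct mlx5e_clbr_point *cp;
 *		u_int gen;
 *
 *		cp = &priv->clbr_points[priv->clbr_curr];
 *		gen = atomic_load_acq_int(&cp->clbr_gen);
 *		if (gen == 0)
 *			return (false);
 *		*cp_out = *cp;
 *		atomic_thread_fence_acq();
 *		return (atomic_load_acq_int(&cp->clbr_gen) == gen);
 *	}
 *
 * A zero generation means calibration is disabled or the point is
 * being rewritten; a generation change across the copy means the
 * snapshot is stale and must be retried.
 */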
  1087. static const char *mlx5e_rq_stats_desc[] = {
  1088. MLX5E_RQ_STATS(MLX5E_STATS_DESC)
  1089. };
  1090. static int
  1091. mlx5e_create_rq(struct mlx5e_channel *c,
  1092. struct mlx5e_rq_param *param,
  1093. struct mlx5e_rq *rq)
  1094. {
  1095. struct mlx5e_priv *priv = c->priv;
  1096. struct mlx5_core_dev *mdev = priv->mdev;
  1097. char buffer[16];
  1098. void *rqc = param->rqc;
  1099. void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
  1100. int wq_sz;
  1101. int err;
  1102. int i;
  1103. u32 nsegs, wqe_sz;
  1104. err = mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
  1105. if (err != 0)
  1106. goto done;
  1107. /* Create DMA descriptor TAG */
  1108. if ((err = -bus_dma_tag_create(
  1109. bus_get_dma_tag(mdev->pdev->dev.bsddev),
  1110. 1, /* any alignment */
  1111. 0, /* no boundary */
  1112. BUS_SPACE_MAXADDR, /* lowaddr */
  1113. BUS_SPACE_MAXADDR, /* highaddr */
  1114. NULL, NULL, /* filter, filterarg */
  1115. nsegs * MLX5E_MAX_RX_BYTES, /* maxsize */
  1116. nsegs, /* nsegments */
  1117. nsegs * MLX5E_MAX_RX_BYTES, /* maxsegsize */
  1118. 0, /* flags */
  1119. NULL, NULL, /* lockfunc, lockfuncarg */
  1120. &rq->dma_tag)))
  1121. goto done;
  1122. err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
  1123. &rq->wq_ctrl);
  1124. if (err)
  1125. goto err_free_dma_tag;
  1126. rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
  1127. err = mlx5e_get_wqe_sz(priv, &rq->wqe_sz, &rq->nsegs);
  1128. if (err != 0)
  1129. goto err_rq_wq_destroy;
  1130. wq_sz = mlx5_wq_ll_get_size(&rq->wq);
  1131. err = -tcp_lro_init_args(&rq->lro, c->tag.m_snd_tag.ifp, TCP_LRO_ENTRIES, wq_sz);
  1132. if (err)
  1133. goto err_rq_wq_destroy;
  1134. rq->mbuf = malloc(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
  1135. for (i = 0; i != wq_sz; i++) {
  1136. struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
  1137. int j;
  1138. err = -bus_dmamap_create(rq->dma_tag, 0, &rq->mbuf[i].dma_map);
  1139. if (err != 0) {
  1140. while (i--)
  1141. bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
  1142. goto err_rq_mbuf_free;
  1143. }
  1144. /* set value for constant fields */
  1145. for (j = 0; j < rq->nsegs; j++)
  1146. wqe->data[j].lkey = cpu_to_be32(priv->mr.key);
  1147. }
  1148. INIT_WORK(&rq->dim.work, mlx5e_dim_work);
  1149. if (priv->params.rx_cq_moderation_mode < 2) {
  1150. rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
  1151. } else {
  1152. void *cqc = container_of(param,
  1153. struct mlx5e_channel_param, rq)->rx_cq.cqc;
  1154. switch (MLX5_GET(cqc, cqc, cq_period_mode)) {
  1155. case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
  1156. rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
  1157. break;
  1158. case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
  1159. rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
  1160. break;
  1161. default:
  1162. rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
  1163. break;
  1164. }
  1165. }
  1166. rq->ifp = c->tag.m_snd_tag.ifp;
  1167. rq->channel = c;
  1168. rq->ix = c->ix;
  1169. snprintf(buffer, sizeof(buffer), "rxstat%d", c->ix);
  1170. mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
  1171. buffer, mlx5e_rq_stats_desc, MLX5E_RQ_STATS_NUM,
  1172. rq->stats.arg);
  1173. return (0);
  1174. err_rq_mbuf_free:
  1175. free(rq->mbuf, M_MLX5EN);
  1176. tcp_lro_free(&rq->lro);
  1177. err_rq_wq_destroy:
  1178. mlx5_wq_destroy(&rq->wq_ctrl);
  1179. err_free_dma_tag:
  1180. bus_dma_tag_destroy(rq->dma_tag);
  1181. done:
  1182. return (err);
  1183. }
  1184. static void
  1185. mlx5e_destroy_rq(struct mlx5e_rq *rq)
  1186. {
  1187. int wq_sz;
  1188. int i;
  1189. /* destroy all sysctl nodes */
  1190. sysctl_ctx_free(&rq->stats.ctx);
  1191. /* free leftover LRO packets, if any */
  1192. tcp_lro_free(&rq->lro);
  1193. wq_sz = mlx5_wq_ll_get_size(&rq->wq);
  1194. for (i = 0; i != wq_sz; i++) {
  1195. if (rq->mbuf[i].mbuf != NULL) {
  1196. bus_dmamap_unload(rq->dma_tag, rq->mbuf[i].dma_map);
  1197. m_freem(rq->mbuf[i].mbuf);
  1198. }
  1199. bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
  1200. }
  1201. free(rq->mbuf, M_MLX5EN);
  1202. mlx5_wq_destroy(&rq->wq_ctrl);
  1203. bus_dma_tag_destroy(rq->dma_tag);
  1204. }
  1205. static int
  1206. mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
  1207. {
  1208. struct mlx5e_channel *c = rq->channel;
  1209. struct mlx5e_priv *priv = c->priv;
  1210. struct mlx5_core_dev *mdev = priv->mdev;
  1211. void *in;
  1212. void *rqc;
  1213. void *wq;
  1214. int inlen;
  1215. int err;
  1216. inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
  1217. sizeof(u64) * rq->wq_ctrl.buf.npages;
  1218. in = mlx5_vzalloc(inlen);
  1219. if (in == NULL)
  1220. return (-ENOMEM);
  1221. rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
  1222. wq = MLX5_ADDR_OF(rqc, rqc, wq);
  1223. memcpy(rqc, param->rqc, sizeof(param->rqc));
  1224. MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
  1225. MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
  1226. MLX5_SET(rqc, rqc, flush_in_error_en, 1);
  1227. if (priv->counter_set_id >= 0)
  1228. MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id);
  1229. MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
  1230. PAGE_SHIFT);
  1231. MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
  1232. mlx5_fill_page_array(&rq->wq_ctrl.buf,
  1233. (__be64 *) MLX5_ADDR_OF(wq, wq, pas));
  1234. err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
  1235. kvfree(in);
  1236. return (err);
  1237. }
  1238. static int
  1239. mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
  1240. {
  1241. struct mlx5e_channel *c = rq->channel;
  1242. struct mlx5e_priv *priv = c->priv;
  1243. struct mlx5_core_dev *mdev = priv->mdev;
  1244. void *in;
  1245. void *rqc;
  1246. int inlen;
  1247. int err;
  1248. inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
  1249. in = mlx5_vzalloc(inlen);
  1250. if (in == NULL)
  1251. return (-ENOMEM);
  1252. rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
  1253. MLX5_SET(modify_rq_in, in, rqn, rq->rqn);
  1254. MLX5_SET(modify_rq_in, in, rq_state, curr_state);
  1255. MLX5_SET(rqc, rqc, state, next_state);
  1256. err = mlx5_core_modify_rq(mdev, in, inlen);
  1257. kvfree(in);
  1258. return (err);
  1259. }
  1260. static void
  1261. mlx5e_disable_rq(struct mlx5e_rq *rq)
  1262. {
  1263. struct mlx5e_channel *c = rq->channel;
  1264. struct mlx5e_priv *priv = c->priv;
  1265. struct mlx5_core_dev *mdev = priv->mdev;
  1266. mlx5_core_destroy_rq(mdev, rq->rqn);
  1267. }
  1268. static int
  1269. mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
  1270. {
  1271. struct mlx5e_channel *c = rq->channel;
  1272. struct mlx5e_priv *priv = c->priv;
  1273. struct mlx5_wq_ll *wq = &rq->wq;
  1274. int i;
  1275. for (i = 0; i < 1000; i++) {
  1276. if (wq->cur_sz >= priv->params.min_rx_wqes)
  1277. return (0);
  1278. msleep(4);
  1279. }
  1280. return (-ETIMEDOUT);
  1281. }
  1282. static int
  1283. mlx5e_open_rq(struct mlx5e_channel *c,
  1284. struct mlx5e_rq_param *param,
  1285. struct mlx5e_rq *rq)
  1286. {
  1287. int err;
  1288. err = mlx5e_create_rq(c, param, rq);
  1289. if (err)
  1290. return (err);
  1291. err = mlx5e_enable_rq(rq, param);
  1292. if (err)
  1293. goto err_destroy_rq;
  1294. err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
  1295. if (err)
  1296. goto err_disable_rq;
  1297. c->rq.enabled = 1;
  1298. return (0);
  1299. err_disable_rq:
  1300. mlx5e_disable_rq(rq);
  1301. err_destroy_rq:
  1302. mlx5e_destroy_rq(rq);
  1303. return (err);
  1304. }
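/*
 * RQ lifecycle summary: mlx5e_open_rq() above allocates the software
 * state (mlx5e_create_rq), creates the hardware object
 * (mlx5e_enable_rq) and moves it from RST to RDY.  Teardown is the
 * mirror image: mlx5e_close_rq() below moves the queue from RDY to
 * ERR so no further completions are generated, and
 * mlx5e_close_rq_wait() then destroys the hardware object, the CQ and
 * the software state.
 */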
  1305. static void
  1306. mlx5e_close_rq(struct mlx5e_rq *rq)
  1307. {
  1308. mtx_lock(&rq->mtx);
  1309. rq->enabled = 0;
  1310. callout_stop(&rq->watchdog);
  1311. mtx_unlock(&rq->mtx);
  1312. mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
  1313. }
  1314. static void
  1315. mlx5e_close_rq_wait(struct mlx5e_rq *rq)
  1316. {
  1317. mlx5e_disable_rq(rq);
  1318. mlx5e_close_cq(&rq->cq);
  1319. cancel_work_sync(&rq->dim.work);
  1320. mlx5e_destroy_rq(rq);
  1321. }
  1322. void
  1323. mlx5e_free_sq_db(struct mlx5e_sq *sq)
  1324. {
  1325. int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
  1326. int x;
  1327. for (x = 0; x != wq_sz; x++) {
  1328. if (sq->mbuf[x].mbuf != NULL) {
  1329. bus_dmamap_unload(sq->dma_tag, sq->mbuf[x].dma_map);
  1330. m_freem(sq->mbuf[x].mbuf);
  1331. }
  1332. bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
  1333. }
  1334. free(sq->mbuf, M_MLX5EN);
  1335. }
  1336. int
  1337. mlx5e_alloc_sq_db(struct mlx5e_sq *sq)
  1338. {
  1339. int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
  1340. int err;
  1341. int x;
  1342. sq->mbuf = malloc(wq_sz * sizeof(sq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
  1343. /* Create DMA descriptor MAPs */
  1344. for (x = 0; x != wq_sz; x++) {
  1345. err = -bus_dmamap_create(sq->dma_tag, 0, &sq->mbuf[x].dma_map);
  1346. if (err != 0) {
  1347. while (x--)
  1348. bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
  1349. free(sq->mbuf, M_MLX5EN);
  1350. return (err);
  1351. }
  1352. }
  1353. return (0);
  1354. }
  1355. static const char *mlx5e_sq_stats_desc[] = {
  1356. MLX5E_SQ_STATS(MLX5E_STATS_DESC)
  1357. };
  1358. void
  1359. mlx5e_update_sq_inline(struct mlx5e_sq *sq)
  1360. {
  1361. sq->max_inline = sq->priv->params.tx_max_inline;
  1362. sq->min_inline_mode = sq->priv->params.tx_min_inline_mode;
  1363. /*
  1364. * Check if trust state is DSCP or if inline mode is NONE which
  1365. * indicates CX-5 or newer hardware.
  1366. */
  1367. if (sq->priv->params_ethtool.trust_state != MLX5_QPTS_TRUST_PCP ||
  1368. sq->min_inline_mode == MLX5_INLINE_MODE_NONE) {
  1369. if (MLX5_CAP_ETH(sq->priv->mdev, wqe_vlan_insert))
  1370. sq->min_insert_caps = MLX5E_INSERT_VLAN | MLX5E_INSERT_NON_VLAN;
  1371. else
  1372. sq->min_insert_caps = MLX5E_INSERT_NON_VLAN;
  1373. } else {
  1374. sq->min_insert_caps = 0;
  1375. }
  1376. }
  1377. static void
  1378. mlx5e_refresh_sq_inline_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
  1379. {
  1380. int i;
  1381. for (i = 0; i != priv->num_tc; i++) {
  1382. mtx_lock(&c->sq[i].lock);
  1383. mlx5e_update_sq_inline(&c->sq[i]);
  1384. mtx_unlock(&c->sq[i].lock);
  1385. }
  1386. }
  1387. void
  1388. mlx5e_refresh_sq_inline(struct mlx5e_priv *priv)
  1389. {
  1390. int i;
  1391. /* check if channels are closed */
  1392. if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
  1393. return;
  1394. for (i = 0; i < priv->params.num_channels; i++)
  1395. mlx5e_refresh_sq_inline_sub(priv, &priv->channel[i]);
  1396. }
  1397. static int
  1398. mlx5e_create_sq(struct mlx5e_channel *c,
  1399. int tc,
  1400. struct mlx5e_sq_param *param,
  1401. struct mlx5e_sq *sq)
  1402. {
  1403. struct mlx5e_priv *priv = c->priv;
  1404. struct mlx5_core_dev *mdev = priv->mdev;
  1405. char buffer[16];
  1406. void *sqc = param->sqc;
  1407. void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
  1408. int err;
  1409. /* Create DMA descriptor TAG */
  1410. if ((err = -bus_dma_tag_create(
  1411. bus_get_dma_tag(mdev->pdev->dev.bsddev),
  1412. 1, /* any alignment */
  1413. 0, /* no boundary */
  1414. BUS_SPACE_MAXADDR, /* lowaddr */
  1415. BUS_SPACE_MAXADDR, /* highaddr */
  1416. NULL, NULL, /* filter, filterarg */
  1417. MLX5E_MAX_TX_PAYLOAD_SIZE, /* maxsize */
  1418. MLX5E_MAX_TX_MBUF_FRAGS, /* nsegments */
  1419. MLX5E_MAX_TX_MBUF_SIZE, /* maxsegsize */
  1420. 0, /* flags */
  1421. NULL, NULL, /* lockfunc, lockfuncarg */
  1422. &sq->dma_tag)))
  1423. goto done;
  1424. err = mlx5_alloc_map_uar(mdev, &sq->uar);
  1425. if (err)
  1426. goto err_free_dma_tag;
  1427. err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
  1428. &sq->wq_ctrl);
  1429. if (err)
  1430. goto err_unmap_free_uar;
  1431. sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
  1432. sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
  1433. err = mlx5e_alloc_sq_db(sq);
  1434. if (err)
  1435. goto err_sq_wq_destroy;
  1436. sq->mkey_be = cpu_to_be32(priv->mr.key);
  1437. sq->ifp = priv->ifp;
  1438. sq->priv = priv;
  1439. sq->tc = tc;
  1440. mlx5e_update_sq_inline(sq);
  1441. snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc);
  1442. mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
  1443. buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM,
  1444. sq->stats.arg);
  1445. return (0);
  1446. err_sq_wq_destroy:
  1447. mlx5_wq_destroy(&sq->wq_ctrl);
  1448. err_unmap_free_uar:
  1449. mlx5_unmap_free_uar(mdev, &sq->uar);
  1450. err_free_dma_tag:
  1451. bus_dma_tag_destroy(sq->dma_tag);
  1452. done:
  1453. return (err);
  1454. }
  1455. static void
  1456. mlx5e_destroy_sq(struct mlx5e_sq *sq)
  1457. {
  1458. /* destroy all sysctl nodes */
  1459. sysctl_ctx_free(&sq->stats.ctx);
  1460. mlx5e_free_sq_db(sq);
  1461. mlx5_wq_destroy(&sq->wq_ctrl);
  1462. mlx5_unmap_free_uar(sq->priv->mdev, &sq->uar);
  1463. bus_dma_tag_destroy(sq->dma_tag);
  1464. }
  1465. int
  1466. mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param,
  1467. int tis_num)
  1468. {
  1469. void *in;
  1470. void *sqc;
  1471. void *wq;
  1472. int inlen;
  1473. int err;
  1474. inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
  1475. sizeof(u64) * sq->wq_ctrl.buf.npages;
  1476. in = mlx5_vzalloc(inlen);
  1477. if (in == NULL)
  1478. return (-ENOMEM);
  1479. sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
  1480. wq = MLX5_ADDR_OF(sqc, sqc, wq);
  1481. memcpy(sqc, param->sqc, sizeof(param->sqc));
  1482. MLX5_SET(sqc, sqc, tis_num_0, tis_num);
  1483. MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
  1484. MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
  1485. MLX5_SET(sqc, sqc, tis_lst_sz, 1);
  1486. MLX5_SET(sqc, sqc, flush_in_error_en, 1);
  1487. MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
  1488. MLX5_SET(wq, wq, uar_page, sq->uar.index);
  1489. MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
  1490. PAGE_SHIFT);
  1491. MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
  1492. mlx5_fill_page_array(&sq->wq_ctrl.buf,
  1493. (__be64 *) MLX5_ADDR_OF(wq, wq, pas));
  1494. err = mlx5_core_create_sq(sq->priv->mdev, in, inlen, &sq->sqn);
  1495. kvfree(in);
  1496. return (err);
  1497. }
  1498. int
  1499. mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
  1500. {
  1501. void *in;
  1502. void *sqc;
  1503. int inlen;
  1504. int err;
  1505. inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
  1506. in = mlx5_vzalloc(inlen);
  1507. if (in == NULL)
  1508. return (-ENOMEM);
  1509. sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
  1510. MLX5_SET(modify_sq_in, in, sqn, sq->sqn);
  1511. MLX5_SET(modify_sq_in, in, sq_state, curr_state);
  1512. MLX5_SET(sqc, sqc, state, next_state);
  1513. err = mlx5_core_modify_sq(sq->priv->mdev, in, inlen);
  1514. kvfree(in);
  1515. return (err);
  1516. }
  1517. void
  1518. mlx5e_disable_sq(struct mlx5e_sq *sq)
  1519. {
  1520. mlx5_core_destroy_sq(sq->priv->mdev, sq->sqn);
  1521. }
  1522. static int
  1523. mlx5e_open_sq(struct mlx5e_channel *c,
  1524. int tc,
  1525. struct mlx5e_sq_param *param,
  1526. struct mlx5e_sq *sq)
  1527. {
  1528. int err;
  1529. sq->cev_factor = c->priv->params_ethtool.tx_completion_fact;
  1530. /* ensure the TX completion event factor is not zero */
  1531. if (sq->cev_factor == 0)
  1532. sq->cev_factor = 1;
  1533. err = mlx5e_create_sq(c, tc, param, sq);
  1534. if (err)
  1535. return (err);
  1536. err = mlx5e_enable_sq(sq, param, c->priv->tisn[tc]);
  1537. if (err)
  1538. goto err_destroy_sq;
  1539. err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
  1540. if (err)
  1541. goto err_disable_sq;
  1542. WRITE_ONCE(sq->running, 1);
  1543. return (0);
  1544. err_disable_sq:
  1545. mlx5e_disable_sq(sq);
  1546. err_destroy_sq:
  1547. mlx5e_destroy_sq(sq);
  1548. return (err);
  1549. }
  1550. static void
  1551. mlx5e_sq_send_nops_locked(struct mlx5e_sq *sq, int can_sleep)
  1552. {
  1553. /* fill up remainder with NOPs */
  1554. while (sq->cev_counter != 0) {
  1555. while (!mlx5e_sq_has_room_for(sq, 1)) {
  1556. if (can_sleep != 0) {
  1557. mtx_unlock(&sq->lock);
  1558. msleep(4);
  1559. mtx_lock(&sq->lock);
  1560. } else {
  1561. goto done;
  1562. }
  1563. }
  1564. /* send a single NOP */
  1565. mlx5e_send_nop(sq, 1);
  1566. atomic_thread_fence_rel();
  1567. }
  1568. done:
  1569. /* Check if we need to write the doorbell */
  1570. if (likely(sq->doorbell.d64 != 0)) {
  1571. mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
  1572. sq->doorbell.d64 = 0;
  1573. }
  1574. }
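/*
 * Completion event factor timer: on one timeout it arms NOP sending,
 * on the next it pads the ring with NOPs so that pending TX
 * completions are not delayed indefinitely; the callout re-arms itself
 * every hz until the ring has been flushed.
 */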
  1575. void
  1576. mlx5e_sq_cev_timeout(void *arg)
  1577. {
  1578. struct mlx5e_sq *sq = arg;
  1579. mtx_assert(&sq->lock, MA_OWNED);
  1580. /* check next state */
  1581. switch (sq->cev_next_state) {
  1582. case MLX5E_CEV_STATE_SEND_NOPS:
  1583. /* fill TX ring with NOPs, if any */
  1584. mlx5e_sq_send_nops_locked(sq, 0);
  1585. /* check if completed */
  1586. if (sq->cev_counter == 0) {
  1587. sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
  1588. return;
  1589. }
  1590. break;
  1591. default:
  1592. /* send NOPs on next timeout */
  1593. sq->cev_next_state = MLX5E_CEV_STATE_SEND_NOPS;
  1594. break;
  1595. }
  1596. /* restart timer */
  1597. callout_reset_curcpu(&sq->cev_callout, hz, mlx5e_sq_cev_timeout, sq);
  1598. }
  1599. void
  1600. mlx5e_drain_sq(struct mlx5e_sq *sq)
  1601. {
  1602. int error;
1603. struct mlx5_core_dev *mdev = sq->priv->mdev;
  1604. /*
  1605. * Check if already stopped.
  1606. *
  1607. * NOTE: Serialization of this function is managed by the
1608. * caller, which ensures either that the priv's state lock is held
1609. * or, in the case of rate limit support, that a single thread manages
1610. * drain and resume of SQs. The "running" variable can therefore safely
  1611. * be read without any locks.
  1612. */
  1613. if (READ_ONCE(sq->running) == 0)
  1614. return;
  1615. /* don't put more packets into the SQ */
  1616. WRITE_ONCE(sq->running, 0);
  1617. /* serialize access to DMA rings */
  1618. mtx_lock(&sq->lock);
  1619. /* teardown event factor timer, if any */
  1620. sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
  1621. callout_stop(&sq->cev_callout);
  1622. /* send dummy NOPs in order to flush the transmit ring */
  1623. mlx5e_sq_send_nops_locked(sq, 1);
  1624. mtx_unlock(&sq->lock);
  1625. /* wait till SQ is empty or link is down */
  1626. mtx_lock(&sq->lock);
  1627. while (sq->cc != sq->pc &&
  1628. (sq->priv->media_status_last & IFM_ACTIVE) != 0 &&
  1629. mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
  1630. mtx_unlock(&sq->lock);
  1631. msleep(1);
  1632. sq->cq.mcq.comp(&sq->cq.mcq);
  1633. mtx_lock(&sq->lock);
  1634. }
  1635. mtx_unlock(&sq->lock);
  1636. /* error out remaining requests */
  1637. error = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
  1638. if (error != 0) {
  1639. mlx5_en_err(sq->ifp,
  1640. "mlx5e_modify_sq() from RDY to ERR failed: %d\n", error);
  1641. }
  1642. /* wait till SQ is empty */
  1643. mtx_lock(&sq->lock);
  1644. while (sq->cc != sq->pc &&
  1645. mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
  1646. mtx_unlock(&sq->lock);
  1647. msleep(1);
  1648. sq->cq.mcq.comp(&sq->cq.mcq);
  1649. mtx_lock(&sq->lock);
  1650. }
  1651. mtx_unlock(&sq->lock);
  1652. }
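/* Drain, disable and destroy a send queue. */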
  1653. static void
  1654. mlx5e_close_sq_wait(struct mlx5e_sq *sq)
  1655. {
  1656. mlx5e_drain_sq(sq);
  1657. mlx5e_disable_sq(sq);
  1658. mlx5e_destroy_sq(sq);
  1659. }
  1660. static int
  1661. mlx5e_create_cq(struct mlx5e_priv *priv,
  1662. struct mlx5e_cq_param *param,
  1663. struct mlx5e_cq *cq,
  1664. mlx5e_cq_comp_t *comp,
  1665. int eq_ix)
  1666. {
  1667. struct mlx5_core_dev *mdev = priv->mdev;
  1668. struct mlx5_core_cq *mcq = &cq->mcq;
  1669. int eqn_not_used;
  1670. int irqn;
  1671. int err;
  1672. u32 i;
  1673. param->wq.buf_numa_node = 0;
  1674. param->wq.db_numa_node = 0;
  1675. err = mlx5_vector2eqn(mdev, eq_ix, &eqn_not_used, &irqn);
  1676. if (err)
  1677. return (err);
  1678. err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
  1679. &cq->wq_ctrl);
  1680. if (err)
  1681. return (err);
  1682. mcq->cqe_sz = 64;
  1683. mcq->set_ci_db = cq->wq_ctrl.db.db;
  1684. mcq->arm_db = cq->wq_ctrl.db.db + 1;
  1685. *mcq->set_ci_db = 0;
  1686. *mcq->arm_db = 0;
  1687. mcq->vector = eq_ix;
  1688. mcq->comp = comp;
  1689. mcq->event = mlx5e_cq_error_event;
  1690. mcq->irqn = irqn;
  1691. mcq->uar = &priv->cq_uar;
  1692. for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
  1693. struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
  1694. cqe->op_own = 0xf1;
  1695. }
  1696. cq->priv = priv;
  1697. return (0);
  1698. }
  1699. static void
  1700. mlx5e_destroy_cq(struct mlx5e_cq *cq)
  1701. {
  1702. mlx5_wq_destroy(&cq->wq_ctrl);
  1703. }
  1704. static int
  1705. mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param, int eq_ix)
  1706. {
  1707. struct mlx5_core_cq *mcq = &cq->mcq;
  1708. void *in;
  1709. void *cqc;
  1710. int inlen;
  1711. int irqn_not_used;
  1712. int eqn;
  1713. int err;
  1714. inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
  1715. sizeof(u64) * cq->wq_ctrl.buf.npages;
  1716. in = mlx5_vzalloc(inlen);
  1717. if (in == NULL)
  1718. return (-ENOMEM);
  1719. cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
  1720. memcpy(cqc, param->cqc, sizeof(param->cqc));
  1721. mlx5_fill_page_array(&cq->wq_ctrl.buf,
  1722. (__be64 *) MLX5_ADDR_OF(create_cq_in, in, pas));
  1723. mlx5_vector2eqn(cq->priv->mdev, eq_ix, &eqn, &irqn_not_used);
  1724. MLX5_SET(cqc, cqc, c_eqn, eqn);
  1725. MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
  1726. MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
  1727. PAGE_SHIFT);
  1728. MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
  1729. err = mlx5_core_create_cq(cq->priv->mdev, mcq, in, inlen);
  1730. kvfree(in);
  1731. if (err)
  1732. return (err);
  1733. mlx5e_cq_arm(cq, MLX5_GET_DOORBELL_LOCK(&cq->priv->doorbell_lock));
  1734. return (0);
  1735. }
  1736. static void
  1737. mlx5e_disable_cq(struct mlx5e_cq *cq)
  1738. {
  1739. mlx5_core_destroy_cq(cq->priv->mdev, &cq->mcq);
  1740. }
  1741. int
  1742. mlx5e_open_cq(struct mlx5e_priv *priv,
  1743. struct mlx5e_cq_param *param,
  1744. struct mlx5e_cq *cq,
  1745. mlx5e_cq_comp_t *comp,
  1746. int eq_ix)
  1747. {
  1748. int err;
  1749. err = mlx5e_create_cq(priv, param, cq, comp, eq_ix);
  1750. if (err)
  1751. return (err);
  1752. err = mlx5e_enable_cq(cq, param, eq_ix);
  1753. if (err)
  1754. goto err_destroy_cq;
  1755. return (0);
  1756. err_destroy_cq:
  1757. mlx5e_destroy_cq(cq);
  1758. return (err);
  1759. }
  1760. void
  1761. mlx5e_close_cq(struct mlx5e_cq *cq)
  1762. {
  1763. mlx5e_disable_cq(cq);
  1764. mlx5e_destroy_cq(cq);
  1765. }
  1766. static int
  1767. mlx5e_open_tx_cqs(struct mlx5e_channel *c,
  1768. struct mlx5e_channel_param *cparam)
  1769. {
  1770. int err;
  1771. int tc;
  1772. for (tc = 0; tc < c->priv->num_tc; tc++) {
  1773. /* open completion queue */
  1774. err = mlx5e_open_cq(c->priv, &cparam->tx_cq, &c->sq[tc].cq,
  1775. &mlx5e_tx_cq_comp, c->ix);
  1776. if (err)
  1777. goto err_close_tx_cqs;
  1778. }
  1779. return (0);
  1780. err_close_tx_cqs:
  1781. for (tc--; tc >= 0; tc--)
  1782. mlx5e_close_cq(&c->sq[tc].cq);
  1783. return (err);
  1784. }
  1785. static void
  1786. mlx5e_close_tx_cqs(struct mlx5e_channel *c)
  1787. {
  1788. int tc;
  1789. for (tc = 0; tc < c->priv->num_tc; tc++)
  1790. mlx5e_close_cq(&c->sq[tc].cq);
  1791. }
  1792. static int
  1793. mlx5e_open_sqs(struct mlx5e_channel *c,
  1794. struct mlx5e_channel_param *cparam)
  1795. {
  1796. int err;
  1797. int tc;
  1798. for (tc = 0; tc < c->priv->num_tc; tc++) {
  1799. err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
  1800. if (err)
  1801. goto err_close_sqs;
  1802. }
  1803. return (0);
  1804. err_close_sqs:
  1805. for (tc--; tc >= 0; tc--)
  1806. mlx5e_close_sq_wait(&c->sq[tc]);
  1807. return (err);
  1808. }
  1809. static void
  1810. mlx5e_close_sqs_wait(struct mlx5e_channel *c)
  1811. {
  1812. int tc;
  1813. for (tc = 0; tc < c->priv->num_tc; tc++)
  1814. mlx5e_close_sq_wait(&c->sq[tc]);
  1815. }
  1816. static void
  1817. mlx5e_chan_static_init(struct mlx5e_priv *priv, struct mlx5e_channel *c, int ix)
  1818. {
  1819. int tc;
  1820. /* setup priv and channel number */
  1821. c->priv = priv;
  1822. c->ix = ix;
  1823. /* setup send tag */
  1824. c->tag.m_snd_tag.ifp = priv->ifp;
  1825. c->tag.type = IF_SND_TAG_TYPE_UNLIMITED;
  1826. mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF);
  1827. callout_init_mtx(&c->rq.watchdog, &c->rq.mtx, 0);
  1828. for (tc = 0; tc != MLX5E_MAX_TX_NUM_TC; tc++) {
  1829. struct mlx5e_sq *sq = c->sq + tc;
  1830. mtx_init(&sq->lock, "mlx5tx",
  1831. MTX_NETWORK_LOCK " TX", MTX_DEF);
  1832. mtx_init(&sq->comp_lock, "mlx5comp",
  1833. MTX_NETWORK_LOCK " TX", MTX_DEF);
  1834. callout_init_mtx(&sq->cev_callout, &sq->lock, 0);
  1835. }
  1836. }
  1837. static void
  1838. mlx5e_chan_static_destroy(struct mlx5e_channel *c)
  1839. {
  1840. int tc;
  1841. callout_drain(&c->rq.watchdog);
  1842. mtx_destroy(&c->rq.mtx);
  1843. for (tc = 0; tc != MLX5E_MAX_TX_NUM_TC; tc++) {
  1844. callout_drain(&c->sq[tc].cev_callout);
  1845. mtx_destroy(&c->sq[tc].lock);
  1846. mtx_destroy(&c->sq[tc].comp_lock);
  1847. }
  1848. }
  1849. static int
  1850. mlx5e_open_channel(struct mlx5e_priv *priv,
  1851. struct mlx5e_channel_param *cparam,
  1852. struct mlx5e_channel *c)
  1853. {
  1854. int i, err;
1855. /* zero non-persistent data */
  1856. MLX5E_ZERO(&c->rq, mlx5e_rq_zero_start);
  1857. for (i = 0; i != priv->num_tc; i++)
  1858. MLX5E_ZERO(&c->sq[i], mlx5e_sq_zero_start);
  1859. /* open transmit completion queue */
  1860. err = mlx5e_open_tx_cqs(c, cparam);
  1861. if (err)
  1862. goto err_free;
  1863. /* open receive completion queue */
  1864. err = mlx5e_open_cq(c->priv, &cparam->rx_cq, &c->rq.cq,
  1865. &mlx5e_rx_cq_comp, c->ix);
  1866. if (err)
  1867. goto err_close_tx_cqs;
  1868. err = mlx5e_open_sqs(c, cparam);
  1869. if (err)
  1870. goto err_close_rx_cq;
  1871. err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
  1872. if (err)
  1873. goto err_close_sqs;
  1874. /* poll receive queue initially */
  1875. c->rq.cq.mcq.comp(&c->rq.cq.mcq);
  1876. return (0);
  1877. err_close_sqs:
  1878. mlx5e_close_sqs_wait(c);
  1879. err_close_rx_cq:
  1880. mlx5e_close_cq(&c->rq.cq);
  1881. err_close_tx_cqs:
  1882. mlx5e_close_tx_cqs(c);
  1883. err_free:
  1884. return (err);
  1885. }
  1886. static void
  1887. mlx5e_close_channel(struct mlx5e_channel *c)
  1888. {
  1889. mlx5e_close_rq(&c->rq);
  1890. }
  1891. static void
  1892. mlx5e_close_channel_wait(struct mlx5e_channel *c)
  1893. {
  1894. mlx5e_close_rq_wait(&c->rq);
  1895. mlx5e_close_sqs_wait(c);
  1896. mlx5e_close_tx_cqs(c);
  1897. }
  1898. static int
  1899. mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs)
  1900. {
  1901. u32 r, n;
  1902. r = priv->params.hw_lro_en ? priv->params.lro_wqe_sz :
  1903. MLX5E_SW2MB_MTU(priv->ifp->if_mtu);
  1904. if (r > MJUM16BYTES)
  1905. return (-ENOMEM);
  1906. if (r > MJUM9BYTES)
  1907. r = MJUM16BYTES;
  1908. else if (r > MJUMPAGESIZE)
  1909. r = MJUM9BYTES;
  1910. else if (r > MCLBYTES)
  1911. r = MJUMPAGESIZE;
  1912. else
  1913. r = MCLBYTES;
  1914. /*
  1915. * n + 1 must be a power of two, because stride size must be.
  1916. * Stride size is 16 * (n + 1), as the first segment is
  1917. * control.
  1918. */
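/*
 * Illustration only (assumed value): if MLX5E_MAX_RX_BYTES were 4096
 * and r = MJUM9BYTES (9216 bytes), howmany() would give n = 3, and
 * n + 1 = 4 is already a power of two, so the loop exits immediately.
 */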
  1919. for (n = howmany(r, MLX5E_MAX_RX_BYTES); !powerof2(n + 1); n++)
  1920. ;
  1921. if (n > MLX5E_MAX_BUSDMA_RX_SEGS)
  1922. return (-ENOMEM);
  1923. *wqe_sz = r;
  1924. *nsegs = n;
  1925. return (0);
  1926. }
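/*
 * Fill in the RQ creation parameters: a linked-list WQ whose stride
 * holds the control segment plus "nsegs" data segments, sized by the
 * configured log_rq_size.
 */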
  1927. static void
  1928. mlx5e_build_rq_param(struct mlx5e_priv *priv,
  1929. struct mlx5e_rq_param *param)
  1930. {
  1931. void *rqc = param->rqc;
  1932. void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
  1933. u32 wqe_sz, nsegs;
  1934. mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
  1935. MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
  1936. MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
  1937. MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe) +
  1938. nsegs * sizeof(struct mlx5_wqe_data_seg)));
  1939. MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
  1940. MLX5_SET(wq, wq, pd, priv->pdn);
  1941. param->wq.buf_numa_node = 0;
  1942. param->wq.db_numa_node = 0;
  1943. param->wq.linear = 1;
  1944. }
  1945. static void
  1946. mlx5e_build_sq_param(struct mlx5e_priv *priv,
  1947. struct mlx5e_sq_param *param)
  1948. {
  1949. void *sqc = param->sqc;
  1950. void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
  1951. MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
  1952. MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
  1953. MLX5_SET(wq, wq, pd, priv->pdn);
  1954. param->wq.buf_numa_node = 0;
  1955. param->wq.db_numa_node = 0;
  1956. param->wq.linear = 1;
  1957. }
  1958. static void
  1959. mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
  1960. struct mlx5e_cq_param *param)
  1961. {
  1962. void *cqc = param->cqc;
  1963. MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
  1964. }
  1965. static void
  1966. mlx5e_get_default_profile(struct mlx5e_priv *priv, int mode, struct net_dim_cq_moder *ptr)
  1967. {
  1968. *ptr = net_dim_get_profile(mode, MLX5E_DIM_DEFAULT_PROFILE);
  1969. /* apply LRO restrictions */
  1970. if (priv->params.hw_lro_en &&
  1971. ptr->pkts > MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO) {
  1972. ptr->pkts = MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO;
  1973. }
  1974. }
  1975. static void
  1976. mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
  1977. struct mlx5e_cq_param *param)
  1978. {
  1979. struct net_dim_cq_moder curr;
  1980. void *cqc = param->cqc;
  1981. /*
  1982. * We use MLX5_CQE_FORMAT_HASH because the RX hash mini CQE
1983. * format is more beneficial for the FreeBSD use case.
  1984. *
  1985. * Adding support for MLX5_CQE_FORMAT_CSUM will require changes
  1986. * in mlx5e_decompress_cqe.
  1987. */
  1988. if (priv->params.cqe_zipping_en) {
  1989. MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_HASH);
  1990. MLX5_SET(cqc, cqc, cqe_compression_en, 1);
  1991. }
  1992. MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
  1993. switch (priv->params.rx_cq_moderation_mode) {
  1994. case 0:
  1995. MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
  1996. MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);
  1997. MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
  1998. break;
  1999. case 1:
  2000. MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
  2001. MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);
  2002. if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
  2003. MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
  2004. else
  2005. MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
  2006. break;
  2007. case 2:
  2008. mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE, &curr);
  2009. MLX5_SET(cqc, cqc, cq_period, curr.usec);
  2010. MLX5_SET(cqc, cqc, cq_max_count, curr.pkts);
  2011. MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
  2012. break;
  2013. case 3:
  2014. mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE, &curr);
  2015. MLX5_SET(cqc, cqc, cq_period, curr.usec);
  2016. MLX5_SET(cqc, cqc, cq_max_count, curr.pkts);
  2017. if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
  2018. MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
  2019. else
  2020. MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
  2021. break;
  2022. default:
  2023. break;
  2024. }
  2025. mlx5e_dim_build_cq_param(priv, param);
  2026. mlx5e_build_common_cq_param(priv, param);
  2027. }
  2028. static void
  2029. mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
  2030. struct mlx5e_cq_param *param)
  2031. {
  2032. void *cqc = param->cqc;
  2033. MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
  2034. MLX5_SET(cqc, cqc, cq_period, priv->params.tx_cq_moderation_usec);
  2035. MLX5_SET(cqc, cqc, cq_max_count, priv->params.tx_cq_moderation_pkts);
  2036. switch (priv->params.tx_cq_moderation_mode) {
  2037. case 0:
  2038. MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
  2039. break;
  2040. default:
  2041. if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
  2042. MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
  2043. else
  2044. MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
  2045. break;
  2046. }
  2047. mlx5e_build_common_cq_param(priv, param);
  2048. }
  2049. static void
  2050. mlx5e_build_channel_param(struct mlx5e_priv *priv,
  2051. struct mlx5e_channel_param *cparam)
  2052. {
  2053. memset(cparam, 0, sizeof(*cparam));
  2054. mlx5e_build_rq_param(priv, &cparam->rq);
  2055. mlx5e_build_sq_param(priv, &cparam->sq);
  2056. mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
  2057. mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
  2058. }
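/*
 * Open all channels, each consisting of one RQ and one SQ per traffic
 * class, then wait until every receive queue has posted its minimum
 * number of WQEs before declaring the channels ready.
 */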
  2059. static int
  2060. mlx5e_open_channels(struct mlx5e_priv *priv)
  2061. {
  2062. struct mlx5e_channel_param *cparam;
  2063. int err;
  2064. int i;
  2065. int j;
  2066. cparam = malloc(sizeof(*cparam), M_MLX5EN, M_WAITOK);
  2067. mlx5e_build_channel_param(priv, cparam);
  2068. for (i = 0; i < priv->params.num_channels; i++) {
  2069. err = mlx5e_open_channel(priv, cparam, &priv->channel[i]);
  2070. if (err)
  2071. goto err_close_channels;
  2072. }
  2073. for (j = 0; j < priv->params.num_channels; j++) {
  2074. err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j].rq);
  2075. if (err)
  2076. goto err_close_channels;
  2077. }
  2078. free(cparam, M_MLX5EN);
  2079. return (0);
  2080. err_close_channels:
  2081. while (i--) {
  2082. mlx5e_close_channel(&priv->channel[i]);
  2083. mlx5e_close_channel_wait(&priv->channel[i]);
  2084. }
  2085. free(cparam, M_MLX5EN);
  2086. return (err);
  2087. }
  2088. static void
  2089. mlx5e_close_channels(struct mlx5e_priv *priv)
  2090. {
  2091. int i;
  2092. for (i = 0; i < priv->params.num_channels; i++)
  2093. mlx5e_close_channel(&priv->channel[i]);
  2094. for (i = 0; i < priv->params.num_channels; i++)
  2095. mlx5e_close_channel_wait(&priv->channel[i]);
  2096. }
  2097. static int
  2098. mlx5e_refresh_sq_params(struct mlx5e_priv *priv, struct mlx5e_sq *sq)
  2099. {
  2100. if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
  2101. uint8_t cq_mode;
  2102. switch (priv->params.tx_cq_moderation_mode) {
  2103. case 0:
  2104. case 2:
  2105. cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
  2106. break;
  2107. default:
  2108. cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
  2109. break;
  2110. }
  2111. return (mlx5_core_modify_cq_moderation_mode(priv->mdev, &sq->cq.mcq,
  2112. priv->params.tx_cq_moderation_usec,
  2113. priv->params.tx_cq_moderation_pkts,
  2114. cq_mode));
  2115. }
  2116. return (mlx5_core_modify_cq_moderation(priv->mdev, &sq->cq.mcq,
  2117. priv->params.tx_cq_moderation_usec,
  2118. priv->params.tx_cq_moderation_pkts));
  2119. }
  2120. static int
  2121. mlx5e_refresh_rq_params(struct mlx5e_priv *priv, struct mlx5e_rq *rq)
  2122. {
  2123. if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
  2124. uint8_t cq_mode;
  2125. uint8_t dim_mode;
  2126. int retval;
  2127. switch (priv->params.rx_cq_moderation_mode) {
  2128. case 0:
  2129. case 2:
  2130. cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
  2131. dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
  2132. break;
  2133. default:
  2134. cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
  2135. dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
  2136. break;
  2137. }
  2138. /* tear down dynamic interrupt moderation */
  2139. mtx_lock(&rq->mtx);
  2140. rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
  2141. mtx_unlock(&rq->mtx);
  2142. /* wait for dynamic interrupt moderation work task, if any */
  2143. cancel_work_sync(&rq->dim.work);
  2144. if (priv->params.rx_cq_moderation_mode >= 2) {
  2145. struct net_dim_cq_moder curr;
  2146. mlx5e_get_default_profile(priv, dim_mode, &curr);
  2147. retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq,
  2148. curr.usec, curr.pkts, cq_mode);
  2149. /* set dynamic interrupt moderation mode and zero defaults */
  2150. mtx_lock(&rq->mtx);
  2151. rq->dim.mode = dim_mode;
  2152. rq->dim.state = 0;
  2153. rq->dim.profile_ix = MLX5E_DIM_DEFAULT_PROFILE;
  2154. mtx_unlock(&rq->mtx);
  2155. } else {
  2156. retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq,
  2157. priv->params.rx_cq_moderation_usec,
  2158. priv->params.rx_cq_moderation_pkts,
  2159. cq_mode);
  2160. }
  2161. return (retval);
  2162. }
  2163. return (mlx5_core_modify_cq_moderation(priv->mdev, &rq->cq.mcq,
  2164. priv->params.rx_cq_moderation_usec,
  2165. priv->params.rx_cq_moderation_pkts));
  2166. }
  2167. static int
  2168. mlx5e_refresh_channel_params_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
  2169. {
  2170. int err;
  2171. int i;
  2172. err = mlx5e_refresh_rq_params(priv, &c->rq);
  2173. if (err)
  2174. goto done;
  2175. for (i = 0; i != priv->num_tc; i++) {
  2176. err = mlx5e_refresh_sq_params(priv, &c->sq[i]);
  2177. if (err)
  2178. goto done;
  2179. }
  2180. done:
  2181. return (err);
  2182. }
  2183. int
  2184. mlx5e_refresh_channel_params(struct mlx5e_priv *priv)
  2185. {
  2186. int i;
  2187. /* check if channels are closed */
  2188. if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
  2189. return (EINVAL);
  2190. for (i = 0; i < priv->params.num_channels; i++) {
  2191. int err;
  2192. err = mlx5e_refresh_channel_params_sub(priv, &priv->channel[i]);
  2193. if (err)
  2194. return (err);
  2195. }
  2196. return (0);
  2197. }
  2198. static int
  2199. mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
  2200. {
  2201. struct mlx5_core_dev *mdev = priv->mdev;
  2202. u32 in[MLX5_ST_SZ_DW(create_tis_in)];
  2203. void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
  2204. memset(in, 0, sizeof(in));
  2205. MLX5_SET(tisc, tisc, prio, tc);
  2206. MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
  2207. return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]));
  2208. }
  2209. static void
  2210. mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
  2211. {
  2212. mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
  2213. }
  2214. static int
  2215. mlx5e_open_tises(struct mlx5e_priv *priv)
  2216. {
  2217. int num_tc = priv->num_tc;
  2218. int err;
  2219. int tc;
  2220. for (tc = 0; tc < num_tc; tc++) {
  2221. err = mlx5e_open_tis(priv, tc);
  2222. if (err)
  2223. goto err_close_tises;
  2224. }
  2225. return (0);
  2226. err_close_tises:
  2227. for (tc--; tc >= 0; tc--)
  2228. mlx5e_close_tis(priv, tc);
  2229. return (err);
  2230. }
  2231. static void
  2232. mlx5e_close_tises(struct mlx5e_priv *priv)
  2233. {
  2234. int num_tc = priv->num_tc;
  2235. int tc;
  2236. for (tc = 0; tc < num_tc; tc++)
  2237. mlx5e_close_tis(priv, tc);
  2238. }
  2239. static int
  2240. mlx5e_open_rqt(struct mlx5e_priv *priv)
  2241. {
  2242. struct mlx5_core_dev *mdev = priv->mdev;
  2243. u32 *in;
  2244. u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
  2245. void *rqtc;
  2246. int inlen;
  2247. int err;
  2248. int sz;
  2249. int i;
  2250. sz = 1 << priv->params.rx_hash_log_tbl_sz;
  2251. inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
  2252. in = mlx5_vzalloc(inlen);
  2253. if (in == NULL)
  2254. return (-ENOMEM);
  2255. rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
  2256. MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
  2257. MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
  2258. for (i = 0; i < sz; i++) {
  2259. int ix = i;
  2260. #ifdef RSS
  2261. ix = rss_get_indirection_to_bucket(ix);
  2262. #endif
  2263. /* ensure we don't overflow */
  2264. ix %= priv->params.num_channels;
  2265. /* apply receive side scaling stride, if any */
  2266. ix -= ix % (int)priv->params.channels_rsss;
  2267. MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix].rq.rqn);
  2268. }
  2269. MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
  2270. err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
  2271. if (!err)
  2272. priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
  2273. kvfree(in);
  2274. return (err);
  2275. }
  2276. static void
  2277. mlx5e_close_rqt(struct mlx5e_priv *priv)
  2278. {
  2279. u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0};
  2280. u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0};
  2281. MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
  2282. MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
  2283. mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
  2284. }
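/*
 * Build a TIR context for the given traffic type. MLX5E_TT_ANY
 * dispatches directly to the first channel's RQ; all other types use
 * the RQ indirection table with a Toeplitz RX hash over the fields
 * selected per traffic type below.
 */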
  2285. static void
  2286. mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt)
  2287. {
  2288. void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
  2289. __be32 *hkey;
  2290. MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
  2291. #define ROUGH_MAX_L2_L3_HDR_SZ 256
  2292. #define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
  2293. MLX5_HASH_FIELD_SEL_DST_IP)
  2294. #define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\
  2295. MLX5_HASH_FIELD_SEL_DST_IP |\
  2296. MLX5_HASH_FIELD_SEL_L4_SPORT |\
  2297. MLX5_HASH_FIELD_SEL_L4_DPORT)
  2298. #define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
  2299. MLX5_HASH_FIELD_SEL_DST_IP |\
  2300. MLX5_HASH_FIELD_SEL_IPSEC_SPI)
  2301. if (priv->params.hw_lro_en) {
  2302. MLX5_SET(tirc, tirc, lro_enable_mask,
  2303. MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
  2304. MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
  2305. MLX5_SET(tirc, tirc, lro_max_msg_sz,
  2306. (priv->params.lro_wqe_sz -
  2307. ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
  2308. /* TODO: add the option to choose timer value dynamically */
  2309. MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
  2310. MLX5_CAP_ETH(priv->mdev,
  2311. lro_timer_supported_periods[2]));
  2312. }
  2313. /* setup parameters for hashing TIR type, if any */
  2314. switch (tt) {
  2315. case MLX5E_TT_ANY:
  2316. MLX5_SET(tirc, tirc, disp_type,
  2317. MLX5_TIRC_DISP_TYPE_DIRECT);
  2318. MLX5_SET(tirc, tirc, inline_rqn,
  2319. priv->channel[0].rq.rqn);
  2320. break;
  2321. default:
  2322. MLX5_SET(tirc, tirc, disp_type,
  2323. MLX5_TIRC_DISP_TYPE_INDIRECT);
  2324. MLX5_SET(tirc, tirc, indirect_table,
  2325. priv->rqtn);
  2326. MLX5_SET(tirc, tirc, rx_hash_fn,
  2327. MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
  2328. hkey = (__be32 *) MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
  2329. #ifdef RSS
  2330. /*
2331. * The FreeBSD RSS implementation does not currently
2332. * support symmetric Toeplitz hashes.
  2333. */
  2334. MLX5_SET(tirc, tirc, rx_hash_symmetric, 0);
  2335. rss_getkey((uint8_t *)hkey);
  2336. #else
  2337. MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
  2338. hkey[0] = cpu_to_be32(0xD181C62C);
  2339. hkey[1] = cpu_to_be32(0xF7F4DB5B);
  2340. hkey[2] = cpu_to_be32(0x1983A2FC);
  2341. hkey[3] = cpu_to_be32(0x943E1ADB);
  2342. hkey[4] = cpu_to_be32(0xD9389E6B);
  2343. hkey[5] = cpu_to_be32(0xD1039C2C);
  2344. hkey[6] = cpu_to_be32(0xA74499AD);
  2345. hkey[7] = cpu_to_be32(0x593D56D9);
  2346. hkey[8] = cpu_to_be32(0xF3253C06);
  2347. hkey[9] = cpu_to_be32(0x2ADC1FFC);
  2348. #endif
  2349. break;
  2350. }
  2351. switch (tt) {
  2352. case MLX5E_TT_IPV4_TCP:
  2353. MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
  2354. MLX5_L3_PROT_TYPE_IPV4);
  2355. MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
  2356. MLX5_L4_PROT_TYPE_TCP);
  2357. #ifdef RSS
  2358. if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4)) {
  2359. MLX5_SET(rx_hash_field_select, hfso, selected_fields,
  2360. MLX5_HASH_IP);
  2361. } else
  2362. #endif
  2363. MLX5_SET(rx_hash_field_select, hfso, selected_fields,
  2364. MLX5_HASH_ALL);
  2365. break;
  2366. case MLX5E_TT_IPV6_TCP:
  2367. MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
  2368. MLX5_L3_PROT_TYPE_IPV6);
  2369. MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
  2370. MLX5_L4_PROT_TYPE_TCP);
  2371. #ifdef RSS
  2372. if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6)) {
  2373. MLX5_SET(rx_hash_field_select, hfso, selected_fields,
  2374. MLX5_HASH_IP);
  2375. } else
  2376. #endif
  2377. MLX5_SET(rx_hash_field_select, hfso, selected_fields,
  2378. MLX5_HASH_ALL);
  2379. break;
  2380. case MLX5E_TT_IPV4_UDP:
  2381. MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
  2382. MLX5_L3_PROT_TYPE_IPV4);
  2383. MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
  2384. MLX5_L4_PROT_TYPE_UDP);
  2385. #ifdef RSS
  2386. if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4)) {
  2387. MLX5_SET(rx_hash_field_select, hfso, selected_fields,
  2388. MLX5_HASH_IP);
  2389. } else
  2390. #endif
  2391. MLX5_SET(rx_hash_field_select, hfso, selected_fields,
  2392. MLX5_HASH_ALL);
  2393. break;
  2394. case MLX5E_TT_IPV6_UDP:
  2395. MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
  2396. MLX5_L3_PROT_TYPE_IPV6);
  2397. MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
  2398. MLX5_L4_PROT_TYPE_UDP);
  2399. #ifdef RSS
  2400. if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6)) {
  2401. MLX5_SET(rx_hash_field_select, hfso, selected_fields,
  2402. MLX5_HASH_IP);
  2403. } else
  2404. #endif
  2405. MLX5_SET(rx_hash_field_select, hfso, selected_fields,
  2406. MLX5_HASH_ALL);
  2407. break;
  2408. case MLX5E_TT_IPV4_IPSEC_AH:
  2409. MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
  2410. MLX5_L3_PROT_TYPE_IPV4);
  2411. MLX5_SET(rx_hash_field_select, hfso, selected_fields,
  2412. MLX5_HASH_IP_IPSEC_SPI);
  2413. break;
  2414. case MLX5E_TT_IPV6_IPSEC_AH:
  2415. MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
  2416. MLX5_L3_PROT_TYPE_IPV6);
  2417. MLX5_SET(rx_hash_field_select, hfso, selected_fields,
  2418. MLX5_HASH_IP_IPSEC_SPI);
  2419. break;
  2420. case MLX5E_TT_IPV4_IPSEC_ESP:
  2421. MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
  2422. MLX5_L3_PROT_TYPE_IPV4);
  2423. MLX5_SET(rx_hash_field_select, hfso, selected_fields,
  2424. MLX5_HASH_IP_IPSEC_SPI);
  2425. break;
  2426. case MLX5E_TT_IPV6_IPSEC_ESP:
  2427. MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
  2428. MLX5_L3_PROT_TYPE_IPV6);
  2429. MLX5_SET(rx_hash_field_select, hfso, selected_fields,
  2430. MLX5_HASH_IP_IPSEC_SPI);
  2431. break;
  2432. case MLX5E_TT_IPV4:
  2433. MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
  2434. MLX5_L3_PROT_TYPE_IPV4);
  2435. MLX5_SET(rx_hash_field_select, hfso, selected_fields,
  2436. MLX5_HASH_IP);
  2437. break;
  2438. case MLX5E_TT_IPV6:
  2439. MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
  2440. MLX5_L3_PROT_TYPE_IPV6);
  2441. MLX5_SET(rx_hash_field_select, hfso, selected_fields,
  2442. MLX5_HASH_IP);
  2443. break;
  2444. default:
  2445. break;
  2446. }
  2447. }
  2448. static int
  2449. mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
  2450. {
  2451. struct mlx5_core_dev *mdev = priv->mdev;
  2452. u32 *in;
  2453. void *tirc;
  2454. int inlen;
  2455. int err;
  2456. inlen = MLX5_ST_SZ_BYTES(create_tir_in);
  2457. in = mlx5_vzalloc(inlen);
  2458. if (in == NULL)
  2459. return (-ENOMEM);
  2460. tirc = MLX5_ADDR_OF(create_tir_in, in, tir_context);
  2461. mlx5e_build_tir_ctx(priv, tirc, tt);
  2462. err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
  2463. kvfree(in);
  2464. return (err);
  2465. }
  2466. static void
  2467. mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
  2468. {
  2469. mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
  2470. }
  2471. static int
  2472. mlx5e_open_tirs(struct mlx5e_priv *priv)
  2473. {
  2474. int err;
  2475. int i;
  2476. for (i = 0; i < MLX5E_NUM_TT; i++) {
  2477. err = mlx5e_open_tir(priv, i);
  2478. if (err)
  2479. goto err_close_tirs;
  2480. }
  2481. return (0);
  2482. err_close_tirs:
  2483. for (i--; i >= 0; i--)
  2484. mlx5e_close_tir(priv, i);
  2485. return (err);
  2486. }
  2487. static void
  2488. mlx5e_close_tirs(struct mlx5e_priv *priv)
  2489. {
  2490. int i;
  2491. for (i = 0; i < MLX5E_NUM_TT; i++)
  2492. mlx5e_close_tir(priv, i);
  2493. }
  2494. /*
  2495. * SW MTU does not include headers,
  2496. * HW MTU includes all headers and checksums.
  2497. */
  2498. static int
  2499. mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu)
  2500. {
  2501. struct mlx5e_priv *priv = ifp->if_softc;
  2502. struct mlx5_core_dev *mdev = priv->mdev;
  2503. int hw_mtu;
  2504. int err;
  2505. hw_mtu = MLX5E_SW2HW_MTU(sw_mtu);
  2506. err = mlx5_set_port_mtu(mdev, hw_mtu);
  2507. if (err) {
  2508. mlx5_en_err(ifp, "mlx5_set_port_mtu failed setting %d, err=%d\n",
  2509. sw_mtu, err);
  2510. return (err);
  2511. }
  2512. /* Update vport context MTU */
  2513. err = mlx5_set_vport_mtu(mdev, hw_mtu);
  2514. if (err) {
  2515. mlx5_en_err(ifp,
  2516. "Failed updating vport context with MTU size, err=%d\n",
  2517. err);
  2518. }
  2519. ifp->if_mtu = sw_mtu;
  2520. err = mlx5_query_vport_mtu(mdev, &hw_mtu);
  2521. if (err || !hw_mtu) {
  2522. /* fallback to port oper mtu */
  2523. err = mlx5_query_port_oper_mtu(mdev, &hw_mtu);
  2524. }
  2525. if (err) {
  2526. mlx5_en_err(ifp,
  2527. "Query port MTU, after setting new MTU value, failed\n");
  2528. return (err);
  2529. } else if (MLX5E_HW2SW_MTU(hw_mtu) < sw_mtu) {
2530. err = -E2BIG;
  2531. mlx5_en_err(ifp,
  2532. "Port MTU %d is smaller than ifp mtu %d\n",
  2533. hw_mtu, sw_mtu);
  2534. } else if (MLX5E_HW2SW_MTU(hw_mtu) > sw_mtu) {
  2535. err = -EINVAL;
  2536. mlx5_en_err(ifp,
  2537. "Port MTU %d is bigger than ifp mtu %d\n",
  2538. hw_mtu, sw_mtu);
  2539. }
  2540. priv->params_ethtool.hw_mtu = hw_mtu;
  2541. return (err);
  2542. }
  2543. int
  2544. mlx5e_open_locked(struct ifnet *ifp)
  2545. {
  2546. struct mlx5e_priv *priv = ifp->if_softc;
  2547. int err;
  2548. u16 set_id;
  2549. /* check if already opened */
  2550. if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
  2551. return (0);
  2552. #ifdef RSS
  2553. if (rss_getnumbuckets() > priv->params.num_channels) {
  2554. mlx5_en_info(ifp,
  2555. "NOTE: There are more RSS buckets(%u) than channels(%u) available\n",
  2556. rss_getnumbuckets(), priv->params.num_channels);
  2557. }
  2558. #endif
  2559. err = mlx5e_open_tises(priv);
  2560. if (err) {
  2561. mlx5_en_err(ifp, "mlx5e_open_tises failed, %d\n", err);
  2562. return (err);
  2563. }
  2564. err = mlx5_vport_alloc_q_counter(priv->mdev,
  2565. MLX5_INTERFACE_PROTOCOL_ETH, &set_id);
  2566. if (err) {
  2567. mlx5_en_err(priv->ifp,
  2568. "mlx5_vport_alloc_q_counter failed: %d\n", err);
  2569. goto err_close_tises;
  2570. }
  2571. /* store counter set ID */
  2572. priv->counter_set_id = set_id;
  2573. err = mlx5e_open_channels(priv);
  2574. if (err) {
  2575. mlx5_en_err(ifp,
  2576. "mlx5e_open_channels failed, %d\n", err);
  2577. goto err_dalloc_q_counter;
  2578. }
  2579. err = mlx5e_open_rqt(priv);
  2580. if (err) {
  2581. mlx5_en_err(ifp, "mlx5e_open_rqt failed, %d\n", err);
  2582. goto err_close_channels;
  2583. }
  2584. err = mlx5e_open_tirs(priv);
  2585. if (err) {
  2586. mlx5_en_err(ifp, "mlx5e_open_tir failed, %d\n", err);
  2587. goto err_close_rqls;
  2588. }
  2589. err = mlx5e_open_flow_table(priv);
  2590. if (err) {
  2591. mlx5_en_err(ifp,
  2592. "mlx5e_open_flow_table failed, %d\n", err);
  2593. goto err_close_tirs;
  2594. }
  2595. err = mlx5e_add_all_vlan_rules(priv);
  2596. if (err) {
  2597. mlx5_en_err(ifp,
  2598. "mlx5e_add_all_vlan_rules failed, %d\n", err);
  2599. goto err_close_flow_table;
  2600. }
  2601. set_bit(MLX5E_STATE_OPENED, &priv->state);
  2602. mlx5e_update_carrier(priv);
  2603. mlx5e_set_rx_mode_core(priv);
  2604. return (0);
  2605. err_close_flow_table:
  2606. mlx5e_close_flow_table(priv);
  2607. err_close_tirs:
  2608. mlx5e_close_tirs(priv);
  2609. err_close_rqls:
  2610. mlx5e_close_rqt(priv);
  2611. err_close_channels:
  2612. mlx5e_close_channels(priv);
  2613. err_dalloc_q_counter:
  2614. mlx5_vport_dealloc_q_counter(priv->mdev,
  2615. MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
  2616. err_close_tises:
  2617. mlx5e_close_tises(priv);
  2618. return (err);
  2619. }
  2620. static void
  2621. mlx5e_open(void *arg)
  2622. {
  2623. struct mlx5e_priv *priv = arg;
  2624. PRIV_LOCK(priv);
  2625. if (mlx5_set_port_status(priv->mdev, MLX5_PORT_UP))
  2626. mlx5_en_err(priv->ifp,
  2627. "Setting port status to up failed\n");
  2628. mlx5e_open_locked(priv->ifp);
  2629. priv->ifp->if_drv_flags |= IFF_DRV_RUNNING;
  2630. PRIV_UNLOCK(priv);
  2631. }
  2632. int
  2633. mlx5e_close_locked(struct ifnet *ifp)
  2634. {
  2635. struct mlx5e_priv *priv = ifp->if_softc;
  2636. /* check if already closed */
  2637. if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
  2638. return (0);
  2639. clear_bit(MLX5E_STATE_OPENED, &priv->state);
  2640. mlx5e_set_rx_mode_core(priv);
  2641. mlx5e_del_all_vlan_rules(priv);
  2642. if_link_state_change(priv->ifp, LINK_STATE_DOWN);
  2643. mlx5e_close_flow_table(priv);
  2644. mlx5e_close_tirs(priv);
  2645. mlx5e_close_rqt(priv);
  2646. mlx5e_close_channels(priv);
  2647. mlx5_vport_dealloc_q_counter(priv->mdev,
  2648. MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
  2649. mlx5e_close_tises(priv);
  2650. return (0);
  2651. }
  2652. #if (__FreeBSD_version >= 1100000)
  2653. static uint64_t
  2654. mlx5e_get_counter(struct ifnet *ifp, ift_counter cnt)
  2655. {
  2656. struct mlx5e_priv *priv = ifp->if_softc;
  2657. u64 retval;
  2658. /* PRIV_LOCK(priv); XXX not allowed */
  2659. switch (cnt) {
  2660. case IFCOUNTER_IPACKETS:
  2661. retval = priv->stats.vport.rx_packets;
  2662. break;
  2663. case IFCOUNTER_IERRORS:
  2664. retval = priv->stats.pport.in_range_len_errors +
  2665. priv->stats.pport.out_of_range_len +
  2666. priv->stats.pport.too_long_errors +
  2667. priv->stats.pport.check_seq_err +
  2668. priv->stats.pport.alignment_err;
  2669. break;
  2670. case IFCOUNTER_IQDROPS:
  2671. retval = priv->stats.vport.rx_out_of_buffer;
  2672. break;
  2673. case IFCOUNTER_OPACKETS:
  2674. retval = priv->stats.vport.tx_packets;
  2675. break;
  2676. case IFCOUNTER_OERRORS:
  2677. retval = priv->stats.port_stats_debug.out_discards;
  2678. break;
  2679. case IFCOUNTER_IBYTES:
  2680. retval = priv->stats.vport.rx_bytes;
  2681. break;
  2682. case IFCOUNTER_OBYTES:
  2683. retval = priv->stats.vport.tx_bytes;
  2684. break;
  2685. case IFCOUNTER_IMCASTS:
  2686. retval = priv->stats.vport.rx_multicast_packets;
  2687. break;
  2688. case IFCOUNTER_OMCASTS:
  2689. retval = priv->stats.vport.tx_multicast_packets;
  2690. break;
  2691. case IFCOUNTER_OQDROPS:
  2692. retval = priv->stats.vport.tx_queue_dropped;
  2693. break;
  2694. case IFCOUNTER_COLLISIONS:
  2695. retval = priv->stats.pport.collisions;
  2696. break;
  2697. default:
  2698. retval = if_get_counter_default(ifp, cnt);
  2699. break;
  2700. }
  2701. /* PRIV_UNLOCK(priv); XXX not allowed */
  2702. return (retval);
  2703. }
  2704. #endif
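/* Defer RX filter reprogramming to the driver's work queue. */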
  2705. static void
  2706. mlx5e_set_rx_mode(struct ifnet *ifp)
  2707. {
  2708. struct mlx5e_priv *priv = ifp->if_softc;
  2709. queue_work(priv->wq, &priv->set_rx_mode_work);
  2710. }
  2711. static int
  2712. mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
  2713. {
  2714. struct mlx5e_priv *priv;
  2715. struct ifreq *ifr;
  2716. struct ifi2creq i2c;
  2717. int error = 0;
  2718. int mask = 0;
  2719. int size_read = 0;
  2720. int module_status;
  2721. int module_num;
  2722. int max_mtu;
  2723. uint8_t read_addr;
  2724. priv = ifp->if_softc;
  2725. /* check if detaching */
  2726. if (priv == NULL || priv->gone != 0)
  2727. return (ENXIO);
  2728. switch (command) {
  2729. case SIOCSIFMTU:
  2730. ifr = (struct ifreq *)data;
  2731. PRIV_LOCK(priv);
  2732. mlx5_query_port_max_mtu(priv->mdev, &max_mtu);
  2733. if (ifr->ifr_mtu >= MLX5E_MTU_MIN &&
  2734. ifr->ifr_mtu <= MIN(MLX5E_MTU_MAX, max_mtu)) {
  2735. int was_opened;
  2736. was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
  2737. if (was_opened)
  2738. mlx5e_close_locked(ifp);
  2739. /* set new MTU */
  2740. mlx5e_set_dev_port_mtu(ifp, ifr->ifr_mtu);
  2741. if (was_opened)
  2742. mlx5e_open_locked(ifp);
  2743. } else {
  2744. error = EINVAL;
  2745. mlx5_en_err(ifp,
  2746. "Invalid MTU value. Min val: %d, Max val: %d\n",
  2747. MLX5E_MTU_MIN, MIN(MLX5E_MTU_MAX, max_mtu));
  2748. }
  2749. PRIV_UNLOCK(priv);
  2750. break;
  2751. case SIOCSIFFLAGS:
  2752. if ((ifp->if_flags & IFF_UP) &&
  2753. (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
  2754. mlx5e_set_rx_mode(ifp);
  2755. break;
  2756. }
  2757. PRIV_LOCK(priv);
  2758. if (ifp->if_flags & IFF_UP) {
  2759. if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
  2760. if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
  2761. mlx5e_open_locked(ifp);
  2762. ifp->if_drv_flags |= IFF_DRV_RUNNING;
  2763. mlx5_set_port_status(priv->mdev, MLX5_PORT_UP);
  2764. }
  2765. } else {
  2766. if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
  2767. mlx5_set_port_status(priv->mdev,
  2768. MLX5_PORT_DOWN);
  2769. if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
  2770. mlx5e_close_locked(ifp);
  2771. mlx5e_update_carrier(priv);
  2772. ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
  2773. }
  2774. }
  2775. PRIV_UNLOCK(priv);
  2776. break;
  2777. case SIOCADDMULTI:
  2778. case SIOCDELMULTI:
  2779. mlx5e_set_rx_mode(ifp);
  2780. break;
  2781. case SIOCSIFMEDIA:
  2782. case SIOCGIFMEDIA:
  2783. case SIOCGIFXMEDIA:
  2784. ifr = (struct ifreq *)data;
  2785. error = ifmedia_ioctl(ifp, ifr, &priv->media, command);
  2786. break;
  2787. case SIOCSIFCAP:
  2788. ifr = (struct ifreq *)data;
  2789. PRIV_LOCK(priv);
  2790. mask = ifr->ifr_reqcap ^ ifp->if_capenable;
  2791. if (mask & IFCAP_TXCSUM) {
  2792. ifp->if_capenable ^= IFCAP_TXCSUM;
  2793. ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
  2794. if (IFCAP_TSO4 & ifp->if_capenable &&
  2795. !(IFCAP_TXCSUM & ifp->if_capenable)) {
  2796. mask &= ~IFCAP_TSO4;
  2797. ifp->if_capenable &= ~IFCAP_TSO4;
  2798. ifp->if_hwassist &= ~CSUM_IP_TSO;
  2799. mlx5_en_err(ifp,
  2800. "tso4 disabled due to -txcsum.\n");
  2801. }
  2802. }
  2803. if (mask & IFCAP_TXCSUM_IPV6) {
  2804. ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
  2805. ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
  2806. if (IFCAP_TSO6 & ifp->if_capenable &&
  2807. !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
  2808. mask &= ~IFCAP_TSO6;
  2809. ifp->if_capenable &= ~IFCAP_TSO6;
  2810. ifp->if_hwassist &= ~CSUM_IP6_TSO;
  2811. mlx5_en_err(ifp,
  2812. "tso6 disabled due to -txcsum6.\n");
  2813. }
  2814. }
  2815. if (mask & IFCAP_RXCSUM)
  2816. ifp->if_capenable ^= IFCAP_RXCSUM;
  2817. if (mask & IFCAP_RXCSUM_IPV6)
  2818. ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
  2819. if (mask & IFCAP_TSO4) {
  2820. if (!(IFCAP_TSO4 & ifp->if_capenable) &&
  2821. !(IFCAP_TXCSUM & ifp->if_capenable)) {
  2822. mlx5_en_err(ifp, "enable txcsum first.\n");
  2823. error = EAGAIN;
  2824. goto out;
  2825. }
  2826. ifp->if_capenable ^= IFCAP_TSO4;
  2827. ifp->if_hwassist ^= CSUM_IP_TSO;
  2828. }
  2829. if (mask & IFCAP_TSO6) {
  2830. if (!(IFCAP_TSO6 & ifp->if_capenable) &&
  2831. !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
  2832. mlx5_en_err(ifp, "enable txcsum6 first.\n");
  2833. error = EAGAIN;
  2834. goto out;
  2835. }
  2836. ifp->if_capenable ^= IFCAP_TSO6;
  2837. ifp->if_hwassist ^= CSUM_IP6_TSO;
  2838. }
  2839. if (mask & IFCAP_VLAN_HWFILTER) {
  2840. if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
  2841. mlx5e_disable_vlan_filter(priv);
  2842. else
  2843. mlx5e_enable_vlan_filter(priv);
  2844. ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
  2845. }
  2846. if (mask & IFCAP_VLAN_HWTAGGING)
  2847. ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
  2848. if (mask & IFCAP_WOL_MAGIC)
  2849. ifp->if_capenable ^= IFCAP_WOL_MAGIC;
  2850. VLAN_CAPABILITIES(ifp);
2851. /* turning off LRO also turns off HW LRO, if it is on */
  2852. if (mask & IFCAP_LRO) {
  2853. int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
  2854. bool need_restart = false;
  2855. ifp->if_capenable ^= IFCAP_LRO;
  2856. /* figure out if updating HW LRO is needed */
  2857. if (!(ifp->if_capenable & IFCAP_LRO)) {
  2858. if (priv->params.hw_lro_en) {
  2859. priv->params.hw_lro_en = false;
  2860. need_restart = true;
  2861. }
  2862. } else {
  2863. if (priv->params.hw_lro_en == false &&
  2864. priv->params_ethtool.hw_lro != 0) {
  2865. priv->params.hw_lro_en = true;
  2866. need_restart = true;
  2867. }
  2868. }
  2869. if (was_opened && need_restart) {
  2870. mlx5e_close_locked(ifp);
  2871. mlx5e_open_locked(ifp);
  2872. }
  2873. }
  2874. if (mask & IFCAP_HWRXTSTMP) {
  2875. ifp->if_capenable ^= IFCAP_HWRXTSTMP;
  2876. if (ifp->if_capenable & IFCAP_HWRXTSTMP) {
  2877. if (priv->clbr_done == 0)
  2878. mlx5e_reset_calibration_callout(priv);
  2879. } else {
  2880. callout_drain(&priv->tstmp_clbr);
  2881. priv->clbr_done = 0;
  2882. }
  2883. }
  2884. out:
  2885. PRIV_UNLOCK(priv);
  2886. break;
  2887. case SIOCGI2C:
  2888. ifr = (struct ifreq *)data;
  2889. /*
  2890. * Copy from the user-space address ifr_data to the
  2891. * kernel-space address i2c
  2892. */
  2893. error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
  2894. if (error)
  2895. break;
  2896. if (i2c.len > sizeof(i2c.data)) {
  2897. error = EINVAL;
  2898. break;
  2899. }
  2900. PRIV_LOCK(priv);
  2901. /* Get module_num which is required for the query_eeprom */
  2902. error = mlx5_query_module_num(priv->mdev, &module_num);
  2903. if (error) {
  2904. mlx5_en_err(ifp,
  2905. "Query module num failed, eeprom reading is not supported\n");
  2906. error = EINVAL;
  2907. goto err_i2c;
  2908. }
  2909. /* Check if module is present before doing an access */
  2910. module_status = mlx5_query_module_status(priv->mdev, module_num);
  2911. if (module_status != MLX5_MODULE_STATUS_PLUGGED_ENABLED) {
  2912. error = EINVAL;
  2913. goto err_i2c;
  2914. }
  2915. /*
2916. * Currently 0xA0 and 0xA2 are the only addresses permitted.
  2917. * The internal conversion is as follows:
  2918. */
  2919. if (i2c.dev_addr == 0xA0)
  2920. read_addr = MLX5_I2C_ADDR_LOW;
  2921. else if (i2c.dev_addr == 0xA2)
  2922. read_addr = MLX5_I2C_ADDR_HIGH;
  2923. else {
  2924. mlx5_en_err(ifp,
  2925. "Query eeprom failed, Invalid Address: %X\n",
  2926. i2c.dev_addr);
  2927. error = EINVAL;
  2928. goto err_i2c;
  2929. }
  2930. error = mlx5_query_eeprom(priv->mdev,
  2931. read_addr, MLX5_EEPROM_LOW_PAGE,
  2932. (uint32_t)i2c.offset, (uint32_t)i2c.len, module_num,
  2933. (uint32_t *)i2c.data, &size_read);
  2934. if (error) {
  2935. mlx5_en_err(ifp,
  2936. "Query eeprom failed, eeprom reading is not supported\n");
  2937. error = EINVAL;
  2938. goto err_i2c;
  2939. }
  2940. if (i2c.len > MLX5_EEPROM_MAX_BYTES) {
  2941. error = mlx5_query_eeprom(priv->mdev,
  2942. read_addr, MLX5_EEPROM_LOW_PAGE,
  2943. (uint32_t)(i2c.offset + size_read),
  2944. (uint32_t)(i2c.len - size_read), module_num,
  2945. (uint32_t *)(i2c.data + size_read), &size_read);
  2946. }
  2947. if (error) {
  2948. mlx5_en_err(ifp,
  2949. "Query eeprom failed, eeprom reading is not supported\n");
  2950. error = EINVAL;
  2951. goto err_i2c;
  2952. }
  2953. error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
  2954. err_i2c:
  2955. PRIV_UNLOCK(priv);
  2956. break;
  2957. default:
  2958. error = ether_ioctl(ifp, command, data);
  2959. break;
  2960. }
  2961. return (error);
  2962. }
  2963. static int
  2964. mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
  2965. {
  2966. /*
2967. * TODO: uncomment once FW really sets all these bits if
  2968. * (!mdev->caps.eth.rss_ind_tbl_cap || !mdev->caps.eth.csum_cap ||
  2969. * !mdev->caps.eth.max_lso_cap || !mdev->caps.eth.vlan_cap ||
  2970. * !(mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD)) return
  2971. * -ENOTSUPP;
  2972. */
2973. /* TODO: add more must-have features */
  2974. if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
  2975. return (-ENODEV);
  2976. return (0);
  2977. }
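/*
 * Maximum number of bytes the driver may inline into a TX WQE: half of
 * the blue-flame register minus the WQE control overhead, clamped to
 * the driver's minimum and maximum limits.
 */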
  2978. static u16
  2979. mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
  2980. {
  2981. const int min_size = ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN;
  2982. const int max_size = MLX5E_MAX_TX_INLINE;
  2983. const int bf_buf_size =
  2984. ((1U << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2U) -
  2985. (sizeof(struct mlx5e_tx_wqe) - 2);
  2986. /* verify against driver limits */
  2987. if (bf_buf_size > max_size)
  2988. return (max_size);
  2989. else if (bf_buf_size < min_size)
  2990. return (min_size);
  2991. else
  2992. return (bf_buf_size);
  2993. }
  2994. static int
  2995. mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev,
  2996. struct mlx5e_priv *priv,
  2997. int num_comp_vectors)
  2998. {
  2999. int err;
  3000. /*
  3001. * TODO: Consider link speed for setting "log_sq_size",
  3002. * "log_rq_size" and "cq_moderation_xxx":
  3003. */
  3004. priv->params.log_sq_size =
  3005. MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
  3006. priv->params.log_rq_size =
  3007. MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
  3008. priv->params.rx_cq_moderation_usec =
  3009. MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
  3010. MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE :
  3011. MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
  3012. priv->params.rx_cq_moderation_mode =
  3013. MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 1 : 0;
  3014. priv->params.rx_cq_moderation_pkts =
  3015. MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
  3016. priv->params.tx_cq_moderation_usec =
  3017. MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
  3018. priv->params.tx_cq_moderation_pkts =
  3019. MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
  3020. priv->params.min_rx_wqes =
  3021. MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
  3022. priv->params.rx_hash_log_tbl_sz =
  3023. (order_base_2(num_comp_vectors) >
  3024. MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
  3025. order_base_2(num_comp_vectors) :
  3026. MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
  3027. priv->params.num_tc = 1;
  3028. priv->params.default_vlan_prio = 0;
  3029. priv->counter_set_id = -1;
  3030. priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
  3031. err = mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
  3032. if (err)
  3033. return (err);
  3034. /*
3035. * HW LRO currently defaults to off. Once that changes we will
3036. * consider the HW capability: "!!MLX5_CAP_ETH(mdev, lro_cap)"
  3037. */
  3038. priv->params.hw_lro_en = false;
  3039. priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
  3040. /*
3041. * CQE zipping currently defaults to off. Once that changes we
3042. * will consider the HW capability:
  3043. * "!!MLX5_CAP_GEN(mdev, cqe_compression)"
  3044. */
  3045. priv->params.cqe_zipping_en = false;
  3046. priv->mdev = mdev;
  3047. priv->params.num_channels = num_comp_vectors;
  3048. priv->params.channels_rsss = 1;
  3049. priv->order_base_2_num_channels = order_base_2(num_comp_vectors);
  3050. priv->queue_mapping_channel_mask =
  3051. roundup_pow_of_two(num_comp_vectors) - 1;
  3052. priv->num_tc = priv->params.num_tc;
  3053. priv->default_vlan_prio = priv->params.default_vlan_prio;
  3054. INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
  3055. INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
  3056. INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
  3057. return (0);
  3058. }
  3059. static int
  3060. mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
  3061. struct mlx5_core_mr *mkey)
  3062. {
  3063. struct ifnet *ifp = priv->ifp;
  3064. struct mlx5_core_dev *mdev = priv->mdev;
  3065. int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
  3066. void *mkc;
  3067. u32 *in;
  3068. int err;
  3069. in = mlx5_vzalloc(inlen);
  3070. if (in == NULL) {
  3071. mlx5_en_err(ifp, "failed to allocate inbox\n");
  3072. return (-ENOMEM);
  3073. }
  3074. mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
  3075. MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA);
  3076. MLX5_SET(mkc, mkc, lw, 1);
  3077. MLX5_SET(mkc, mkc, lr, 1);
  3078. MLX5_SET(mkc, mkc, pd, pdn);
  3079. MLX5_SET(mkc, mkc, length64, 1);
  3080. MLX5_SET(mkc, mkc, qpn, 0xffffff);
  3081. err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
  3082. if (err)
  3083. mlx5_en_err(ifp, "mlx5_core_create_mkey failed, %d\n",
  3084. err);
  3085. kvfree(in);
  3086. return (err);
  3087. }
  3088. static const char *mlx5e_vport_stats_desc[] = {
  3089. MLX5E_VPORT_STATS(MLX5E_STATS_DESC)
  3090. };
  3091. static const char *mlx5e_pport_stats_desc[] = {
  3092. MLX5E_PPORT_STATS(MLX5E_STATS_DESC)
  3093. };
  3094. static void
  3095. mlx5e_priv_static_init(struct mlx5e_priv *priv, const uint32_t channels)
  3096. {
  3097. uint32_t x;
  3098. mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF);
  3099. sx_init(&priv->state_lock, "mlx5state");
  3100. callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0);
  3101. MLX5_INIT_DOORBELL_LOCK(&priv->doorbell_lock);
  3102. for (x = 0; x != channels; x++)
  3103. mlx5e_chan_static_init(priv, &priv->channel[x], x);
  3104. }
  3105. static void
  3106. mlx5e_priv_static_destroy(struct mlx5e_priv *priv, const uint32_t channels)
  3107. {
  3108. uint32_t x;
  3109. for (x = 0; x != channels; x++)
  3110. mlx5e_chan_static_destroy(&priv->channel[x]);
  3111. callout_drain(&priv->watchdog);
  3112. mtx_destroy(&priv->async_events_mtx);
  3113. sx_destroy(&priv->state_lock);
  3114. }
  3115. static int
  3116. sysctl_firmware(SYSCTL_HANDLER_ARGS)
  3117. {
  3118. /*
3119. * "%d.%d.%d" is the string format.
  3120. * fw_rev_{maj,min,sub} return u16, 2^16 = 65536.
  3121. * We need at most 5 chars to store that.
3122. * It also has two "." separators and a terminating NUL, which means we need 18
  3123. * (5*3 + 3) chars at most.
  3124. */
  3125. char fw[18];
  3126. struct mlx5e_priv *priv = arg1;
  3127. int error;
  3128. snprintf(fw, sizeof(fw), "%d.%d.%d", fw_rev_maj(priv->mdev), fw_rev_min(priv->mdev),
  3129. fw_rev_sub(priv->mdev));
  3130. error = sysctl_handle_string(oidp, fw, sizeof(fw), req);
  3131. return (error);
  3132. }
  3133. static void
  3134. mlx5e_disable_tx_dma(struct mlx5e_channel *ch)
  3135. {
  3136. int i;
  3137. for (i = 0; i < ch->priv->num_tc; i++)
  3138. mlx5e_drain_sq(&ch->sq[i]);
  3139. }
  3140. static void
  3141. mlx5e_reset_sq_doorbell_record(struct mlx5e_sq *sq)
  3142. {
  3143. sq->doorbell.d32[0] = cpu_to_be32(MLX5_OPCODE_NOP);
  3144. sq->doorbell.d32[1] = cpu_to_be32(sq->sqn << 8);
  3145. mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
  3146. sq->doorbell.d64 = 0;
  3147. }
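/*
 * Re-enable a previously drained SQ: move it from ERR back to RST,
 * reset the producer/consumer counters and the doorbell record, then
 * transition to RDY and mark the queue running again.
 */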
  3148. void
  3149. mlx5e_resume_sq(struct mlx5e_sq *sq)
  3150. {
  3151. int err;
  3152. /* check if already enabled */
  3153. if (READ_ONCE(sq->running) != 0)
  3154. return;
  3155. err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_ERR,
  3156. MLX5_SQC_STATE_RST);
  3157. if (err != 0) {
  3158. mlx5_en_err(sq->ifp,
  3159. "mlx5e_modify_sq() from ERR to RST failed: %d\n", err);
  3160. }
  3161. sq->cc = 0;
  3162. sq->pc = 0;
  3163. /* reset doorbell prior to moving from RST to RDY */
  3164. mlx5e_reset_sq_doorbell_record(sq);
  3165. err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST,
  3166. MLX5_SQC_STATE_RDY);
  3167. if (err != 0) {
  3168. mlx5_en_err(sq->ifp,
  3169. "mlx5e_modify_sq() from RST to RDY failed: %d\n", err);
  3170. }
  3171. sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
  3172. WRITE_ONCE(sq->running, 1);
  3173. }
  3174. static void
  3175. mlx5e_enable_tx_dma(struct mlx5e_channel *ch)
  3176. {
  3177. int i;
  3178. for (i = 0; i < ch->priv->num_tc; i++)
  3179. mlx5e_resume_sq(&ch->sq[i]);
  3180. }
  3181. static void
  3182. mlx5e_disable_rx_dma(struct mlx5e_channel *ch)
  3183. {
  3184. struct mlx5e_rq *rq = &ch->rq;
  3185. int err;
  3186. mtx_lock(&rq->mtx);
  3187. rq->enabled = 0;
  3188. callout_stop(&rq->watchdog);
  3189. mtx_unlock(&rq->mtx);
  3190. err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
  3191. if (err != 0) {
  3192. mlx5_en_err(rq->ifp,
  3193. "mlx5e_modify_rq() from RDY to RST failed: %d\n", err);
  3194. }
  3195. while (!mlx5_wq_ll_is_empty(&rq->wq)) {
  3196. msleep(1);
  3197. rq->cq.mcq.comp(&rq->cq.mcq);
  3198. }
  3199. /*
3200. * Transitioning into the RST state lets the FW track fewer ERR-state
3201. * queues, thus reducing the receive queue flushing time.
  3202. */
  3203. err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_ERR, MLX5_RQC_STATE_RST);
  3204. if (err != 0) {
  3205. mlx5_en_err(rq->ifp,
  3206. "mlx5e_modify_rq() from ERR to RST failed: %d\n", err);
  3207. }
  3208. }

static void
mlx5e_enable_rx_dma(struct mlx5e_channel *ch)
{
	struct mlx5e_rq *rq = &ch->rq;
	int err;

	rq->wq.wqe_ctr = 0;
	mlx5_wq_ll_update_db_record(&rq->wq);
	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err != 0) {
		mlx5_en_err(rq->ifp,
		    "mlx5e_modify_rq() from RST to RDY failed: %d\n", err);
	}

	rq->enabled = 1;

	rq->cq.mcq.comp(&rq->cq.mcq);
}
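
/*
 * Suspend (value != 0) or resume (value == 0) DMA on all TX respectively
 * RX channels. These helpers are no-ops unless the interface is opened.
 */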
void
mlx5e_modify_tx_dma(struct mlx5e_priv *priv, uint8_t value)
{
	int i;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
		return;

	for (i = 0; i < priv->params.num_channels; i++) {
		if (value)
			mlx5e_disable_tx_dma(&priv->channel[i]);
		else
			mlx5e_enable_tx_dma(&priv->channel[i]);
	}
}

void
mlx5e_modify_rx_dma(struct mlx5e_priv *priv, uint8_t value)
{
	int i;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
		return;

	for (i = 0; i < priv->params.num_channels; i++) {
		if (value)
			mlx5e_disable_rx_dma(&priv->channel[i]);
		else
			mlx5e_enable_rx_dma(&priv->channel[i]);
	}
}

static void
mlx5e_add_hw_stats(struct mlx5e_priv *priv)
{
	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, priv, 0,
	    sysctl_firmware, "A", "HCA firmware version");

	SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
	    OID_AUTO, "board_id", CTLFLAG_RD, priv->mdev->board_id, 0,
	    "Board ID");
}
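
/*
 * Expose the per-priority flow control, PFC, configuration as an array
 * of eight bytes, one per priority, and push any change to the port.
 */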
static int
mlx5e_sysctl_tx_priority_flow_control(SYSCTL_HANDLER_ARGS)
{
	struct mlx5e_priv *priv = arg1;
	uint8_t temp[MLX5E_MAX_PRIORITY];
	uint32_t tx_pfc;
	int err;
	int i;

	PRIV_LOCK(priv);

	tx_pfc = priv->params.tx_priority_flow_control;

	for (i = 0; i != MLX5E_MAX_PRIORITY; i++)
		temp[i] = (tx_pfc >> i) & 1;

	err = SYSCTL_OUT(req, temp, MLX5E_MAX_PRIORITY);
	if (err || !req->newptr)
		goto done;
	err = SYSCTL_IN(req, temp, MLX5E_MAX_PRIORITY);
	if (err)
		goto done;

	priv->params.tx_priority_flow_control = 0;

	/* range check input value */
	for (i = 0; i != MLX5E_MAX_PRIORITY; i++) {
		if (temp[i] > 1) {
			err = ERANGE;
			goto done;
		}
		priv->params.tx_priority_flow_control |= (temp[i] << i);
	}

	/* check if update is required */
	if (tx_pfc != priv->params.tx_priority_flow_control)
		err = -mlx5e_set_port_pfc(priv);
done:
	if (err != 0)
		priv->params.tx_priority_flow_control = tx_pfc;
	PRIV_UNLOCK(priv);
	return (err);
}

static int
mlx5e_sysctl_rx_priority_flow_control(SYSCTL_HANDLER_ARGS)
{
	struct mlx5e_priv *priv = arg1;
	uint8_t temp[MLX5E_MAX_PRIORITY];
	uint32_t rx_pfc;
	int err;
	int i;

	PRIV_LOCK(priv);

	rx_pfc = priv->params.rx_priority_flow_control;

	for (i = 0; i != MLX5E_MAX_PRIORITY; i++)
		temp[i] = (rx_pfc >> i) & 1;

	err = SYSCTL_OUT(req, temp, MLX5E_MAX_PRIORITY);
	if (err || !req->newptr)
		goto done;
	err = SYSCTL_IN(req, temp, MLX5E_MAX_PRIORITY);
	if (err)
		goto done;

	priv->params.rx_priority_flow_control = 0;

	/* range check input value */
	for (i = 0; i != MLX5E_MAX_PRIORITY; i++) {
		if (temp[i] > 1) {
			err = ERANGE;
			goto done;
		}
		priv->params.rx_priority_flow_control |= (temp[i] << i);
	}

	/* check if update is required */
	if (rx_pfc != priv->params.rx_priority_flow_control) {
		err = -mlx5e_set_port_pfc(priv);
		if (err == 0 && priv->sw_is_port_buf_owner)
			err = mlx5e_update_buf_lossy(priv);
	}
done:
	if (err != 0)
		priv->params.rx_priority_flow_control = rx_pfc;
	PRIV_UNLOCK(priv);
	return (err);
}
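
/*
 * Establish the default pause frame and PFC configuration, expose it
 * through sysctl nodes and program the initial settings into firmware.
 */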
static void
mlx5e_setup_pauseframes(struct mlx5e_priv *priv)
{
#if (__FreeBSD_version < 1100000)
	char path[96];
#endif
	int error;

	/* enable pauseframes by default */
	priv->params.tx_pauseframe_control = 1;
	priv->params.rx_pauseframe_control = 1;

	/* disable ports flow control, PFC, by default */
	priv->params.tx_priority_flow_control = 0;
	priv->params.rx_priority_flow_control = 0;

#if (__FreeBSD_version < 1100000)
	/* compute path for sysctl */
	snprintf(path, sizeof(path), "dev.mce.%d.tx_pauseframe_control",
	    device_get_unit(priv->mdev->pdev->dev.bsddev));

	/* try to fetch tunable, if any */
	TUNABLE_INT_FETCH(path, &priv->params.tx_pauseframe_control);

	/* compute path for sysctl */
	snprintf(path, sizeof(path), "dev.mce.%d.rx_pauseframe_control",
	    device_get_unit(priv->mdev->pdev->dev.bsddev));

	/* try to fetch tunable, if any */
	TUNABLE_INT_FETCH(path, &priv->params.rx_pauseframe_control);
#endif

	/* register pauseframe SYSCTLs */
	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    OID_AUTO, "tx_pauseframe_control", CTLFLAG_RDTUN,
	    &priv->params.tx_pauseframe_control, 0,
	    "Set to enable TX pause frames. Clear to disable.");

	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    OID_AUTO, "rx_pauseframe_control", CTLFLAG_RDTUN,
	    &priv->params.rx_pauseframe_control, 0,
	    "Set to enable RX pause frames. Clear to disable.");

	/* register priority flow control, PFC, SYSCTLs */
	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    OID_AUTO, "tx_priority_flow_control", CTLTYPE_U8 | CTLFLAG_RWTUN |
	    CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_tx_priority_flow_control, "CU",
	    "Set to enable TX ports flow control frames for priorities 0..7. Clear to disable.");

	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    OID_AUTO, "rx_priority_flow_control", CTLTYPE_U8 | CTLFLAG_RWTUN |
	    CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_rx_priority_flow_control, "CU",
	    "Set to enable RX ports flow control frames for priorities 0..7. Clear to disable.");

	PRIV_LOCK(priv);

	/* range check */
	priv->params.tx_pauseframe_control =
	    priv->params.tx_pauseframe_control ? 1 : 0;
	priv->params.rx_pauseframe_control =
	    priv->params.rx_pauseframe_control ? 1 : 0;

	/* update firmware */
	error = mlx5e_set_port_pause_and_pfc(priv);
	if (error == -EINVAL) {
		mlx5_en_err(priv->ifp,
		    "Global pauseframes must be disabled before enabling PFC.\n");
		priv->params.rx_priority_flow_control = 0;
		priv->params.tx_priority_flow_control = 0;

		/* update firmware */
		(void) mlx5e_set_port_pause_and_pfc(priv);
	}
	PRIV_UNLOCK(priv);
}
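
/*
 * Allocate an "unlimited" send tag which simply pins the flow to one of
 * the regular transmit channels, selected from the mbuf flow id in the
 * same way as mlx5e_select_queue().
 */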
static int
mlx5e_ul_snd_tag_alloc(struct ifnet *ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
	struct mlx5e_priv *priv;
	struct mlx5e_channel *pch;

	priv = ifp->if_softc;

	if (unlikely(priv->gone || params->hdr.flowtype == M_HASHTYPE_NONE)) {
		return (EOPNOTSUPP);
	} else {
		/* keep this code synced with mlx5e_select_queue() */
		u32 ch = priv->params.num_channels;
#ifdef RSS
		u32 temp;

		if (rss_hash2bucket(params->hdr.flowid,
		    params->hdr.flowtype, &temp) == 0)
			ch = temp % ch;
		else
#endif
			ch = (params->hdr.flowid % 128) % ch;

		/*
		 * NOTE: The channels array is only freed at detach
		 * and it is safe to return a pointer to the send tag
		 * inside the channels structure as long as we
		 * reference the priv.
		 */
		pch = priv->channel + ch;

		/* check if send queue is not running */
		if (unlikely(pch->sq[0].running == 0))
			return (ENXIO);
		mlx5e_ref_channel(priv);
		*ppmt = &pch->tag.m_snd_tag;
		return (0);
	}
}

static int
mlx5e_ul_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params)
{
	struct mlx5e_channel *pch =
	    container_of(pmt, struct mlx5e_channel, tag.m_snd_tag);

	params->unlimited.max_rate = -1ULL;
	params->unlimited.queue_level = mlx5e_sq_queue_level(&pch->sq[0]);
	return (0);
}

static void
mlx5e_ul_snd_tag_free(struct m_snd_tag *pmt)
{
	struct mlx5e_channel *pch =
	    container_of(pmt, struct mlx5e_channel, tag.m_snd_tag);

	mlx5e_unref_channel(pch->priv);
}
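
/*
 * Generic send tag entry points which dispatch to the rate limit or
 * unlimited implementations based on the tag type.
 */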
static int
mlx5e_snd_tag_alloc(struct ifnet *ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
	switch (params->hdr.type) {
#ifdef RATELIMIT
	case IF_SND_TAG_TYPE_RATE_LIMIT:
		return (mlx5e_rl_snd_tag_alloc(ifp, params, ppmt));
#endif
	case IF_SND_TAG_TYPE_UNLIMITED:
		return (mlx5e_ul_snd_tag_alloc(ifp, params, ppmt));
	default:
		return (EOPNOTSUPP);
	}
}

static int
mlx5e_snd_tag_modify(struct m_snd_tag *pmt, union if_snd_tag_modify_params *params)
{
	struct mlx5e_snd_tag *tag =
	    container_of(pmt, struct mlx5e_snd_tag, m_snd_tag);

	switch (tag->type) {
#ifdef RATELIMIT
	case IF_SND_TAG_TYPE_RATE_LIMIT:
		return (mlx5e_rl_snd_tag_modify(pmt, params));
#endif
	case IF_SND_TAG_TYPE_UNLIMITED:
	default:
		return (EOPNOTSUPP);
	}
}

static int
mlx5e_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params)
{
	struct mlx5e_snd_tag *tag =
	    container_of(pmt, struct mlx5e_snd_tag, m_snd_tag);

	switch (tag->type) {
#ifdef RATELIMIT
	case IF_SND_TAG_TYPE_RATE_LIMIT:
		return (mlx5e_rl_snd_tag_query(pmt, params));
#endif
	case IF_SND_TAG_TYPE_UNLIMITED:
		return (mlx5e_ul_snd_tag_query(pmt, params));
	default:
		return (EOPNOTSUPP);
	}
}

static void
mlx5e_snd_tag_free(struct m_snd_tag *pmt)
{
	struct mlx5e_snd_tag *tag =
	    container_of(pmt, struct mlx5e_snd_tag, m_snd_tag);

	switch (tag->type) {
#ifdef RATELIMIT
	case IF_SND_TAG_TYPE_RATE_LIMIT:
		mlx5e_rl_snd_tag_free(pmt);
		break;
#endif
	case IF_SND_TAG_TYPE_UNLIMITED:
		mlx5e_ul_snd_tag_free(pmt);
		break;
	default:
		break;
	}
}
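
/*
 * Attach callback: allocate the per-port private data and ifnet, create
 * the sysctl trees and HW resources (UAR, PD, TD, mkey), build the
 * supported media list and statistics, then attach the Ethernet interface.
 */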
static void *
mlx5e_create_ifp(struct mlx5_core_dev *mdev)
{
	struct ifnet *ifp;
	struct mlx5e_priv *priv;
	u8 dev_addr[ETHER_ADDR_LEN] __aligned(4);
	u8 connector_type;
	struct sysctl_oid_list *child;
	int ncv = mdev->priv.eq_table.num_comp_vectors;
	char unit[16];
	int err;
	int i, j;
	u32 eth_proto_cap;
	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
	bool ext = 0;
	u32 speeds_num;
	struct media media_entry = {};

	if (mlx5e_check_required_hca_cap(mdev)) {
		mlx5_core_dbg(mdev, "mlx5e_check_required_hca_cap() failed\n");
		return (NULL);
	}

	/*
	 * Try to allocate the priv and make room for worst-case
	 * number of channel structures:
	 */
	priv = malloc(sizeof(*priv) +
	    (sizeof(priv->channel[0]) * mdev->priv.eq_table.num_comp_vectors),
	    M_MLX5EN, M_WAITOK | M_ZERO);

	ifp = priv->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		mlx5_core_err(mdev, "if_alloc() failed\n");
		goto err_free_priv;
	}
	/* setup all static fields */
	mlx5e_priv_static_init(priv, mdev->priv.eq_table.num_comp_vectors);

	ifp->if_softc = priv;
	if_initname(ifp, "mce", device_get_unit(mdev->pdev->dev.bsddev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = mlx5e_open;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = mlx5e_ioctl;
	ifp->if_transmit = mlx5e_xmit;
	ifp->if_qflush = if_qflush;
#if (__FreeBSD_version >= 1100000)
	ifp->if_get_counter = mlx5e_get_counter;
#endif
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	/*
	 * Set driver features
	 */
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
	ifp->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_LRO;
	ifp->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO;
	ifp->if_capabilities |= IFCAP_HWSTATS | IFCAP_HWRXTSTMP;
	ifp->if_capabilities |= IFCAP_TXRTLMT;
	ifp->if_snd_tag_alloc = mlx5e_snd_tag_alloc;
	ifp->if_snd_tag_free = mlx5e_snd_tag_free;
	ifp->if_snd_tag_modify = mlx5e_snd_tag_modify;
	ifp->if_snd_tag_query = mlx5e_snd_tag_query;

	/* set TSO limits so that we don't have to drop TX packets */
	ifp->if_hw_tsomax = MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */;
	ifp->if_hw_tsomaxsegsize = MLX5E_MAX_TX_MBUF_SIZE;

	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

	/* ifnet sysctl tree */
	sysctl_ctx_init(&priv->sysctl_ctx);
	priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev),
	    OID_AUTO, ifp->if_dname, CTLFLAG_RD, 0, "MLX5 ethernet - interface name");
	if (priv->sysctl_ifnet == NULL) {
		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
		goto err_free_sysctl;
	}
	snprintf(unit, sizeof(unit), "%d", ifp->if_dunit);
	priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    OID_AUTO, unit, CTLFLAG_RD, 0, "MLX5 ethernet - interface unit");
	if (priv->sysctl_ifnet == NULL) {
		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
		goto err_free_sysctl;
	}

	/* HW sysctl tree */
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev));
	priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child,
	    OID_AUTO, "hw", CTLFLAG_RD, 0, "MLX5 ethernet dev hw");
	if (priv->sysctl_hw == NULL) {
		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
		goto err_free_sysctl;
	}

	err = mlx5e_build_ifp_priv(mdev, priv, ncv);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_build_ifp_priv() failed (%d)\n", err);
		goto err_free_sysctl;
	}

	/* reuse mlx5core's watchdog workqueue */
	priv->wq = mdev->priv.health.wq_watchdog;

	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
	if (err) {
		mlx5_en_err(ifp, "mlx5_alloc_map_uar failed, %d\n", err);
		goto err_free_wq;
	}
	err = mlx5_core_alloc_pd(mdev, &priv->pdn);
	if (err) {
		mlx5_en_err(ifp, "mlx5_core_alloc_pd failed, %d\n", err);
		goto err_unmap_free_uar;
	}
	err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
	if (err) {
		mlx5_en_err(ifp,
		    "mlx5_alloc_transport_domain failed, %d\n", err);
		goto err_dealloc_pd;
	}
	err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
	if (err) {
		mlx5_en_err(ifp, "mlx5e_create_mkey failed, %d\n", err);
		goto err_dealloc_transport_domain;
	}
	mlx5_query_nic_vport_mac_address(priv->mdev, 0, dev_addr);

	/* check if we should generate a random MAC address */
	if (MLX5_CAP_GEN(priv->mdev, vport_group_manager) == 0 &&
	    is_zero_ether_addr(dev_addr)) {
		random_ether_addr(dev_addr);
		mlx5_en_err(ifp, "Assigned random MAC address\n");
	}
#ifdef RATELIMIT
	err = mlx5e_rl_init(priv);
	if (err) {
		mlx5_en_err(ifp, "mlx5e_rl_init failed, %d\n", err);
		goto err_create_mkey;
	}
#endif

	/* set default MTU */
	mlx5e_set_dev_port_mtu(ifp, ifp->if_mtu);

	/* Set default media status */
	priv->media_status_last = IFM_AVALID;
	priv->media_active_last = IFM_ETHER | IFM_AUTO |
	    IFM_ETH_RXPAUSE | IFM_FDX;

	/* setup default pauseframes configuration */
	mlx5e_setup_pauseframes(priv);

	/* Setup supported medias */
	//TODO: If we failed to query ptys is it ok to proceed??
	err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
	if (err == 0) {
		ext = MLX5_CAP_PCAM_FEATURE(mdev,
		    ptys_extended_ethernet);
		eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
		    eth_proto_capability);
		if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type))
			connector_type = MLX5_GET(ptys_reg, out,
			    connector_type);
	} else {
		eth_proto_cap = 0;
		mlx5_en_err(ifp, "Query port media capability failed, %d\n", err);
	}

	ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
	    mlx5e_media_change, mlx5e_media_status);

	speeds_num = ext ? MLX5E_EXT_LINK_SPEEDS_NUMBER : MLX5E_LINK_SPEEDS_NUMBER;
	for (i = 0; i != speeds_num; i++) {
		for (j = 0; j < MLX5E_LINK_MODES_NUMBER ; ++j) {
			media_entry = ext ? mlx5e_ext_mode_table[i][j] :
			    mlx5e_mode_table[i][j];
			if (media_entry.baudrate == 0)
				continue;
			if (MLX5E_PROT_MASK(i) & eth_proto_cap) {
				ifmedia_add(&priv->media,
				    media_entry.subtype |
				    IFM_ETHER, 0, NULL);
				ifmedia_add(&priv->media,
				    media_entry.subtype |
				    IFM_ETHER | IFM_FDX |
				    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
			}
		}
	}

	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);

	/* Set autoselect by default */
	ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
	ether_ifattach(ifp, dev_addr);

	/* Register for VLAN events */
	priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    mlx5e_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
	priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    mlx5e_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);

	/* Link is down by default */
	if_link_state_change(ifp, LINK_STATE_DOWN);

	mlx5e_enable_async_events(priv);

	mlx5e_add_hw_stats(priv);

	mlx5e_create_stats(&priv->stats.vport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    "vstats", mlx5e_vport_stats_desc, MLX5E_VPORT_STATS_NUM,
	    priv->stats.vport.arg);
	mlx5e_create_stats(&priv->stats.pport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    "pstats", mlx5e_pport_stats_desc, MLX5E_PPORT_STATS_NUM,
	    priv->stats.pport.arg);

	mlx5e_create_ethtool(priv);

	mtx_lock(&priv->async_events_mtx);
	mlx5e_update_stats(priv);
	mtx_unlock(&priv->async_events_mtx);

	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    OID_AUTO, "rx_clbr_done", CTLFLAG_RD,
	    &priv->clbr_done, 0,
	    "RX timestamps calibration state");
	callout_init(&priv->tstmp_clbr, CALLOUT_DIRECT);
	mlx5e_reset_calibration_callout(priv);

	return (priv);

#ifdef RATELIMIT
err_create_mkey:
	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
#endif
err_dealloc_transport_domain:
	mlx5_dealloc_transport_domain(mdev, priv->tdn);

err_dealloc_pd:
	mlx5_core_dealloc_pd(mdev, priv->pdn);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &priv->cq_uar);

err_free_wq:
	flush_workqueue(priv->wq);

err_free_sysctl:
	sysctl_ctx_free(&priv->sysctl_ctx);
	if (priv->sysctl_debug)
		sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
	mlx5e_priv_static_destroy(priv, mdev->priv.eq_table.num_comp_vectors);
	if_free(ifp);

err_free_priv:
	free(priv, M_MLX5EN);
	return (NULL);
}
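
/*
 * Detach callback: block further ioctls, wait for outstanding send tag
 * references, close the interface and tear down all resources created
 * by mlx5e_create_ifp() in reverse order.
 */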
static void
mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct ifnet *ifp = priv->ifp;

	/* don't allow more IOCTLs */
	priv->gone = 1;

	/* XXX wait a bit to allow IOCTL handlers to complete */
	pause("W", hz);

#ifdef RATELIMIT
	/*
	 * The kernel can have reference(s) via the m_snd_tag's into
	 * the ratelimit channels, and these must go away before
	 * detaching:
	 */
	while (READ_ONCE(priv->rl.stats.tx_active_connections) != 0) {
		mlx5_en_err(priv->ifp,
		    "Waiting for all ratelimit connections to terminate\n");
		pause("W", hz);
	}
#endif
	/* stop watchdog timer */
	callout_drain(&priv->watchdog);

	callout_drain(&priv->tstmp_clbr);

	if (priv->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
	if (priv->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);

	/* make sure device gets closed */
	PRIV_LOCK(priv);
	mlx5e_close_locked(ifp);
	PRIV_UNLOCK(priv);

	/* wait for all unlimited send tags to go away */
	while (priv->channel_refs != 0) {
		mlx5_en_err(priv->ifp,
		    "Waiting for all unlimited connections to terminate\n");
		pause("W", hz);
	}

	/* unregister device */
	ifmedia_removeall(&priv->media);
	ether_ifdetach(ifp);

#ifdef RATELIMIT
	mlx5e_rl_cleanup(priv);
#endif
	/* destroy all remaining sysctl nodes */
	sysctl_ctx_free(&priv->stats.vport.ctx);
	sysctl_ctx_free(&priv->stats.pport.ctx);
	if (priv->sysctl_debug)
		sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
	sysctl_ctx_free(&priv->sysctl_ctx);

	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
	mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
	mlx5e_disable_async_events(priv);
	flush_workqueue(priv->wq);
	mlx5e_priv_static_destroy(priv, mdev->priv.eq_table.num_comp_vectors);
	if_free(ifp);
	free(priv, M_MLX5EN);
}

static void *
mlx5e_get_ifp(void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	return (priv->ifp);
}

static struct mlx5_interface mlx5e_interface = {
	.add = mlx5e_create_ifp,
	.remove = mlx5e_destroy_ifp,
	.event = mlx5e_async_event,
	.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev = mlx5e_get_ifp,
};

void
mlx5e_init(void)
{
	mlx5_register_interface(&mlx5e_interface);
}

void
mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}

static void
mlx5e_show_version(void __unused *arg)
{
	printf("%s", mlx5e_version);
}
SYSINIT(mlx5e_show_version, SI_SUB_DRIVERS, SI_ORDER_ANY, mlx5e_show_version, NULL);

module_init_order(mlx5e_init, SI_ORDER_THIRD);
module_exit_order(mlx5e_cleanup, SI_ORDER_THIRD);

#if (__FreeBSD_version >= 1100000)
MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1);
#endif
MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1);
MODULE_VERSION(mlx5en, 1);