HardenedBSD src tree https://hardenedbsd.org/

/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _IF_VTNETVAR_H
#define _IF_VTNETVAR_H

struct vtnet_softc;

struct vtnet_statistics {
        uint64_t        mbuf_alloc_failed;

        uint64_t        rx_frame_too_large;
        uint64_t        rx_enq_replacement_failed;
        uint64_t        rx_mergeable_failed;
        uint64_t        rx_csum_bad_ethtype;
        uint64_t        rx_csum_bad_ipproto;
        uint64_t        rx_csum_bad_offset;
        uint64_t        rx_csum_bad_proto;
        uint64_t        tx_csum_bad_ethtype;
        uint64_t        tx_tso_bad_ethtype;
        uint64_t        tx_tso_not_tcp;
        uint64_t        tx_defragged;
        uint64_t        tx_defrag_failed;

        /*
         * These are accumulated from each Rx/Tx queue.
         */
        uint64_t        rx_csum_failed;
        uint64_t        rx_csum_offloaded;
        uint64_t        rx_task_rescheduled;
        uint64_t        tx_csum_offloaded;
        uint64_t        tx_tso_offloaded;
        uint64_t        tx_task_rescheduled;
};

struct vtnet_rxq_stats {
        uint64_t        vrxs_ipackets;  /* if_ipackets */
        uint64_t        vrxs_ibytes;    /* if_ibytes */
        uint64_t        vrxs_iqdrops;   /* if_iqdrops */
        uint64_t        vrxs_ierrors;   /* if_ierrors */
        uint64_t        vrxs_csum;
        uint64_t        vrxs_csum_failed;
        uint64_t        vrxs_rescheduled;
};

struct vtnet_rxq {
        struct mtx               vtnrx_mtx;
        struct vtnet_softc      *vtnrx_sc;
        struct virtqueue        *vtnrx_vq;
        struct sglist           *vtnrx_sg;
        int                      vtnrx_id;
        struct vtnet_rxq_stats   vtnrx_stats;
        struct taskqueue        *vtnrx_tq;
        struct task              vtnrx_intrtask;
#ifdef DEV_NETMAP
        uint32_t                 vtnrx_nm_refill;
        struct virtio_net_hdr_mrg_rxbuf vtnrx_shrhdr;
#endif /* DEV_NETMAP */
        char                     vtnrx_name[16];
} __aligned(CACHE_LINE_SIZE);

#define VTNET_RXQ_LOCK(_rxq)    mtx_lock(&(_rxq)->vtnrx_mtx)
#define VTNET_RXQ_UNLOCK(_rxq)  mtx_unlock(&(_rxq)->vtnrx_mtx)
#define VTNET_RXQ_LOCK_ASSERT(_rxq)             \
    mtx_assert(&(_rxq)->vtnrx_mtx, MA_OWNED)
#define VTNET_RXQ_LOCK_ASSERT_NOTOWNED(_rxq)    \
    mtx_assert(&(_rxq)->vtnrx_mtx, MA_NOTOWNED)

struct vtnet_txq_stats {
        uint64_t        vtxs_opackets;  /* if_opackets */
        uint64_t        vtxs_obytes;    /* if_obytes */
        uint64_t        vtxs_omcasts;   /* if_omcasts */
        uint64_t        vtxs_csum;
        uint64_t        vtxs_tso;
        uint64_t        vtxs_rescheduled;
};

struct vtnet_txq {
        struct mtx               vtntx_mtx;
        struct vtnet_softc      *vtntx_sc;
        struct virtqueue        *vtntx_vq;
        struct sglist           *vtntx_sg;
#ifndef VTNET_LEGACY_TX
        struct buf_ring         *vtntx_br;
#endif
        int                      vtntx_id;
        int                      vtntx_watchdog;
        struct vtnet_txq_stats   vtntx_stats;
        struct taskqueue        *vtntx_tq;
        struct task              vtntx_intrtask;
#ifndef VTNET_LEGACY_TX
        struct task              vtntx_defrtask;
#endif
#ifdef DEV_NETMAP
        struct virtio_net_hdr_mrg_rxbuf vtntx_shrhdr;
#endif /* DEV_NETMAP */
        char                     vtntx_name[16];
} __aligned(CACHE_LINE_SIZE);

#define VTNET_TXQ_LOCK(_txq)    mtx_lock(&(_txq)->vtntx_mtx)
#define VTNET_TXQ_TRYLOCK(_txq) mtx_trylock(&(_txq)->vtntx_mtx)
#define VTNET_TXQ_UNLOCK(_txq)  mtx_unlock(&(_txq)->vtntx_mtx)
#define VTNET_TXQ_LOCK_ASSERT(_txq)             \
    mtx_assert(&(_txq)->vtntx_mtx, MA_OWNED)
#define VTNET_TXQ_LOCK_ASSERT_NOTOWNED(_txq)    \
    mtx_assert(&(_txq)->vtntx_mtx, MA_NOTOWNED)

struct vtnet_softc {
        device_t                 vtnet_dev;
        struct ifnet            *vtnet_ifp;
        struct vtnet_rxq        *vtnet_rxqs;
        struct vtnet_txq        *vtnet_txqs;

        uint32_t                 vtnet_flags;
#define VTNET_FLAG_SUSPENDED     0x0001
#define VTNET_FLAG_MAC           0x0002
#define VTNET_FLAG_CTRL_VQ       0x0004
#define VTNET_FLAG_CTRL_RX       0x0008
#define VTNET_FLAG_CTRL_MAC      0x0010
#define VTNET_FLAG_VLAN_FILTER   0x0020
#define VTNET_FLAG_TSO_ECN       0x0040
#define VTNET_FLAG_MRG_RXBUFS    0x0080
#define VTNET_FLAG_LRO_NOMRG     0x0100
#define VTNET_FLAG_MULTIQ        0x0200
#define VTNET_FLAG_INDIRECT      0x0400
#define VTNET_FLAG_EVENT_IDX     0x0800

        int                      vtnet_link_active;
        int                      vtnet_hdr_size;
        int                      vtnet_rx_process_limit;
        int                      vtnet_rx_nsegs;
        int                      vtnet_rx_nmbufs;
        int                      vtnet_rx_clsize;
        int                      vtnet_rx_new_clsize;
        int                      vtnet_tx_intr_thresh;
        int                      vtnet_tx_nsegs;
        int                      vtnet_if_flags;
        int                      vtnet_act_vq_pairs;
        int                      vtnet_max_vq_pairs;
        int                      vtnet_requested_vq_pairs;

        struct virtqueue        *vtnet_ctrl_vq;
        struct vtnet_mac_filter *vtnet_mac_filter;
        uint32_t                *vtnet_vlan_filter;

        uint64_t                 vtnet_features;
        struct vtnet_statistics  vtnet_stats;
        struct callout           vtnet_tick_ch;
        struct ifmedia           vtnet_media;
        eventhandler_tag         vtnet_vlan_attach;
        eventhandler_tag         vtnet_vlan_detach;

        struct mtx               vtnet_mtx;
        char                     vtnet_mtx_name[16];
        char                     vtnet_hwaddr[ETHER_ADDR_LEN];
};

/*
 * Maximum number of queue pairs we will autoconfigure to.
 */
#define VTNET_MAX_QUEUE_PAIRS   8

/*
 * Additional completed entries can appear in a virtqueue before we can
 * reenable interrupts. Number of times to retry before scheduling the
 * taskqueue to process the completed entries.
 */
#define VTNET_INTR_DISABLE_RETRIES      4

/*
 * Similarly, additional completed entries can appear in a virtqueue
 * between when we last checked and before notifying the host. Number
 * of times to retry before scheduling the taskqueue to process the
 * queue.
 */
#define VTNET_NOTIFY_RETRIES    4

/*
 * Fake the media type. The host does not provide us with any real media
 * information.
 */
#define VTNET_MEDIATYPE         (IFM_ETHER | IFM_10G_T | IFM_FDX)

/*
 * Number of words to allocate for the VLAN shadow table. There is one
 * bit for each VLAN.
 */
#define VTNET_VLAN_FILTER_NWORDS        (4096 / 32)
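
/*
 * Illustrative sketch (not part of this header): a 12-bit VLAN tag
 * indexes the shadow table as one bit per VLAN, along the lines of:
 *
 *      idx = (tag >> 5) & (VTNET_VLAN_FILTER_NWORDS - 1);
 *      sc->vtnet_vlan_filter[idx] |= 1 << (tag & 0x1F);
 *
 * The driver's actual update logic lives in if_vtnet.c.
 */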

/*
 * When mergeable buffers are not negotiated, the vtnet_rx_header structure
 * below is placed at the beginning of the mbuf data. Use 4 bytes of pad to
 * both keep the VirtIO header and the data non-contiguous and to keep the
 * frame's payload 4 byte aligned.
 *
 * When mergeable buffers are negotiated, the host puts the VirtIO header in
 * the beginning of the first mbuf's data.
 */
#define VTNET_RX_HEADER_PAD     4
struct vtnet_rx_header {
        struct virtio_net_hdr   vrh_hdr;
        char                    vrh_pad[VTNET_RX_HEADER_PAD];
} __packed;
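
/*
 * Worked example (assuming the legacy 10-byte struct virtio_net_hdr):
 * sizeof(struct vtnet_rx_header) is 10 + 4 = 14 bytes, so the 14-byte
 * Ethernet header that follows ends at offset 28, leaving the frame's
 * payload 4-byte aligned as described above.
 */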

/*
 * For each outgoing frame, the vtnet_tx_header below is allocated from
 * the vtnet_tx_header_zone.
 */
struct vtnet_tx_header {
        union {
                struct virtio_net_hdr           hdr;
                struct virtio_net_hdr_mrg_rxbuf mhdr;
        } vth_uhdr;
        struct mbuf *vth_mbuf;
};

/*
 * The VirtIO specification does not place a limit on the number of MAC
 * addresses the guest driver may request to be filtered. In practice,
 * the host is constrained by available resources. To simplify this driver,
 * impose a reasonably high limit of MAC addresses we will filter before
 * falling back to promiscuous or all-multicast modes.
 */
#define VTNET_MAX_MAC_ENTRIES   128

struct vtnet_mac_table {
        uint32_t        nentries;
        uint8_t         macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN];
} __packed;

struct vtnet_mac_filter {
        struct vtnet_mac_table  vmf_unicast;
        uint32_t                vmf_pad; /* Make tables non-contiguous. */
        struct vtnet_mac_table  vmf_multicast;
};

/*
 * The MAC filter table is malloc(9)'d when needed. Ensure it will
 * always fit in one segment.
 */
CTASSERT(sizeof(struct vtnet_mac_filter) <= PAGE_SIZE);
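
/*
 * Back-of-the-envelope check behind the CTASSERT above, assuming a
 * 6-byte ETHER_ADDR_LEN and 4 KB pages: each vtnet_mac_table is
 * 4 + 128 * 6 = 772 bytes, so the whole filter occupies
 * 772 + 4 + 772 = 1548 bytes, well under PAGE_SIZE.
 */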

#define VTNET_TX_TIMEOUT        5
#define VTNET_CSUM_OFFLOAD      (CSUM_TCP | CSUM_UDP)
#define VTNET_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6)

#define VTNET_CSUM_ALL_OFFLOAD  \
    (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6 | CSUM_TSO)

/* Features desired/implemented by this driver. */
#define VTNET_FEATURES                  \
    (VIRTIO_NET_F_MAC                 | \
     VIRTIO_NET_F_STATUS              | \
     VIRTIO_NET_F_CTRL_VQ             | \
     VIRTIO_NET_F_CTRL_RX             | \
     VIRTIO_NET_F_CTRL_MAC_ADDR       | \
     VIRTIO_NET_F_CTRL_VLAN           | \
     VIRTIO_NET_F_CSUM                | \
     VIRTIO_NET_F_GSO                 | \
     VIRTIO_NET_F_HOST_TSO4           | \
     VIRTIO_NET_F_HOST_TSO6           | \
     VIRTIO_NET_F_HOST_ECN            | \
     VIRTIO_NET_F_GUEST_CSUM          | \
     VIRTIO_NET_F_GUEST_TSO4          | \
     VIRTIO_NET_F_GUEST_TSO6          | \
     VIRTIO_NET_F_GUEST_ECN           | \
     VIRTIO_NET_F_MRG_RXBUF           | \
     VIRTIO_NET_F_MQ                  | \
     VIRTIO_RING_F_EVENT_IDX          | \
     VIRTIO_RING_F_INDIRECT_DESC)

/*
 * The VIRTIO_NET_F_HOST_TSO[46] features permit us to send frames
 * larger than 1514 bytes to the host.
 */
#define VTNET_TSO_FEATURES (VIRTIO_NET_F_GSO | VIRTIO_NET_F_HOST_TSO4 | \
    VIRTIO_NET_F_HOST_TSO6 | VIRTIO_NET_F_HOST_ECN)

/*
 * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
 * frames larger than 1514 bytes. We do not yet support software LRO
 * via tcp_lro_rx().
 */
#define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \
    VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN)

#define VTNET_MAX_MTU           65536
#define VTNET_MAX_RX_SIZE       65550

/*
 * Used to preallocate the VQ indirect descriptors. The first segment
 * is reserved for the header, except for mergeable buffers since the
 * header is placed inline with the data.
 */
#define VTNET_MRG_RX_SEGS       1
#define VTNET_MIN_RX_SEGS       2
#define VTNET_MAX_RX_SEGS       34
#define VTNET_MIN_TX_SEGS       32
#define VTNET_MAX_TX_SEGS       64

/*
 * Assert we can receive and transmit the maximum with regular
 * size clusters.
 */
CTASSERT(((VTNET_MAX_RX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE);
CTASSERT(((VTNET_MAX_TX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_MTU);
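
/*
 * For example, with 2 KB clusters (MCLBYTES == 2048) the asserts above
 * reduce to (34 - 1) * 2048 = 67584 >= 65550 for receive and
 * (64 - 1) * 2048 = 129024 >= 65536 for transmit.
 */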

/*
 * Number of slots in the Tx bufrings. This value matches most other
 * multiqueue drivers.
 */
#define VTNET_DEFAULT_BUFRING_SIZE      4096

/*
 * Determine how many mbufs are in each receive buffer. For LRO without
 * mergeable buffers, we must allocate an mbuf chain large enough to
 * hold both the vtnet_rx_header and the maximum receivable data.
 */
#define VTNET_NEEDED_RX_MBUFS(_sc, _clsize)                             \
        ((_sc)->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0 ? 1 :          \
            howmany(sizeof(struct vtnet_rx_header) + VTNET_MAX_RX_SIZE, \
                (_clsize))
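
/*
 * Worked example, assuming 2 KB clusters and the 14-byte legacy
 * vtnet_rx_header: without VTNET_FLAG_LRO_NOMRG one mbuf suffices;
 * with it, howmany(14 + 65550, 2048) = 33 clusters are chained per
 * receive buffer.
 */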

#define VTNET_CORE_MTX(_sc)             &(_sc)->vtnet_mtx
#define VTNET_CORE_LOCK(_sc)            mtx_lock(VTNET_CORE_MTX((_sc)))
#define VTNET_CORE_UNLOCK(_sc)          mtx_unlock(VTNET_CORE_MTX((_sc)))
#define VTNET_CORE_LOCK_DESTROY(_sc)    mtx_destroy(VTNET_CORE_MTX((_sc)))
#define VTNET_CORE_LOCK_ASSERT(_sc)             \
    mtx_assert(VTNET_CORE_MTX((_sc)), MA_OWNED)
#define VTNET_CORE_LOCK_ASSERT_NOTOWNED(_sc)    \
    mtx_assert(VTNET_CORE_MTX((_sc)), MA_NOTOWNED)

#define VTNET_CORE_LOCK_INIT(_sc) do {                                  \
    snprintf((_sc)->vtnet_mtx_name, sizeof((_sc)->vtnet_mtx_name),      \
        "%s", device_get_nameunit((_sc)->vtnet_dev));                   \
    mtx_init(VTNET_CORE_MTX((_sc)), (_sc)->vtnet_mtx_name,              \
        "VTNET Core Lock", MTX_DEF);                                    \
} while (0)
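
/*
 * Usage sketch (illustrative only, not taken from the driver): the
 * core lock is created once at attach time and then serializes access
 * to the softc, e.g.:
 *
 *      VTNET_CORE_LOCK_INIT(sc);
 *      ...
 *      VTNET_CORE_LOCK(sc);
 *      sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
 *      VTNET_CORE_UNLOCK(sc);
 */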

/*
 * Values for the init_mode argument of vtnet_init_locked().
 */
#define VTNET_INIT_NETMAP_ENTER 1
#define VTNET_INIT_NETMAP_EXIT  2

#endif /* _IF_VTNETVAR_H */