Source-Changes-HG archive
[src/trunk]: src/sys/arch/x86/pci Sync code with FreeBSD to support RSS
details: https://anonhg.NetBSD.org/src/rev/7d8b823b7d6a
branches: trunk
changeset: 349132:7d8b823b7d6a
user: hikaru <hikaru%NetBSD.org@localhost>
date: Fri Nov 25 05:29:54 2016 +0000
description:
Sync code with FreeBSD to support RSS
- Use MSI/MSI-X if it is available.
- Support TSO.
co-authored by k-nakahara
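
As background for the MSI/MSI-X item above, here is a minimal sketch (not
part of this commit) of how a NetBSD driver typically requests MSI-X first
and lets pci_intr_alloc(9) fall back to MSI or INTx. The helper name
example_intr_alloc() and the vector counts are illustrative only; the real
driver sizes its counts from the Tx/Rx queue configuration.

    #include <sys/param.h>
    #include <sys/errno.h>
    #include <dev/pci/pcivar.h>

    /*
     * Hypothetical helper, for illustration only: ask for up to three
     * MSI-X vectors, falling back to a single MSI or INTx vector when
     * MSI-X is not available.
     */
    static int
    example_intr_alloc(const struct pci_attach_args *pa,
        pci_intr_handle_t **pihp)
    {
            int counts[PCI_INTR_TYPE_SIZE];

            counts[PCI_INTR_TYPE_MSIX] = 3; /* e.g. Tx + Rx + event */
            counts[PCI_INTR_TYPE_MSI] = 1;
            counts[PCI_INTR_TYPE_INTX] = 1;

            /* Tries MSI-X first, then MSI, then INTx. */
            if (pci_intr_alloc(pa, pihp, counts, PCI_INTR_TYPE_MSIX) != 0)
                    return ENXIO;

            return 0;
    }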
diffstat:
sys/arch/x86/pci/if_vmx.c | 3776 +++++++++++++++++++++++++++++++----------
sys/arch/x86/pci/if_vmxreg.h | 188 +-
2 files changed, 2933 insertions(+), 1031 deletions(-)
diffs (truncated from 4304 to 300 lines):
diff -r 390f4d5b7f6a -r 7d8b823b7d6a sys/arch/x86/pci/if_vmx.c
--- a/sys/arch/x86/pci/if_vmx.c Fri Nov 25 05:03:36 2016 +0000
+++ b/sys/arch/x86/pci/if_vmx.c Fri Nov 25 05:29:54 2016 +0000
@@ -1,8 +1,9 @@
-/* $NetBSD: if_vmx.c,v 1.7 2016/06/10 13:27:13 ozaki-r Exp $ */
+/* $NetBSD: if_vmx.c,v 1.8 2016/11/25 05:29:54 hikaru Exp $ */
/* $OpenBSD: if_vmx.c,v 1.16 2014/01/22 06:04:17 brad Exp $ */
/*
* Copyright (c) 2013 Tsubai Masanari
+ * Copyright (c) 2013 Bryan Venteicher <bryanv%FreeBSD.org@localhost>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -18,9 +19,12 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_vmx.c,v 1.7 2016/06/10 13:27:13 ozaki-r Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_vmx.c,v 1.8 2016/11/25 05:29:54 hikaru Exp $");
#include <sys/param.h>
+#include <sys/cpu.h>
+#include <sys/kernel.h>
+#include <sys/kmem.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/mbuf.h>
@@ -35,6 +39,7 @@
#include <netinet/in_systm.h> /* for <netinet/ip.h> */
#include <netinet/in.h> /* for <netinet/ip.h> */
#include <netinet/ip.h> /* for struct ip */
+#include <netinet/ip6.h> /* for struct ip6_hdr */
#include <netinet/tcp.h> /* for struct tcphdr */
#include <netinet/udp.h> /* for struct udphdr */
@@ -44,137 +49,446 @@
#include <arch/x86/pci/if_vmxreg.h>
-#define NRXQUEUE 1
-#define NTXQUEUE 1
-
-#define NTXDESC 128 /* tx ring size */
-#define NTXSEGS 8 /* tx descriptors per packet */
-#define NRXDESC 128
-#define NTXCOMPDESC NTXDESC
-#define NRXCOMPDESC (NRXDESC * 2) /* ring1 + ring2 */
-
#define VMXNET3_DRIVER_VERSION 0x00010000
+/*
+ * Max descriptors per Tx packet. We must limit the size of
+ * any TSO packet based on the number of segments.
+ */
+#define VMXNET3_TX_MAXSEGS 32
+#define VMXNET3_TX_MAXSIZE (VMXNET3_TX_MAXSEGS * MCLBYTES)
+
+/*
+ * Maximum supported Tx segment size. The length field in the
+ * Tx descriptor is 14 bits.
+ */
+#define VMXNET3_TX_MAXSEGSIZE (1 << 14)
+
+/*
+ * The maximum number of Rx segments we accept.
+ */
+#define VMXNET3_MAX_RX_SEGS 0 /* no segments */
+
+/*
+ * Predetermined size of the multicast MAC filter table. If the
+ * number of multicast addresses exceeds this size, then the
+ * ALL_MULTI mode is used instead.
+ */
+#define VMXNET3_MULTICAST_MAX 32
+
+/*
+ * Our Tx watchdog timeout.
+ */
+#define VMXNET3_WATCHDOG_TIMEOUT 5
+
+/*
+ * IP protocols for which we can perform Tx checksum offloading.
+ */
+#define VMXNET3_CSUM_OFFLOAD \
+ (M_CSUM_TCPv4 | M_CSUM_UDPv4)
+#define VMXNET3_CSUM_OFFLOAD_IPV6 \
+ (M_CSUM_TCPv6 | M_CSUM_UDPv6)
+
+#define VMXNET3_CSUM_ALL_OFFLOAD \
+ (VMXNET3_CSUM_OFFLOAD | VMXNET3_CSUM_OFFLOAD_IPV6 | M_CSUM_TSOv4 | M_CSUM_TSOv6)
+
+#define VMXNET3_RXRINGS_PERQ 2
+
+#define VMXNET3_CORE_LOCK(_sc) mutex_enter((_sc)->vmx_mtx)
+#define VMXNET3_CORE_UNLOCK(_sc) mutex_exit((_sc)->vmx_mtx)
+#define VMXNET3_CORE_LOCK_ASSERT(_sc) mutex_owned((_sc)->vmx_mtx)
+#define VMXNET3_CORE_LOCK_ASSERT_NOTOWNED(_sc) \
+ (!mutex_owned((_sc)->vmx_mtx))
+
+#define VMXNET3_RXQ_LOCK(_rxq) mutex_enter((_rxq)->vxrxq_mtx)
+#define VMXNET3_RXQ_UNLOCK(_rxq) mutex_exit((_rxq)->vxrxq_mtx)
+#define VMXNET3_RXQ_LOCK_ASSERT(_rxq) \
+ mutex_owned((_rxq)->vxrxq_mtx)
+#define VMXNET3_RXQ_LOCK_ASSERT_NOTOWNED(_rxq) \
+ (!mutex_owned((_rxq)->vxrxq_mtx))
+
+#define VMXNET3_TXQ_LOCK(_txq) mutex_enter((_txq)->vxtxq_mtx)
+#define VMXNET3_TXQ_UNLOCK(_txq) mutex_exit((_txq)->vxtxq_mtx)
+#define VMXNET3_TXQ_LOCK_ASSERT(_txq) \
+ mutex_owned((_txq)->vxtxq_mtx)
+#define VMXNET3_TXQ_LOCK_ASSERT_NOTOWNED(_txq) \
+ (!mutex_owned((_txq)->vxtxq_mtx))
+
+struct vmxnet3_dma_alloc {
+ bus_addr_t dma_paddr;
+ void *dma_vaddr;
+ bus_dmamap_t dma_map;
+ bus_size_t dma_size;
+ bus_dma_segment_t dma_segs[1];
+};
+
+struct vmxnet3_txbuf {
+ bus_dmamap_t vtxb_dmamap;
+ struct mbuf *vtxb_m;
+};
+
struct vmxnet3_txring {
- struct mbuf *m[NTXDESC];
- bus_dmamap_t dmap[NTXDESC];
- struct vmxnet3_txdesc *txd;
- u_int head;
- u_int next;
- uint8_t gen;
+ struct vmxnet3_txbuf *vxtxr_txbuf;
+ struct vmxnet3_txdesc *vxtxr_txd;
+ u_int vxtxr_head;
+ u_int vxtxr_next;
+ u_int vxtxr_ndesc;
+ int vxtxr_gen;
+ struct vmxnet3_dma_alloc vxtxr_dma;
+};
+
+struct vmxnet3_rxbuf {
+ bus_dmamap_t vrxb_dmamap;
+ struct mbuf *vrxb_m;
};
struct vmxnet3_rxring {
- struct mbuf *m[NRXDESC];
- bus_dmamap_t dmap[NRXDESC];
- struct vmxnet3_rxdesc *rxd;
- u_int fill;
- uint8_t gen;
- uint8_t rid;
+ struct vmxnet3_rxbuf *vxrxr_rxbuf;
+ struct vmxnet3_rxdesc *vxrxr_rxd;
+ u_int vxrxr_fill;
+ u_int vxrxr_ndesc;
+ int vxrxr_gen;
+ int vxrxr_rid;
+ struct vmxnet3_dma_alloc vxrxr_dma;
+ bus_dmamap_t vxrxr_spare_dmap;
};
struct vmxnet3_comp_ring {
union {
struct vmxnet3_txcompdesc *txcd;
struct vmxnet3_rxcompdesc *rxcd;
- };
- u_int next;
- uint8_t gen;
+ } vxcr_u;
+ u_int vxcr_next;
+ u_int vxcr_ndesc;
+ int vxcr_gen;
+ struct vmxnet3_dma_alloc vxcr_dma;
+};
+
+struct vmxnet3_txq_stats {
+ uint64_t vmtxs_opackets; /* if_opackets */
+ uint64_t vmtxs_obytes; /* if_obytes */
+ uint64_t vmtxs_omcasts; /* if_omcasts */
+ uint64_t vmtxs_csum;
+ uint64_t vmtxs_tso;
+ uint64_t vmtxs_full;
+ uint64_t vmtxs_offload_failed;
};
struct vmxnet3_txqueue {
- struct vmxnet3_txring cmd_ring;
- struct vmxnet3_comp_ring comp_ring;
- struct vmxnet3_txq_shared *ts;
+ kmutex_t *vxtxq_mtx;
+ struct vmxnet3_softc *vxtxq_sc;
+ int vxtxq_id;
+ int vxtxq_intr_idx;
+ int vxtxq_watchdog;
+ struct vmxnet3_txring vxtxq_cmd_ring;
+ struct vmxnet3_comp_ring vxtxq_comp_ring;
+ struct vmxnet3_txq_stats vxtxq_stats;
+ struct vmxnet3_txq_shared *vxtxq_ts;
+ char vxtxq_name[16];
+};
+
+struct vmxnet3_rxq_stats {
+ uint64_t vmrxs_ipackets; /* if_ipackets */
+ uint64_t vmrxs_ibytes; /* if_ibytes */
+ uint64_t vmrxs_iqdrops; /* if_iqdrops */
+ uint64_t vmrxs_ierrors; /* if_ierrors */
};
struct vmxnet3_rxqueue {
- struct vmxnet3_rxring cmd_ring[2];
- struct vmxnet3_comp_ring comp_ring;
- struct vmxnet3_rxq_shared *rs;
+ kmutex_t *vxrxq_mtx;
+ struct vmxnet3_softc *vxrxq_sc;
+ int vxrxq_id;
+ int vxrxq_intr_idx;
+ struct mbuf *vxrxq_mhead;
+ struct mbuf *vxrxq_mtail;
+ struct vmxnet3_rxring vxrxq_cmd_ring[VMXNET3_RXRINGS_PERQ];
+ struct vmxnet3_comp_ring vxrxq_comp_ring;
+ struct vmxnet3_rxq_stats vxrxq_stats;
+ struct vmxnet3_rxq_shared *vxrxq_rs;
+ char vxrxq_name[16];
+};
+
+struct vmxnet3_statistics {
+ uint32_t vmst_defragged;
+ uint32_t vmst_defrag_failed;
+ uint32_t vmst_mgetcl_failed;
+ uint32_t vmst_mbuf_load_failed;
};
struct vmxnet3_softc {
- device_t sc_dev;
- struct ethercom sc_ethercom;
- struct ifmedia sc_media;
-
- bus_space_tag_t sc_iot0;
- bus_space_tag_t sc_iot1;
- bus_space_handle_t sc_ioh0;
- bus_space_handle_t sc_ioh1;
- bus_dma_tag_t sc_dmat;
-
- struct vmxnet3_txqueue sc_txq[NTXQUEUE];
- struct vmxnet3_rxqueue sc_rxq[NRXQUEUE];
- struct vmxnet3_driver_shared *sc_ds;
- uint8_t *sc_mcast;
+ device_t vmx_dev;
+ struct ethercom vmx_ethercom;
+ struct ifmedia vmx_media;
+ struct vmxnet3_driver_shared *vmx_ds;
+ int vmx_flags;
+#define VMXNET3_FLAG_NO_MSIX (1 << 0)
+#define VMXNET3_FLAG_RSS (1 << 1)
+#define VMXNET3_FLAG_ATTACHED (1 << 2)
+
+ struct vmxnet3_txqueue *vmx_txq;
+ struct vmxnet3_rxqueue *vmx_rxq;
+
+ struct pci_attach_args *vmx_pa;
+
+ bus_space_tag_t vmx_iot0;
+ bus_space_tag_t vmx_iot1;
+ bus_space_handle_t vmx_ioh0;
+ bus_space_handle_t vmx_ioh1;
+ bus_size_t vmx_ios0;
+ bus_size_t vmx_ios1;
+ bus_dma_tag_t vmx_dmat;
+
+ int vmx_link_active;
+ int vmx_ntxqueues;
+ int vmx_nrxqueues;
+ int vmx_ntxdescs;
+ int vmx_nrxdescs;
+ int vmx_max_rxsegs;
+
+ struct vmxnet3_statistics vmx_stats;
+
+ int vmx_intr_type;
+ int vmx_intr_mask_mode;
+ int vmx_event_intr_idx;
+ int vmx_nintrs;
+ pci_intr_handle_t *vmx_intrs; /* legacy use vmx_intrs[0] */
+ void *vmx_ihs[VMXNET3_MAX_INTRS];
+
+ kmutex_t *vmx_mtx;
+
+ uint8_t *vmx_mcast;
+ void *vmx_qs;
+ struct vmxnet3_rss_shared *vmx_rss;
+ callout_t vmx_tick;
+ struct vmxnet3_dma_alloc vmx_ds_dma;
+ struct vmxnet3_dma_alloc vmx_qs_dma;
+ struct vmxnet3_dma_alloc vmx_mcast_dma;
+ struct vmxnet3_dma_alloc vmx_rss_dma;
+ int vmx_max_ntxqueues;