From 17ff7089e9aa4bf21549601b0b53197ee8ad7ad1 Mon Sep 17 00:00:00 2001 From: Duanqiang Wen Date: Sun, 7 Apr 2024 16:49:26 +0800 Subject: [PATCH 1/2] net: wangxun: ngbe: support wangxun 1GbE driver add support for wangxun 1GbE driver, source files and functions are the same as wangxun oob ngbe-1.2.5.3. Signed-off-by: Duanqiang Wen --- drivers/net/ethernet/Kconfig | 1 + drivers/net/ethernet/Makefile | 1 + drivers/net/ethernet/wangxun/Kconfig | 32 + drivers/net/ethernet/wangxun/Makefile | 6 + drivers/net/ethernet/wangxun/ngbe/Makefile | 16 + drivers/net/ethernet/wangxun/ngbe/ngbe.h | 1245 ++ .../net/ethernet/wangxun/ngbe/ngbe_debugfs.c | 778 ++ .../net/ethernet/wangxun/ngbe/ngbe_ethtool.c | 3921 ++++++ drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c | 4953 ++++++++ drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h | 280 + .../net/ethernet/wangxun/ngbe/ngbe_kcompat.c | 3024 +++++ .../net/ethernet/wangxun/ngbe/ngbe_kcompat.h | 7730 ++++++++++++ drivers/net/ethernet/wangxun/ngbe/ngbe_lib.c | 806 ++ drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 10328 ++++++++++++++++ drivers/net/ethernet/wangxun/ngbe/ngbe_mbx.c | 692 ++ drivers/net/ethernet/wangxun/ngbe/ngbe_mbx.h | 172 + .../net/ethernet/wangxun/ngbe/ngbe_osdep.h | 219 + .../net/ethernet/wangxun/ngbe/ngbe_param.c | 932 ++ .../net/ethernet/wangxun/ngbe/ngbe_pcierr.c | 293 + .../net/ethernet/wangxun/ngbe/ngbe_pcierr.h | 6 + drivers/net/ethernet/wangxun/ngbe/ngbe_phy.c | 1777 +++ drivers/net/ethernet/wangxun/ngbe/ngbe_phy.h | 203 + .../net/ethernet/wangxun/ngbe/ngbe_procfs.c | 924 ++ drivers/net/ethernet/wangxun/ngbe/ngbe_ptp.c | 887 ++ .../net/ethernet/wangxun/ngbe/ngbe_sriov.c | 1590 +++ .../net/ethernet/wangxun/ngbe/ngbe_sriov.h | 76 + .../net/ethernet/wangxun/ngbe/ngbe_sysfs.c | 226 + drivers/net/ethernet/wangxun/ngbe/ngbe_type.h | 3030 +++++ 28 files changed, 44148 insertions(+) create mode 100644 drivers/net/ethernet/wangxun/Kconfig create mode 100644 drivers/net/ethernet/wangxun/Makefile create mode 100644 
drivers/net/ethernet/wangxun/ngbe/Makefile create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe.h create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe_debugfs.c create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe_kcompat.c create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe_kcompat.h create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe_lib.c create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe_main.c create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe_mbx.c create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe_mbx.h create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe_osdep.h create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe_param.c create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe_pcierr.c create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe_pcierr.h create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe_phy.c create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe_phy.h create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe_procfs.c create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe_ptp.c create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe_sriov.c create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe_sriov.h create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe_sysfs.c create mode 100644 drivers/net/ethernet/wangxun/ngbe/ngbe_type.h diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 8242ae6e50e5..409811d0869d 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -181,6 +181,7 @@ source "drivers/net/ethernet/toshiba/Kconfig" source "drivers/net/ethernet/tundra/Kconfig" source "drivers/net/ethernet/via/Kconfig" source "drivers/net/ethernet/wiznet/Kconfig" +source "drivers/net/ethernet/wangxun/Kconfig" source 
"drivers/net/ethernet/xilinx/Kconfig" source "drivers/net/ethernet/xircom/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index f9ce2e623416..e5ce4b1b2fab 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -93,6 +93,7 @@ obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/ obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/ obj-$(CONFIG_NET_VENDOR_VIA) += via/ obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/ +obj-$(CONFIG_NET_VENDOR_WANGXUN) += wangxun/ obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/ obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/ obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/ diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig new file mode 100644 index 000000000000..b9d37f0ed593 --- /dev/null +++ b/drivers/net/ethernet/wangxun/Kconfig @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Wangxun network device configuration +# + +config NET_VENDOR_WANGXUN + bool "Wangxun devices" + default y + help + If you have a network (Ethernet) card from Wangxun(R), say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Wangxun(R) cards. If you say Y, you will + be asked for your specific card in the following questions. + +if NET_VENDOR_WANGXUN + +config NGBE + tristate "Wangxun(R) GbE PCI Express adapters support" + depends on PCI + help + This driver supports Wangxun(R) GbE PCI Express family of + adapters. + + More specific information on configuring the driver is in + . + + To compile this driver as a module, choose M here. The module + will be called ngbe. 
+ +endif # NET_VENDOR_WANGXUN diff --git a/drivers/net/ethernet/wangxun/Makefile b/drivers/net/ethernet/wangxun/Makefile new file mode 100644 index 000000000000..a25a08ca464f --- /dev/null +++ b/drivers/net/ethernet/wangxun/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Wangxun network device drivers. +# + +obj-$(CONFIG_NGBE) += ngbe/ diff --git a/drivers/net/ethernet/wangxun/ngbe/Makefile b/drivers/net/ethernet/wangxun/ngbe/Makefile new file mode 100644 index 000000000000..d9f34ece1d0f --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/Makefile @@ -0,0 +1,16 @@ +obj-$(CONFIG_NGBE) += ngbe.o + +ngbe-objs := ngbe_main.o \ + ngbe_hw.o \ + ngbe_phy.o \ + ngbe_ethtool.o \ + ngbe_lib.o \ + ngbe_mbx.o \ + ngbe_sriov.o \ + ngbe_pcierr.o \ + ngbe_param.o \ + ngbe_procfs.o \ + ngbe_ptp.o \ + ngbe_sysfs.o \ + ngbe_debugfs.o \ + ngbe_kcompat.o \ No newline at end of file diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe.h b/drivers/net/ethernet/wangxun/ngbe/ngbe.h new file mode 100644 index 000000000000..f42705cc0971 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe.h @@ -0,0 +1,1245 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ */ + + +#ifndef _NGBE_H_ +#define _NGBE_H_ + +#ifndef NGBE_NO_LRO +#include +#else +#include +#endif + +#include +#include +#include + +#ifdef SIOCETHTOOL +#include +#endif +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) +#include +#endif + +#include "ngbe_type.h" +#include "ngbe_kcompat.h" + +#ifdef HAVE_XDP_BUFF_RXQ +#include +#endif + +#ifdef HAVE_NDO_BUSY_POLL +#include +#define BP_EXTENDED_STATS +#endif + +#ifdef HAVE_SCTP +#include +#endif + +#ifdef HAVE_INCLUDE_LINUX_MDIO_H +#include +#endif + +#ifdef HAVE_PTP_1588_CLOCK +#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#include +#endif /* HAVE_INCLUDE_TIMECOUNTER_H */ +#include +#include +#include +#endif /* HAVE_PTP_1588_CLOCK */ + +/* Ether Types */ +#define NGBE_ETH_P_LLDP 0x88CC +#define NGBE_ETH_P_CNM 0x22E7 + +/* TX/RX descriptor defines */ +#define NGBE_DEFAULT_TXD 512 /* default ring size */ +#define NGBE_DEFAULT_TX_WORK 256 +#define NGBE_MAX_TXD 8192 +#define NGBE_MIN_TXD 128 + +#define NGBE_DEFAULT_RXD 512 /* default ring size */ +#define NGBE_DEFAULT_RX_WORK 256 +#define NGBE_MAX_RXD 8192 +#define NGBE_MIN_RXD 128 + +#define NGBE_ETH_P_LLDP 0x88CC + +/* flow control */ +#define NGBE_MIN_FCRTL 0x40 +#define NGBE_MAX_FCRTL 0x7FF80 +#define NGBE_MIN_FCRTH 0x600 +#define NGBE_MAX_FCRTH 0x7FFF0 +#define NGBE_DEFAULT_FCPAUSE 0xFFFF +#define NGBE_MIN_FCPAUSE 0 +#define NGBE_MAX_FCPAUSE 0xFFFF + +/* Supported Rx Buffer Sizes */ +#define NGBE_RXBUFFER_256 256 /* Used for skb receive header */ +#define NGBE_RXBUFFER_2K 2048 +#define NGBE_RXBUFFER_3K 3072 +#define NGBE_RXBUFFER_4K 4096 +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT +#define NGBE_RXBUFFER_1536 1536 +#define NGBE_RXBUFFER_7K 7168 +#define NGBE_RXBUFFER_8K 8192 +#define NGBE_RXBUFFER_15K 15360 +#endif /* CONFIG_NGBE_DISABLE_PACKET_SPLIT */ +#define NGBE_MAX_RXBUFFER 16384 /* largest size for single descriptor */ + + +/* + * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we + 
* reserve 64 more, and skb_shared_info adds an additional 320 bytes more, + * this adds up to 448 bytes of extra data. + * + * Since netdev_alloc_skb now allocates a page fragment we can use a value + * of 256 and the resultant skb will have a truesize of 960 or less. + */ +#define NGBE_RX_HDR_SIZE NGBE_RXBUFFER_256 + +#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define NGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#ifdef HAVE_STRUCT_DMA_ATTRS +#define NGBE_RX_DMA_ATTR NULL +#else +#define NGBE_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +#endif + +/* assume the kernel supports 8021p to avoid stripping vlan tags */ +#ifdef NGBE_DISABLE_8021P_SUPPORT +#ifndef HAVE_8021P_SUPPORT +#define HAVE_8021P_SUPPORT +#endif +#endif /* NGBE_DISABLE_8021P_SUPPORT */ + +enum ngbe_tx_flags { + /* cmd_type flags */ + NGBE_TX_FLAGS_HW_VLAN = 0x01, + NGBE_TX_FLAGS_TSO = 0x02, + NGBE_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + NGBE_TX_FLAGS_CC = 0x08, + NGBE_TX_FLAGS_IPV4 = 0x10, + NGBE_TX_FLAGS_CSUM = 0x20, + NGBE_TX_FLAGS_OUTER_IPV4 = 0x100, + NGBE_TX_FLAGS_LINKSEC = 0x200, + NGBE_TX_FLAGS_IPSEC = 0x400, + + /* software defined flags */ + NGBE_TX_FLAGS_SW_VLAN = 0x40, + NGBE_TX_FLAGS_FCOE = 0x80, +}; + +/* VLAN info */ +#define NGBE_TX_FLAGS_VLAN_MASK 0xffff0000 +#define NGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 +#define NGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 +#define NGBE_TX_FLAGS_VLAN_SHIFT 16 + +#define NGBE_MAX_RX_DESC_POLL 10 + +#define NGBE_MAX_VF_MC_ENTRIES 30 +#define NGBE_MAX_VF_FUNCTIONS 8 +#define MAX_EMULATION_MAC_ADDRS 16 +#define NGBE_MAX_PF_MACVLANS 15 +#define NGBE_VF_DEVICE_ID 0x1000 + +/* must account for pools assigned to VFs. 
*/ +#ifdef CONFIG_PCI_IOV +#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset) +#else +#define VMDQ_P(p) (p) +#endif + +#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ + { \ + u32 current_counter = rd32(hw, reg); \ + if (current_counter < last_counter) \ + counter += 0x100000000LL; \ + last_counter = current_counter; \ + counter &= 0xFFFFFFFF00000000LL; \ + counter |= current_counter; \ + } + +#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ + { \ + u64 current_counter_lsb = rd32(hw, reg_lsb); \ + u64 current_counter_msb = rd32(hw, reg_msb); \ + u64 current_counter = (current_counter_msb << 32) | \ + current_counter_lsb; \ + if (current_counter < last_counter) \ + counter += 0x1000000000LL; \ + last_counter = current_counter; \ + counter &= 0xFFFFFFF000000000LL; \ + counter |= current_counter; \ + } +#ifdef HAVE_XDP_SUPPORT +DECLARE_STATIC_KEY_FALSE(ngbe_xdp_locking_key); +#endif + +#ifndef XDP_PACKET_HEADROOM +#define XDP_PACKET_HEADROOM 256 +#endif + +struct vf_stats { + u64 gprc; + u64 gorc; + u64 gptc; + u64 gotc; + u64 mprc; +}; + +struct vf_data_storage { + struct pci_dev *vfdev; + u8 IOMEM *b4_addr; + u32 b4_buf[16]; + unsigned char vf_mac_addresses[ETH_ALEN]; + u16 vf_mc_hashes[NGBE_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + u16 default_vf_vlan_id; + u16 vlans_enabled; + bool clear_to_send; + struct vf_stats vfstats; + struct vf_stats last_vfstats; + struct vf_stats saved_rst_vfstats; + bool pf_set_mac; + u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ + u16 pf_qos; + u16 min_tx_rate; + u16 max_tx_rate; + u16 vlan_count; + u8 spoofchk_enabled; +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN + bool rss_query_enabled; +#endif + u8 trusted; + int xcast_mode; + unsigned int vf_api; +}; + +struct vf_macvlans { + struct list_head l; + int vf; + bool free; + bool is_macvlan; + u8 vf_macvlan[ETH_ALEN]; +}; + +#ifndef NGBE_NO_LRO +#define NGBE_LRO_MAX 32 /*Maximum number of LRO descriptors*/ +#define NGBE_LRO_GLOBAL 10 + +struct ngbe_lro_stats { + u32 flushed; + u32 coal; +}; + +/* + * ngbe_lro_header - header format to be aggregated by LRO + * @iph: IP header without options + * @tcp: TCP header + * @ts: Optional TCP timestamp data in TCP options + * + * This structure relies on the check above that verifies that the header + * is IPv4 and does not contain any options. + */ +struct ngbe_lrohdr { + struct iphdr iph; + struct tcphdr th; + __be32 ts[0]; +}; + +struct ngbe_lro_list { + struct sk_buff_head active; + struct ngbe_lro_stats stats; +}; + +#endif /* NGBE_NO_LRO */ +#define NGBE_MAX_TXD_PWR 14 +#define NGBE_MAX_DATA_PER_TXD (1 << NGBE_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), NGBE_MAX_DATA_PER_TXD) +#ifndef MAX_SKB_FRAGS +#define DESC_NEEDED 4 +#elif (MAX_SKB_FRAGS < 16) +#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) +#else +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#endif + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +struct ngbe_tx_buffer { + union ngbe_tx_desc *next_to_watch; + unsigned long time_stamp; + union { + struct sk_buff *skb; + /* XDP uses address ptr on irq_clean */ +#ifdef HAVE_XDP_FRAME_STRUCT + struct xdp_frame *xdpf; +#else + void *data; +#endif + }; + unsigned int bytecount; + unsigned short gso_segs; + __be16 protocol; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct ngbe_rx_buffer { + struct sk_buff *skb; + dma_addr_t 
dma; +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + dma_addr_t page_dma; + struct page *page; + unsigned int page_offset; +#endif +}; + +struct ngbe_queue_stats { + u64 packets; + u64 bytes; +#ifdef BP_EXTENDED_STATS + u64 yields; + u64 misses; + u64 cleaned; +#endif /* BP_EXTENDED_STATS */ +}; + +struct ngbe_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; +}; + +struct ngbe_rx_queue_stats { + u64 non_eop_descs; + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; + u64 csum_good_cnt; + u64 csum_err; +}; + +#define NGBE_TS_HDR_LEN 8 +enum ngbe_ring_state_t { +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + __NGBE_RX_3K_BUFFER, + __NGBE_RX_BUILD_SKB_ENABLED, +#endif + __NGBE_TX_XPS_INIT_DONE, + __NGBE_TX_DETECT_HANG, + __NGBE_HANG_CHECK_ARMED, + __NGBE_RX_HS_ENABLED, + __NGBE_TX_XDP_RING, +}; + +struct ngbe_fwd_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct net_device *vdev; + struct ngbe_adapter *adapter; + unsigned int tx_base_queue; + unsigned int rx_base_queue; + int index; /* pool index on PF */ +}; + +#define ring_uses_build_skb(ring) \ + test_bit(__NGBE_RX_BUILD_SKB_ENABLED, &(ring)->state) + + +#define ring_is_hs_enabled(ring) \ + test_bit(__NGBE_RX_HS_ENABLED, &(ring)->state) +#define set_ring_hs_enabled(ring) \ + set_bit(__NGBE_RX_HS_ENABLED, &(ring)->state) +#define clear_ring_hs_enabled(ring) \ + clear_bit(__NGBE_RX_HS_ENABLED, &(ring)->state) +#define check_for_tx_hang(ring) \ + test_bit(__NGBE_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__NGBE_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__NGBE_TX_DETECT_HANG, &(ring)->state) +#define ring_is_xdp(ring) \ + test_bit(__NGBE_TX_XDP_RING, &(ring)->state) +#define set_ring_xdp(ring) \ + set_bit(__NGBE_TX_XDP_RING, &(ring)->state) +#define clear_ring_xdp(ring) \ + clear_bit(__NGBE_TX_XDP_RING, &(ring)->state) + +struct ngbe_ring { + struct ngbe_ring *next; /* pointer to next ring in 
q_vector */ + struct ngbe_q_vector *q_vector; /* backpointer to host q_vector */ + struct net_device *netdev; /* netdev ring belongs to */ + struct bpf_prog *xdp_prog; + struct device *dev; /* device for DMA mapping */ + struct ngbe_fwd_adapter *accel; + void *desc; /* descriptor ring memory */ + union { + struct ngbe_tx_buffer *tx_buffer_info; + struct ngbe_rx_buffer *rx_buffer_info; + }; + spinlock_t tx_lock; /* used in XDP mode */ + unsigned long state; + u8 __iomem *tail; + dma_addr_t dma; /* phys. address of descriptor ring */ + unsigned int size; /* length in bytes */ + + u16 count; /* amount of descriptors */ + + u8 queue_index; /* needed for multiqueue queue management */ + u8 reg_idx; /* holds the special value that gets + * the hardware register offset + * associated with this ring, which is + * different for DCB and RSS modes + */ + u16 next_to_use; + u16 next_to_clean; + +#ifdef HAVE_PTP_1588_CLOCK + unsigned long last_rx_timestamp; + +#endif + u16 rx_buf_len; + union { +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + u16 next_to_alloc; +#endif + struct { + u8 atr_sample_rate; + u8 atr_count; + }; + }; + + u8 dcb_tc; + struct ngbe_queue_stats stats; +#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync syncp; +#endif + union { + struct ngbe_tx_queue_stats tx_stats; + struct ngbe_rx_queue_stats rx_stats; + }; +#ifdef HAVE_XDP_BUFF_RXQ + struct xdp_rxq_info xdp_rxq; +#endif +} ____cacheline_internodealigned_in_smp; + +enum ngbe_ring_f_enum { + RING_F_NONE = 0, + RING_F_VMDQ, /* SR-IOV uses the same ring feature */ + RING_F_RSS, + RING_F_ARRAY_SIZE /* must be last in enum set */ +}; + +#define TGB_MAX_RX_QUEUES 16 +#define NGBE_MAX_TX_QUEUES 16 +#define NGBE_MAX_XDP_QS NGBE_MAX_TX_QUEUES + + + +#define NGBE_MAX_RSS_INDICES 8 +#define NGBE_MAX_VMDQ_INDICES 8 +#define NGBE_MAX_FDIR_INDICES 8 +#define MAX_RX_QUEUES 8 +#define MAX_TX_QUEUES 8 +#define NGBE_MAX_L2A_QUEUES 4 +#define NGBE_BAD_L2A_QUEUE 3 + +#define NGBE_MAX_MACVLANS 8 + +struct ngbe_ring_feature { + 
u16 limit; /* upper limit on feature indices */ + u16 indices; /* current value of indices */ + u16 mask; /* Mask used for feature to ring mapping */ + u16 offset; /* offset to start of feature */ +}; + +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT +/* + * FCoE requires that all Rx buffers be over 2200 bytes in length. Since + * this is twice the size of a half page we need to double the page order + * for FCoE enabled Rx queues. + */ +static inline unsigned int ngbe_rx_bufsz(struct ngbe_ring __maybe_unused *ring) +{ +#if MAX_SKB_FRAGS < 8 + return ALIGN(NGBE_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024); +#else + return NGBE_RXBUFFER_2K; +#endif +} + +static inline unsigned int ngbe_rx_pg_order(struct ngbe_ring __maybe_unused *ring) +{ + return 0; +} +#define ngbe_rx_pg_size(_ring) (PAGE_SIZE << ngbe_rx_pg_order(_ring)) + +#endif +struct ngbe_ring_container { + struct ngbe_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +/* iterator for handling rings in ring container */ +#define ngbe_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + +#define MAX_RX_PACKET_BUFFERS ((adapter->flags & NGBE_FLAG_DCB_ENABLED) \ + ? 8 : 1) +#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS + +/* MAX_MSIX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. 
+ */ +struct ngbe_q_vector { + struct ngbe_adapter *adapter; + int cpu; /* CPU for DCA */ + u16 v_idx; /* index of q_vector within array, also used for + * finding the bit in EICR and friends that + * represents the vector for this ring */ + u16 itr; /* Interrupt throttle rate written to EITR */ + struct ngbe_ring_container rx, tx; + + struct napi_struct napi; +#ifndef HAVE_NETDEV_NAPI_LIST + struct net_device poll_dev; +#endif +#ifdef HAVE_IRQ_AFFINITY_HINT + cpumask_t affinity_mask; +#endif +#ifndef NGBE_NO_LRO + struct ngbe_lro_list lrolist; /* LRO list for queue vector*/ +#endif + int numa_node; + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 17]; + bool netpoll_rx; + +#ifdef HAVE_NDO_BUSY_POLL + atomic_t state; +#endif /* HAVE_NDO_BUSY_POLL */ + + /* for dynamic allocation of rings associated with this q_vector */ + struct ngbe_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + +#ifdef HAVE_NDO_BUSY_POLL +enum ngbe_qv_state_t { + NGBE_QV_STATE_IDLE = 0, + NGBE_QV_STATE_NAPI, + NGBE_QV_STATE_POLL, + NGBE_QV_STATE_DISABLE +}; + +static inline void ngbe_qv_init_lock(struct ngbe_q_vector *q_vector) +{ + /* reset state to idle */ + atomic_set(&q_vector->state, NGBE_QV_STATE_IDLE); +} + +/* called from the device poll routine to get ownership of a q_vector */ +static inline bool ngbe_qv_lock_napi(struct ngbe_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, NGBE_QV_STATE_IDLE, + NGBE_QV_STATE_NAPI); +#ifdef BP_EXTENDED_STATS + if (rc != NGBE_QV_STATE_IDLE) + q_vector->tx.ring->stats.yields++; +#endif + + return rc == NGBE_QV_STATE_IDLE; +} + +/* returns true is someone tried to get the qv while napi had it */ +static inline void ngbe_qv_unlock_napi(struct ngbe_q_vector *q_vector) +{ + WARN_ON(atomic_read(&q_vector->state) != NGBE_QV_STATE_NAPI); + + /* flush any outstanding Rx frames */ + if (q_vector->napi.gro_list) + napi_gro_flush(&q_vector->napi, false); + + /* reset state to idle */ + 
atomic_set(&q_vector->state, NGBE_QV_STATE_IDLE); +} + +/* called from ngbe_low_latency_poll() */ +static inline bool ngbe_qv_lock_poll(struct ngbe_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, NGBE_QV_STATE_IDLE, + NGBE_QV_STATE_POLL); +#ifdef BP_EXTENDED_STATS + if (rc != NGBE_QV_STATE_IDLE) + q_vector->tx.ring->stats.yields++; +#endif + return rc == NGBE_QV_STATE_IDLE; +} + +/* returns true if someone tried to get the qv while it was locked */ +static inline void ngbe_qv_unlock_poll(struct ngbe_q_vector *q_vector) +{ + WARN_ON(atomic_read(&q_vector->state) != NGBE_QV_STATE_POLL); + + /* reset state to idle */ + atomic_set(&q_vector->state, NGBE_QV_STATE_IDLE); +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool ngbe_qv_busy_polling(struct ngbe_q_vector *q_vector) +{ + return atomic_read(&q_vector->state) == NGBE_QV_STATE_POLL; +} + +/* false if QV is currently owned */ +static inline bool ngbe_qv_disable(struct ngbe_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, NGBE_QV_STATE_IDLE, + NGBE_QV_STATE_DISABLE); + + return rc == NGBE_QV_STATE_IDLE; +} + +#endif /* HAVE_NDO_BUSY_POLL */ +#ifdef NGBE_HWMON + +#define NGBE_HWMON_TYPE_TEMP 0 +#define NGBE_HWMON_TYPE_ALARMTHRESH 1 +#define NGBE_HWMON_TYPE_DALARMTHRESH 2 + +struct hwmon_attr { + struct device_attribute dev_attr; + struct ngbe_hw *hw; + struct ngbe_thermal_diode_data *sensor; + char name[19]; +}; + +struct hwmon_buff { + struct device *device; + struct hwmon_attr *hwmon_list; + unsigned int n_hwmon; +}; +#endif /* NGBE_HWMON */ + +/* + * microsecond values for various ITR rates shifted by 2 to fit itr register + * with the first 3 bits reserved 0 + */ +#define NGBE_70K_ITR 57 +#define NGBE_20K_ITR 200 +#define NGBE_4K_ITR 1024 +#define NGBE_7K_ITR 595 + +/* ngbe_test_staterr - tests bits in Rx descriptor status and error fields */ +static inline __le32 ngbe_test_staterr(union ngbe_rx_desc *rx_desc, + const u32 stat_err_bits) 
+{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +/* ngbe_desc_unused - calculate if we have unused descriptors */ +static inline u16 ngbe_desc_unused(struct ngbe_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; +} + +#define NGBE_RX_DESC(R, i) \ + (&(((union ngbe_rx_desc *)((R)->desc))[i])) +#define NGBE_TX_DESC(R, i) \ + (&(((union ngbe_tx_desc *)((R)->desc))[i])) +#define NGBE_TX_CTXTDESC(R, i) \ + (&(((struct ngbe_tx_context_desc *)((R)->desc))[i])) + +#define NGBE_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */ +#define TCP_TIMER_VECTOR 0 +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR + TCP_TIMER_VECTOR) + +#define NGBE_MAX_MSIX_Q_VECTORS_EMERALD 9 + +struct ngbe_mac_addr { + u8 addr[ETH_ALEN]; + u16 state; /* bitmask */ + u64 pools; +}; + +#define NGBE_MAC_STATE_DEFAULT 0x1 +#define NGBE_MAC_STATE_MODIFIED 0x2 +#define NGBE_MAC_STATE_IN_USE 0x4 + +#ifdef NGBE_PROCFS +struct ngbe_therm_proc_data { + struct ngbe_hw *hw; + struct ngbe_thermal_diode_data *sensor_data; +}; +#endif + +/* + * Only for array allocations in our adapter struct. + * we can actually assign 64 queue vectors based on our extended-extended + * interrupt registers. 
+ */ +#define MAX_MSIX_Q_VECTORS NGBE_MAX_MSIX_Q_VECTORS_EMERALD +#define MAX_MSIX_COUNT NGBE_MAX_MSIX_VECTORS_EMERALD + +#define MIN_MSIX_Q_VECTORS 1 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) + +/* default to trying for four seconds */ +#define NGBE_TRY_LINK_TIMEOUT (4 * HZ) +#define NGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */ + +/** + * ngbe_adapter.flag + **/ +#define NGBE_FLAG_MSI_CAPABLE (u32)(1 << 0) +#define NGBE_FLAG_MSI_ENABLED (u32)(1 << 1) +#define NGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2) +#define NGBE_FLAG_MSIX_ENABLED (u32)(1 << 3) +#ifndef NGBE_NO_LLI +#define NGBE_FLAG_LLI_PUSH (u32)(1 << 4) +#endif + +#define NGBE_FLAG_TPH_ENABLED (u32)(1 << 6) +#define NGBE_FLAG_TPH_CAPABLE (u32)(1 << 7) +#define NGBE_FLAG_TPH_ENABLED_DATA (u32)(1 << 8) + +#define NGBE_FLAG_MQ_CAPABLE (u32)(1 << 9) +#define NGBE_FLAG_DCB_ENABLED (u32)(1 << 10) +#define NGBE_FLAG_VMDQ_ENABLED (u32)(1 << 11) +#define NGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 12) +#define NGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 13) +#define NGBE_FLAG_NEED_ANC_CHECK (u32)(1 << 14) +#define NGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 15) +#define NGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 16) +#define NGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 19) +#define NGBE_FLAG_SRIOV_ENABLED (u32)(1 << 20) +#define NGBE_FLAG_SRIOV_REPLICATION_ENABLE (u32)(1 << 21) +#define NGBE_FLAG_SRIOV_L2SWITCH_ENABLE (u32)(1 << 22) +#define NGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE (u32)(1 << 23) +#define NGBE_FLAG_RX_HWTSTAMP_ENABLED (u32)(1 << 24) +#define NGBE_FLAG_VXLAN_OFFLOAD_CAPABLE (u32)(1 << 25) +#define NGBE_FLAG_VXLAN_OFFLOAD_ENABLE (u32)(1 << 26) +#define NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER (u32)(1 << 27) +#define NGBE_FLAG_NEED_ETH_PHY_RESET (u32)(1 << 28) +#define NGBE_FLAG_RX_HS_ENABLED (u32)(1 << 30) +#define NGBE_FLAG_LINKSEC_ENABLED (u32)(1 << 31) +#define NGBE_FLAG_IPSEC_ENABLED (u32)(1 << 5) + +/* preset defaults */ +#define NGBE_FLAGS_SP_INIT (NGBE_FLAG_MSI_CAPABLE \ + | NGBE_FLAG_MSIX_CAPABLE \ + | 
NGBE_FLAG_MQ_CAPABLE \ + | NGBE_FLAG_SRIOV_CAPABLE) + +/** + * ngbe_adapter.flag2 + **/ +#ifndef NGBE_NO_HW_RSC +#define NGBE_FLAG2_RSC_CAPABLE (1U << 0) +#define NGBE_FLAG2_RSC_ENABLED (1U << 1) +#else +#define NGBE_FLAG2_RSC_CAPABLE (0U) +#define NGBE_FLAG2_RSC_ENABLED (0U) +#endif +#define NGBE_FLAG2_TEMP_SENSOR_CAPABLE (1U << 3) +#define NGBE_FLAG2_TEMP_SENSOR_EVENT (1U << 4) +#define NGBE_FLAG2_SEARCH_FOR_SFP (1U << 5) +#define NGBE_FLAG2_SFP_NEEDS_RESET (1U << 6) +#define NGBE_FLAG2_PF_RESET_REQUESTED (1U << 7) +#define NGBE_FLAG2_FDIR_REQUIRES_REINIT (1U << 8) +#define NGBE_FLAG2_RSS_FIELD_IPV4_UDP (1U << 9) +#define NGBE_FLAG2_RSS_FIELD_IPV6_UDP (1U << 10) +#define NGBE_FLAG2_RSS_ENABLED (1U << 12) +#define NGBE_FLAG2_PTP_PPS_ENABLED (1U << 11) +#define NGBE_FLAG2_EEE_CAPABLE (1U << 14) +#define NGBE_FLAG2_EEE_ENABLED (1U << 15) +#define NGBE_FLAG2_VXLAN_REREG_NEEDED (1U << 16) +#define NGBE_FLAG2_DEV_RESET_REQUESTED (1U << 18) +#define NGBE_FLAG2_RESET_INTR_RECEIVED (1U << 19) +#define NGBE_FLAG2_GLOBAL_RESET_REQUESTED (1U << 20) +#define NGBE_FLAG2_MNG_REG_ACCESS_DISABLED (1U << 22) +#define NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP (1U << 23) +#define NGBE_FLAG2_ECC_ERR_RESET (1U << 24) +#define NGBE_FLAG2_PCIE_NEED_RECOVER (1U << 31) + +#define NGBE_SET_FLAG(_input, _flag, _result) \ + ((_flag <= _result) ? 
\ + ((u32)(_input & _flag) * (_result / _flag)) : \ + ((u32)(_input & _flag) / (_flag / _result))) + +enum ngbe_isb_idx { + NGBE_ISB_HEADER, + NGBE_ISB_MISC, + NGBE_ISB_VEC0, + NGBE_ISB_VEC1, + NGBE_ISB_MAX +}; + +/* board specific private data structure */ +struct ngbe_adapter { +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) ||\ + defined(NETIF_F_HW_VLAN_STAG_TX) +#ifdef HAVE_VLAN_RX_REGISTER + struct vlan_group *vlgrp; /* must be first, see ngbe_receive_skb */ +#else /* HAVE_VLAN_RX_REGISTER */ + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; +#endif /* HAVE_VLAN_RX_REGISTER */ +#endif + /* OS defined structs */ + struct net_device *netdev; + struct bpf_prog *xdp_prog; + struct pci_dev *pdev; + + unsigned long state; + + /* Some features need tri-state capability, + * thus the additional *_CAPABLE flags. + */ + u32 flags; + u32 flags2; + u32 led_conf; + u32 gphy_efuse[2]; + + /* Tx fast path data */ + int num_tx_queues; + u16 tx_itr_setting; + u16 tx_work_limit; + + /* Rx fast path data */ + int num_rx_queues; + u16 rx_itr_setting; + u16 rx_work_limit; + + + /* XDP */ + int num_xdp_queues; + struct ngbe_ring *xdp_ring[NGBE_MAX_XDP_QS]; + + unsigned int num_vmdqs; /* does not include pools assigned to VFs */ + unsigned int queues_per_pool; + + /* TX */ + struct ngbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; + + u64 restart_queue; + u64 lsc_int; + u32 tx_timeout_count; + + /* RX */ + struct ngbe_ring *rx_ring[MAX_RX_QUEUES]; + u64 hw_csum_rx_error; + u64 hw_csum_rx_good; + u64 hw_rx_no_dma_resources; + u64 non_eop_descs; + u32 alloc_rx_page_failed; + u32 alloc_rx_buff_failed; + + struct ngbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; + +#ifdef HAVE_DCBNL_IEEE + struct ieee_pfc *ngbe_ieee_pfc; + struct ieee_ets *ngbe_ieee_ets; +#endif + enum ngbe_fc_mode last_lfc_mode; + int num_q_vectors; /* current number of q_vectors for device */ + int max_q_vectors; /* upper limit of q_vectors for device */ + struct ngbe_ring_feature 
ring_feature[RING_F_ARRAY_SIZE]; + u16 irq_remap_offset; + struct msix_entry *msix_entries; + u16 old_rss_limit; +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats net_stats; +#endif +#ifndef NGBE_NO_LRO + struct ngbe_lro_stats lro_stats; +#endif + +#ifdef ETHTOOL_TEST + u64 test_icr; + struct ngbe_ring test_tx_ring; + struct ngbe_ring test_rx_ring; +#endif + + /* structs defined in ngbe_hw.h */ + struct ngbe_hw hw; + u16 msg_enable; + struct ngbe_hw_stats stats; +#ifndef NGBE_NO_LLI + u32 lli_port; + u32 lli_size; + u32 lli_etype; + u32 lli_vlan_pri; +#endif /* NGBE_NO_LLI */ + + struct ngbe_queue_stats old_rx_qstats[MAX_RX_QUEUES]; + struct ngbe_queue_stats old_tx_qstats[MAX_TX_QUEUES]; + struct ngbe_tx_queue_stats old_tx_stats[MAX_TX_QUEUES]; + struct ngbe_rx_queue_stats old_rx_stats[MAX_RX_QUEUES]; + + u32 *config_space; + u64 tx_busy; + unsigned int tx_ring_count; + unsigned int xdp_ring_count; + unsigned int rx_ring_count; + + u32 link_speed; + bool link_up; + unsigned long sfp_poll_time; + unsigned long link_check_timeout; + + struct timer_list service_timer; + struct work_struct service_task; + struct timer_list link_check_timer; + + u32 atr_sample_rate; + u8 __iomem *io_addr; /* Mainly for iounmap use */ + u32 wol; + + u16 bd_number; + +#ifdef HAVE_BRIDGE_ATTRIBS + u16 bridge_mode; +#endif + + char eeprom_id[32]; + u16 eeprom_cap; + bool netdev_registered; + u32 interrupt_event; +#ifdef HAVE_ETHTOOL_SET_PHYS_ID + u32 led_reg; +#endif + +#ifdef HAVE_PTP_1588_CLOCK + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned long last_overflow_check; + unsigned long last_rx_ptp_check; + spinlock_t tmreg_lock; + struct cyclecounter hw_cc; + struct timecounter hw_tc; + u32 base_incval; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + void (*ptp_setup_sdp) (struct 
ngbe_adapter *); +#endif /* HAVE_PTP_1588_CLOCK */ + + DECLARE_BITMAP(active_vfs, NGBE_MAX_VF_FUNCTIONS); + unsigned int num_vfs; + struct vf_data_storage *vfinfo; + struct vf_macvlans vf_mvs; + struct vf_macvlans *mv_list; +#ifdef CONFIG_PCI_IOV + u32 timer_event_accumulator; + u32 vferr_refcount; +#endif + struct ngbe_mac_addr *mac_table; +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) + __le16 vxlan_port; +#endif /* HAVE_UDP_ENC_RX_OFFLAD || HAVE_VXLAN_RX_OFFLOAD */ +#ifdef HAVE_UDP_ENC_RX_OFFLOAD + __le16 geneve_port; +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ +#ifdef NGBE_SYSFS +#ifdef NGBE_HWMON + struct hwmon_buff ngbe_hwmon_buff; +#endif /* NGBE_HWMON */ +#else /* NGBE_SYSFS */ +#ifdef NGBE_PROCFS + struct proc_dir_entry *eth_dir; + struct proc_dir_entry *info_dir; + u64 old_lsc; + struct proc_dir_entry *therm_dir; + struct ngbe_therm_proc_data therm_data; +#endif /* NGBE_PROCFS */ +#endif /* NGBE_SYSFS */ + +#ifdef HAVE_NGBE_DEBUG_FS + struct dentry *ngbe_dbg_adapter; +#endif /*HAVE_NGBE_DEBUG_FS*/ + u8 default_up; +#ifdef HAVE_TX_MQ +#ifndef HAVE_NETDEV_SELECT_QUEUE + unsigned int indices; +#endif /* !HAVE_NETDEV_SELECT_QUEUE*/ +#endif /* HAVE_TX_MQ */ + unsigned long fwd_bitmask; /* bitmask indicating in use pools */ + unsigned long tx_timeout_last_recovery; + u32 tx_timeout_recovery_level; + +#define NGBE_MAX_RETA_ENTRIES 128 + u8 rss_indir_tbl[NGBE_MAX_RETA_ENTRIES]; +#define NGBE_RSS_KEY_SIZE 40 + u32 rss_key[NGBE_RSS_KEY_SIZE / sizeof(u32)]; + + void *ipsec; + + /* misc interrupt status block */ + dma_addr_t isb_dma; + u32 *isb_mem; + u32 isb_tag[NGBE_ISB_MAX]; + + u32 hang_cnt; + u64 eth_priv_flags; +#define NGBE_ETH_PRIV_FLAG_LLDP BIT(0) +}; + +static inline u32 ngbe_misc_isb(struct ngbe_adapter *adapter, + enum ngbe_isb_idx idx) +{ + u32 cur_tag = 0; + + cur_tag = adapter->isb_mem[NGBE_ISB_HEADER]; + adapter->isb_tag[idx] = cur_tag; + + return cpu_to_le32(adapter->isb_mem[idx]); +} + +static inline u8 ngbe_max_rss_indices(struct 
ngbe_adapter *adapter) +{ + if (adapter->xdp_prog) + return NGBE_MAX_RSS_INDICES / 2; + return NGBE_MAX_RSS_INDICES; +} + +enum ngbe_state_t { + __NGBE_TESTING, + __NGBE_RESETTING, + __NGBE_DOWN, + __NGBE_HANGING, + __NGBE_DISABLED, + __NGBE_REMOVING, + __NGBE_SERVICE_SCHED, + __NGBE_SERVICE_INITED, + __NGBE_IN_SFP_INIT, + __NGBE_NO_PHY_SET, +#ifdef HAVE_PTP_1588_CLOCK + __NGBE_PTP_RUNNING, + __NGBE_PTP_TX_IN_PROGRESS, +#endif +}; + +struct ngbe_cb { +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + union { /* Union defining head/tail partner */ + struct sk_buff *head; + struct sk_buff *tail; + }; +#endif + dma_addr_t dma; +#ifndef NGBE_NO_LRO + __be32 tsecr; /* timestamp echo response */ + u32 tsval; /* timestamp value in host order */ + u32 next_seq; /* next expected sequence number */ + u16 free; /* 65521 minus total size */ + u16 mss; /* size of data portion of packet */ +#endif /* NGBE_NO_LRO */ +#ifdef HAVE_VLAN_RX_REGISTER + u16 vid; /* VLAN tag */ +#endif + u16 append_cnt; /* number of skb's appended */ +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + bool page_released; + bool dma_released; +#endif +}; +#define NGBE_CB(skb) ((struct ngbe_cb *)(skb)->cb) + +/* ESX ngbe CIM IOCTL definition */ + +#ifdef NGBE_SYSFS +void ngbe_sysfs_exit(struct ngbe_adapter *adapter); +int ngbe_sysfs_init(struct ngbe_adapter *adapter); +#endif /* NGBE_SYSFS */ +#ifdef NGBE_PROCFS +void ngbe_procfs_exit(struct ngbe_adapter *adapter); +int ngbe_procfs_init(struct ngbe_adapter *adapter); +int ngbe_procfs_topdir_init(void); +void ngbe_procfs_topdir_exit(void); +#endif /* NGBE_PROCFS */ + +/* needed by ngbe_main.c */ +int ngbe_validate_mac_addr(u8 *mc_addr); +void ngbe_check_options(struct ngbe_adapter *adapter); +void ngbe_assign_netdev_ops(struct net_device *netdev); + +/* needed by ngbe_ethtool.c */ +extern char ngbe_driver_name[]; +extern const char ngbe_driver_version[]; + +void ngbe_irq_disable(struct ngbe_adapter *adapter); +void ngbe_irq_enable(struct ngbe_adapter *adapter, bool 
queues, bool flush); +int ngbe_open(struct net_device *netdev); +int ngbe_close(struct net_device *netdev); +void ngbe_up(struct ngbe_adapter *adapter); +void ngbe_down(struct ngbe_adapter *adapter); +void ngbe_reinit_locked(struct ngbe_adapter *adapter); +void ngbe_reset(struct ngbe_adapter *adapter); +void ngbe_set_ethtool_ops(struct net_device *netdev); +int ngbe_setup_rx_resources(struct ngbe_ring *); +int ngbe_setup_tx_resources(struct ngbe_ring *); +void ngbe_free_rx_resources(struct ngbe_ring *); +void ngbe_free_tx_resources(struct ngbe_ring *); +void ngbe_configure_rx_ring(struct ngbe_adapter *, + struct ngbe_ring *); +void ngbe_configure_tx_ring(struct ngbe_adapter *, + struct ngbe_ring *); +void ngbe_update_stats(struct ngbe_adapter *adapter); +int ngbe_init_interrupt_scheme(struct ngbe_adapter *adapter); +void ngbe_reset_interrupt_capability(struct ngbe_adapter *adapter); +void ngbe_set_interrupt_capability(struct ngbe_adapter *adapter); +void ngbe_clear_interrupt_scheme(struct ngbe_adapter *adapter); +netdev_tx_t ngbe_xmit_frame_ring(struct sk_buff *, + struct ngbe_adapter *, + struct ngbe_ring *); +void ngbe_unmap_and_free_tx_resource(struct ngbe_ring *, + struct ngbe_tx_buffer *); +void ngbe_alloc_rx_buffers(struct ngbe_ring *, u16); + +void ngbe_set_rx_mode(struct net_device *netdev); +int ngbe_write_mc_addr_list(struct net_device *netdev); +int ngbe_setup_tc(struct net_device *dev, u8 tc, bool save_stats); +void ngbe_tx_ctxtdesc(struct ngbe_ring *, u32, u32, u32, u32); +void ngbe_do_reset(struct net_device *netdev); +void ngbe_write_eitr(struct ngbe_q_vector *q_vector); +int ngbe_poll(struct napi_struct *napi, int budget); +void ngbe_disable_rx_queue(struct ngbe_adapter *adapter, + struct ngbe_ring *); +void ngbe_vlan_strip_enable(struct ngbe_adapter *adapter); +void ngbe_vlan_strip_disable(struct ngbe_adapter *adapter); +#ifdef ETHTOOL_OPS_COMPAT +int ethtool_ioctl(struct ifreq *ifr); +#endif +void ngbe_print_tx_hang_status(struct ngbe_adapter 
*adapter); + +#ifdef HAVE_NGBE_DEBUG_FS +void ngbe_dbg_adapter_init(struct ngbe_adapter *adapter); +void ngbe_dbg_adapter_exit(struct ngbe_adapter *adapter); +void ngbe_dbg_init(void); +void ngbe_dbg_exit(void); +void ngbe_dump(struct ngbe_adapter *adapter); +#endif /* HAVE_NGBE_DEBUG_FS */ + +static inline struct netdev_queue *txring_txq(const struct ngbe_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} + +int ngbe_wol_supported(struct ngbe_adapter *adapter); +int ngbe_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd); +int ngbe_write_uc_addr_list(struct net_device *netdev, int pool); +void ngbe_full_sync_mac_table(struct ngbe_adapter *adapter); +int ngbe_add_mac_filter(struct ngbe_adapter *adapter, + u8 *addr, u16 pool); +int ngbe_del_mac_filter(struct ngbe_adapter *adapter, + u8 *addr, u16 pool); +int ngbe_available_rars(struct ngbe_adapter *adapter); +#ifndef HAVE_VLAN_RX_REGISTER +void ngbe_vlan_mode(struct net_device *, u32); +#endif + +#ifdef HAVE_PTP_1588_CLOCK +void ngbe_ptp_init(struct ngbe_adapter *adapter); +void ngbe_ptp_stop(struct ngbe_adapter *adapter); +void ngbe_ptp_suspend(struct ngbe_adapter *adapter); +void ngbe_ptp_overflow_check(struct ngbe_adapter *adapter); +void ngbe_ptp_rx_hang(struct ngbe_adapter *adapter); +void ngbe_ptp_rx_hwtstamp(struct ngbe_adapter *adapter, struct sk_buff *skb); +int ngbe_ptp_set_ts_config(struct ngbe_adapter *adapter, struct ifreq *ifr); +int ngbe_ptp_get_ts_config(struct ngbe_adapter *adapter, struct ifreq *ifr); +void ngbe_ptp_start_cyclecounter(struct ngbe_adapter *adapter); +void ngbe_ptp_reset(struct ngbe_adapter *adapter); +void ngbe_ptp_check_pps_event(struct ngbe_adapter *adapter); +#endif /* HAVE_PTP_1588_CLOCK */ +#ifdef CONFIG_PCI_IOV +void ngbe_sriov_reinit(struct ngbe_adapter *adapter); +#endif + +void ngbe_set_rx_drop_en(struct ngbe_adapter *adapter); + +u32 ngbe_rss_indir_tbl_entries(struct ngbe_adapter *adapter); +void ngbe_store_reta(struct 
ngbe_adapter *adapter); + +/** + * interrupt masking operations. each bit in PX_ICn correspond to a interrupt. + * disable a interrupt by writing to PX_IMS with the corresponding bit=1 + * enable a interrupt by writing to PX_IMC with the corresponding bit=1 + * trigger a interrupt by writing to PX_ICS with the corresponding bit=1 + **/ +//#define NGBE_INTR_ALL (~0ULL) +#define NGBE_INTR_ALL 0x1FF +#define NGBE_INTR_MISC(A) (1ULL << (A)->num_q_vectors) +#define NGBE_INTR_MISC_VMDQ(A) (1ULL << ((A)->num_q_vectors + (A)->ring_feature[RING_F_VMDQ].offset)) +#define NGBE_INTR_QALL(A) (NGBE_INTR_MISC(A) - 1) +#define NGBE_INTR_Q(i) (1ULL << (i)) +static inline void ngbe_intr_enable(struct ngbe_hw *hw, u64 qmask) +{ + u32 mask; + + mask = (qmask & 0xFFFFFFFF); + if (mask) { + wr32(hw, NGBE_PX_IMC, mask); + } +} + +static inline void ngbe_intr_disable(struct ngbe_hw *hw, u64 qmask) +{ + u32 mask; + + mask = (qmask & 0xFFFFFFFF); + if (mask) + wr32(hw, NGBE_PX_IMS, mask); +} + +static inline void ngbe_intr_trigger(struct ngbe_hw *hw, u64 qmask) +{ + u32 mask; + + mask = (qmask & 0xFFFFFFFF); + if (mask) + wr32(hw, NGBE_PX_ICS, mask); +} + +#define NGBE_RING_SIZE(R) ((R)->count < NGBE_MAX_TXD ? (R)->count / 128 : 0) + +#endif /* _NGBE_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_debugfs.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_debugfs.c new file mode 100644 index 000000000000..00a33583da9a --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_debugfs.c @@ -0,0 +1,778 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include "ngbe.h"
+
+#ifdef HAVE_NGBE_DEBUG_FS
+/* NOTE(review): the two header names below were lost in extraction (bare
+ * "#include" with no argument); debugfs.c needs debugfs and module support —
+ * confirm the exact headers against the oob ngbe-1.2.5.3 sources.
+ */
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
+static struct dentry *ngbe_dbg_root;
+static int ngbe_data_mode;
+
+/* data-mode word: high 16 bits select the function, low 16 bits carry args */
+#define NGBE_DATA_FUNC(dm) ((dm) & ~0xFFFF)
+#define NGBE_DATA_ARGS(dm) ((dm) & 0xFFFF)
+enum ngbe_data_func {
+	NGBE_FUNC_NONE = (0 << 16),
+	NGBE_FUNC_DUMP_BAR = (1 << 16),
+	NGBE_FUNC_DUMP_RDESC = (2 << 16),
+	NGBE_FUNC_DUMP_TDESC = (3 << 16),
+	NGBE_FUNC_FLASH_READ = (4 << 16),
+	NGBE_FUNC_FLASH_WRITE = (5 << 16),
+};
+
+/**
+ * data operation
+ **/
+static ssize_t
+ngbe_simple_read_from_pcibar(struct ngbe_adapter *adapter, int res,
+			     void __user *buf, size_t size, loff_t *ppos)
+{
+	loff_t pos = *ppos;
+	u32 miss, len, limit = pci_resource_len(adapter->pdev, res);
+
+	if (pos < 0)
+		return 0;
+
+	/* clamp the read window to the end of the BAR */
+	limit = (pos + size <= limit ? pos + size : limit);
+	for (miss = 0; pos < limit && !miss; buf += len, pos += len) {
+		u32 val = 0, reg = round_down(pos, 4);
+		u32 off = pos - reg;
+
+		len = (reg + 4 <= limit ?
4 - off : 4 - off - (limit - reg - 4));
+		val = ngbe_rd32(adapter->io_addr + reg);
+		/* cast to a byte pointer: "&val + off" is u32 pointer
+		 * arithmetic and would advance 4*off bytes, reading past
+		 * the local variable for any off > 0
+		 */
+		miss = copy_to_user(buf, (u8 *)&val + off, len);
+	}
+
+	size = pos - *ppos - miss;
+	*ppos += size;
+
+	return size;
+}
+
+static ssize_t
+ngbe_simple_read_from_flash(struct ngbe_adapter *adapter,
+			    void __user *buf, size_t size, loff_t *ppos)
+{
+	struct ngbe_hw *hw = &adapter->hw;
+	loff_t pos = *ppos;
+	size_t ret = 0;
+	loff_t rpos, rtail;
+	void __user *to = buf;
+	size_t available = adapter->hw.flash.dword_size << 2;
+
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= available || !size)
+		return 0;
+	if (size > available - pos)
+		size = available - pos;
+
+	/* operate on whole dwords inside [pos, pos + size) */
+	rpos = round_up(pos, 4);
+	rtail = round_down(pos + size, 4);
+	if (rtail < rpos)
+		return 0;
+
+	to += rpos - pos;
+	while (rpos <= rtail) {
+		u32 value = ngbe_rd32(adapter->io_addr + rpos);
+
+		/* NOTE(review): this is a read path but it calls
+		 * flash.ops.write_buffer — looks like it should be a flash
+		 * buffer *read*; confirm against ngbe_hw.h before use, as
+		 * written this can clobber flash contents
+		 */
+		if (hw->flash.ops.write_buffer(hw, rpos >> 2, 1, &value)) {
+			ret = size;
+			break;
+		}
+		/* copy_to_user() returns the number of bytes NOT copied;
+		 * any non-zero result (including a partial 1-3 byte miss,
+		 * which the old "== 4" test ignored) is a fault
+		 */
+		if (copy_to_user(to, &value, 4)) {
+			ret = size;
+			break;
+		}
+		to += 4;
+		rpos += 4;
+	}
+
+	if (ret == size)
+		return -EFAULT;
+	size -= ret;
+	*ppos = pos + size;
+	return size;
+}
+
+/* write path is intentionally a no-op stub: accept and discard the data */
+static ssize_t
+ngbe_simple_write_to_flash(struct ngbe_adapter *adapter,
+			   const void __user *from, size_t size, loff_t *ppos, size_t available)
+{
+	return size;
+}
+
+static ssize_t
+ngbe_dbg_data_ops_read(struct file *filp, char __user *buffer,
+		       size_t size, loff_t *ppos)
+{
+	struct ngbe_adapter *adapter = filp->private_data;
+	u32 func = NGBE_DATA_FUNC(ngbe_data_mode);
+
+	rmb();
+
+	switch (func) {
+	case NGBE_FUNC_DUMP_BAR: {
+		u32 bar = NGBE_DATA_ARGS(ngbe_data_mode);
+
+		return ngbe_simple_read_from_pcibar(adapter, bar, buffer, size,
+						    ppos);
+	}
+	case NGBE_FUNC_FLASH_READ: {
+		return ngbe_simple_read_from_flash(adapter, buffer, size, ppos);
+	}
+	case NGBE_FUNC_DUMP_RDESC: {
+		struct ngbe_ring *ring;
+		u32 queue = NGBE_DATA_ARGS(ngbe_data_mode);
+
+		if (queue >= adapter->num_rx_queues)
+			return 0;
+		queue += VMDQ_P(0) * adapter->queues_per_pool;
+		ring =
adapter->rx_ring[queue]; + + return simple_read_from_buffer(buffer, size, ppos, + ring->desc, ring->size); + } + case NGBE_FUNC_DUMP_TDESC: { + struct ngbe_ring *ring; + u32 queue = NGBE_DATA_ARGS(ngbe_data_mode); + + if (queue >= adapter->num_tx_queues) + return 0; + queue += VMDQ_P(0) * adapter->queues_per_pool; + ring = adapter->tx_ring[queue]; + + return simple_read_from_buffer(buffer, size, ppos, + ring->desc, ring->size); + } + default: + break; + } + + return 0; +} + +static ssize_t +ngbe_dbg_data_ops_write(struct file *filp, + const char __user *buffer, + size_t size, loff_t *ppos) +{ + struct ngbe_adapter *adapter = filp->private_data; + u32 func = NGBE_DATA_FUNC(ngbe_data_mode); + + rmb(); + + switch (func) { + case NGBE_FUNC_FLASH_WRITE: { + u32 size = NGBE_DATA_ARGS(ngbe_data_mode); + + if (size > adapter->hw.flash.dword_size << 2) + size = adapter->hw.flash.dword_size << 2; + + return ngbe_simple_write_to_flash(adapter, buffer, size, ppos, size); + } + default: + break; + } + + return size; +} +static struct file_operations ngbe_dbg_data_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ngbe_dbg_data_ops_read, + .write = ngbe_dbg_data_ops_write, +}; + +/** + * reg_ops operation + **/ +static char ngbe_dbg_reg_ops_buf[256] = ""; +static ssize_t +ngbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ngbe_adapter *adapter = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: mode=0x%08x\n%s\n", + adapter->netdev->name, ngbe_data_mode, + ngbe_dbg_reg_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +static ssize_t +ngbe_dbg_reg_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct 
ngbe_adapter *adapter = filp->private_data; + char *pc = ngbe_dbg_reg_ops_buf; + int len; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(ngbe_dbg_reg_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(ngbe_dbg_reg_ops_buf, + sizeof(ngbe_dbg_reg_ops_buf)-1, + ppos, + buffer, + count); + if (len < 0) + return len; + + pc[len] = '\0'; + + if (strncmp(pc, "dump", 4) == 0) { + u32 mode = 0; + u16 args; + + pc += 4; + pc += strspn(pc, " \t"); + + if (!strncmp(pc, "bar", 3)) { + pc += 3; + mode = NGBE_FUNC_DUMP_BAR; + } else if (!strncmp(pc, "rdesc", 5)) { + pc += 5; + mode = NGBE_FUNC_DUMP_RDESC; + } else if (!strncmp(pc, "tdesc", 5)) { + pc += 5; + mode = NGBE_FUNC_DUMP_TDESC; + } else { + ngbe_dump(adapter); + } + + if (mode && 1 == sscanf(pc, "%hu", &args)) { + mode |= args; + } + + ngbe_data_mode = mode; + } else if (strncmp(pc, "flash", 4) == 0) { + u32 mode = 0; + u16 args; + + pc += 5; + pc += strspn(pc, " \t"); + if (!strncmp(pc, "read", 3)) { + pc += 4; + mode = NGBE_FUNC_FLASH_READ; + } else if (!strncmp(pc, "write", 5)) { + pc += 5; + mode = NGBE_FUNC_FLASH_WRITE; + } + + if (mode && 1 == sscanf(pc, "%hu", &args)) { + mode |= args; + } + + ngbe_data_mode = mode; + } else if (strncmp(ngbe_dbg_reg_ops_buf, "write", 5) == 0) { + u32 reg, value; + int cnt; + cnt = sscanf(&ngbe_dbg_reg_ops_buf[5], "%x %x", ®, &value); + if (cnt == 2) { + wr32(&adapter->hw, reg, value); + e_dev_info("write: 0x%08x = 0x%08x\n", reg, value); + } else { + e_dev_info("write \n"); + } + } else if (strncmp(ngbe_dbg_reg_ops_buf, "read", 4) == 0) { + u32 reg, value; + int cnt; + cnt = sscanf(&ngbe_dbg_reg_ops_buf[4], "%x", ®); + if (cnt == 1) { + value = rd32(&adapter->hw, reg); + e_dev_info("read 0x%08x = 0x%08x\n", reg, value); + } else { + e_dev_info("read \n"); + } + } else { + e_dev_info("Unknown command %s\n", ngbe_dbg_reg_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" read \n"); + e_dev_info(" write \n"); + } + 
return count; +} + +static const struct file_operations ngbe_dbg_reg_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ngbe_dbg_reg_ops_read, + .write = ngbe_dbg_reg_ops_write, +}; + +/** + * netdev_ops operation + **/ +static char ngbe_dbg_netdev_ops_buf[256] = ""; +static ssize_t +ngbe_dbg_netdev_ops_read(struct file *filp, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ngbe_adapter *adapter = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: mode=0x%08x\n%s\n", + adapter->netdev->name, ngbe_data_mode, + ngbe_dbg_netdev_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +static ssize_t +ngbe_dbg_netdev_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ngbe_adapter *adapter = filp->private_data; + int len; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(ngbe_dbg_netdev_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(ngbe_dbg_netdev_ops_buf, + sizeof(ngbe_dbg_netdev_ops_buf)-1, + ppos, + buffer, + count); + if (len < 0) + return len; + + ngbe_dbg_netdev_ops_buf[len] = '\0'; + + if (strncmp(ngbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { +#if defined(HAVE_TX_TIMEOUT_TXQUEUE) + adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev, 0); +#elif defined(HAVE_NET_DEVICE_OPS) + adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev); +#else + adapter->netdev->tx_timeout(adapter->netdev); +#endif /* HAVE_NET_DEVICE_OPS */ + e_dev_info("tx_timeout called\n"); + } else { + e_dev_info("Unknown command: %s\n", ngbe_dbg_netdev_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" tx_timeout\n"); + } + return count; +} + +static struct file_operations 
ngbe_dbg_netdev_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ngbe_dbg_netdev_ops_read, + .write = ngbe_dbg_netdev_ops_write, +}; + +/** + * ngbe_dbg_adapter_init - setup the debugfs directory for the adapter + * @adapter: the adapter that is starting up + **/ +void ngbe_dbg_adapter_init(struct ngbe_adapter *adapter) +{ + const char *name = pci_name(adapter->pdev); + struct dentry *pfile; + + adapter->ngbe_dbg_adapter = debugfs_create_dir(name, ngbe_dbg_root); + if (!adapter->ngbe_dbg_adapter) { + e_dev_err("debugfs entry for %s failed\n", name); + return; + } + + pfile = debugfs_create_file("data", 0600, + adapter->ngbe_dbg_adapter, adapter, + &ngbe_dbg_data_ops_fops); + if (!pfile) + e_dev_err("debugfs netdev_ops for %s failed\n", name); + + pfile = debugfs_create_file("reg_ops", 0600, + adapter->ngbe_dbg_adapter, adapter, + &ngbe_dbg_reg_ops_fops); + if (!pfile) + e_dev_err("debugfs reg_ops for %s failed\n", name); + + pfile = debugfs_create_file("netdev_ops", 0600, + adapter->ngbe_dbg_adapter, adapter, + &ngbe_dbg_netdev_ops_fops); + if (!pfile) + e_dev_err("debugfs netdev_ops for %s failed\n", name); +} + +/** + * ngbe_dbg_adapter_exit - clear out the adapter's debugfs entries + * @pf: the pf that is stopping + **/ +void ngbe_dbg_adapter_exit(struct ngbe_adapter *adapter) +{ + if (adapter->ngbe_dbg_adapter) + debugfs_remove_recursive(adapter->ngbe_dbg_adapter); + adapter->ngbe_dbg_adapter = NULL; +} + +/** + * ngbe_dbg_init - start up debugfs for the driver + **/ +void ngbe_dbg_init(void) +{ + ngbe_dbg_root = debugfs_create_dir(ngbe_driver_name, NULL); + if (ngbe_dbg_root == NULL) + pr_err("init of debugfs failed\n"); +} + +/** + * ngbe_dbg_exit - clean out the driver's debugfs entries + **/ +void ngbe_dbg_exit(void) +{ + debugfs_remove_recursive(ngbe_dbg_root); +} + +#endif /* HAVE_NGBE_DEBUG_FS */ + +struct ngbe_reg_info { + u32 offset; + u32 length; + char *name; +}; + +static struct ngbe_reg_info ngbe_reg_info_tbl[] = { + + /* 
General Registers */ + {NGBE_CFG_PORT_CTL, 1, "CTRL"}, + {NGBE_CFG_PORT_ST, 1, "STATUS"}, + + /* RX Registers */ + {NGBE_PX_RR_CFG(0), 1, "SRRCTL"}, + {NGBE_PX_RR_RP(0), 1, "RDH"}, + {NGBE_PX_RR_WP(0), 1, "RDT"}, + {NGBE_PX_RR_CFG(0), 1, "RXDCTL"}, + {NGBE_PX_RR_BAL(0), 1, "RDBAL"}, + {NGBE_PX_RR_BAH(0), 1, "RDBAH"}, + + /* TX Registers */ + {NGBE_PX_TR_BAL(0), 1, "TDBAL"}, + {NGBE_PX_TR_BAH(0), 1, "TDBAH"}, + {NGBE_PX_TR_RP(0), 1, "TDH"}, + {NGBE_PX_TR_WP(0), 1, "TDT"}, + {NGBE_PX_TR_CFG(0), 1, "TXDCTL"}, + + /* MACVLAN */ + {NGBE_PSR_MAC_SWC_VM, 128, "PSR_MAC_SWC_VM"}, + {NGBE_PSR_MAC_SWC_AD_L, 32, "PSR_MAC_SWC_AD"}, + {NGBE_PSR_VLAN_TBL(0), 128, "PSR_VLAN_TBL"}, + + /* List Terminator */ + { .name = NULL } +}; + +/** + * ngbe_regdump - register printout routine + **/ +static void +ngbe_regdump(struct ngbe_hw *hw, struct ngbe_reg_info *reg_info) +{ +#if 0 + int i, n = 0; + u32 buffer[32*8]; + + switch (reg_info->offset) { + case NGBE_PSR_MAC_SWC_AD_L: + for (i = 0; i < reg_info->length; i++) { + wr32(hw, NGBE_PSR_MAC_SWC_IDX, i); + buffer[n++] = + rd32(hw, NGBE_PSR_MAC_SWC_AD_H); + buffer[n++] = + rd32(hw, NGBE_PSR_MAC_SWC_AD_L); + } + break; + default: + for (i = 0; i < reg_info->length; i++) { + buffer[n++] = rd32(hw, + reg_info->offset + 4*i); + } + break; + } + for (i = 0; n && i < 32; i++) { + pr_info("%-20s[%02x-%02x]", reg_info->name, i*8, i*8 + 7); + for (j = 0; n && j < 8; j++, n--) + pr_cont(" %08x", buffer[i*8 + j]); + pr_cont("\n"); + } + BUG_ON(n); +#endif +} + +/** + * ngbe_dump - Print registers, tx-rings and rx-rings + **/ +void ngbe_dump(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_reg_info *reg_info; + int n = 0; + struct ngbe_ring *tx_ring; + struct ngbe_tx_buffer *tx_buffer; + union ngbe_tx_desc *tx_desc; + struct my_u0 { u64 a; u64 b; } *u0; + struct ngbe_ring *rx_ring; +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + union ngbe_rx_desc *rx_desc; + struct 
ngbe_rx_buffer *rx_buffer_info; + u32 staterr; +#endif + int i = 0; + + if (!netif_msg_hw(adapter)) + return; + + /* Print Registers */ + dev_info(&adapter->pdev->dev, "Register Dump\n"); + pr_info(" Register Name Value\n"); + for (reg_info = ngbe_reg_info_tbl; reg_info->name; reg_info++) { + ngbe_regdump(hw, reg_info); + } + + /* Print TX Ring Summary */ + if (!netdev || !netif_running(netdev)) + return; + + dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); + pr_info(" %s %s %s %s\n", + "Queue [NTU] [NTC] [bi(ntc)->dma ]", + "leng", "ntw", "timestamp"); + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + tx_buffer->next_to_watch, + (u64)tx_buffer->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * Transmit Descriptor (Read) + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 |PAYLEN |POPTS|CC|IDX |STA |DCMD |DTYP |MAC |RSV |DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 23 20 19 18 17 16 15 0 + * + * Transmit Descriptor (Write-Back) + * +--------------------------------------------------------------+ + * 0 | RSV [63:0] | + * +--------------------------------------------------------------+ + * 8 | RSV | STA | RSV | + * +--------------------------------------------------------------+ + * 63 36 35 32 31 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("TX QUEUE 
INDEX = %d\n", tx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("%s%s %s %s %s %s\n", + "T [desc] [address 63:0 ] ", + "[PlPOIdStDDt Ln] [bi->dma ] ", + "leng", "ntw", "timestamp", "bi->skb"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + tx_desc = NGBE_TX_DESC(tx_ring, i); + tx_buffer = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (dma_unmap_len(tx_buffer, len) > 0) { + pr_info("T [0x%03X] %016llX %016llX %016llX " + "%08X %p %016llX %p", + i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + tx_buffer->next_to_watch, + (u64)tx_buffer->time_stamp, + tx_buffer->skb); + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + pr_cont(" NTC/U\n"); + else if (i == tx_ring->next_to_use) + pr_cont(" NTU\n"); + else if (i == tx_ring->next_to_clean) + pr_cont(" NTC\n"); + else + pr_cont("\n"); + + if (netif_msg_pktdata(adapter) && + tx_buffer->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + tx_buffer->skb->data, + dma_unmap_len(tx_buffer, len), + true); + } + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); + pr_info("Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("%5d %5X %5X\n", + n, rx_ring->next_to_use, rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + return; + + dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); + + /* Receive Descriptor Formats + * + * Receive Descriptor (Read) + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * Receive Descriptor (Write-Back) + * + * 63 48 47 32 31 30 21 
20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS | + * |/ RTT / PCoE_PARAM | | | CNT | Type | Type | + * |/ Flow Dir Flt ID | | | | | | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("%s%s%s", + "R [desc] [ PktBuf A0] ", + "[ HeadBuf DD] [bi->dma ] [bi->skb ] ", + "<-- Adv Rx Read format\n"); + pr_info("%s%s%s", + "RWB[desc] [PcsmIpSHl PtRs] ", + "[vl er S cks ln] ---------------- [bi->skb ] ", + "<-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = NGBE_RX_DESC(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + if (staterr & NGBE_RXD_STAT_DD) { + /* Descriptor Done */ + pr_info("RWB[0x%03X] %016llX " + "%016llX ---------------- %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + rx_buffer_info->skb); + } else { + pr_info("R [0x%03X] %016llX " + "%016llX %016llX %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)rx_buffer_info->page_dma, + rx_buffer_info->skb); + + if (netif_msg_pktdata(adapter) && + rx_buffer_info->page_dma) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + page_address(rx_buffer_info->page) + + rx_buffer_info->page_offset, + ngbe_rx_bufsz(rx_ring), true); + } + } + + if (i == rx_ring->next_to_use) + pr_cont(" NTU\n"); + else if (i == rx_ring->next_to_clean) + pr_cont(" NTC\n"); + else + pr_cont("\n"); + + } + } +#endif +} + diff --git 
a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c new file mode 100644 index 000000000000..4347b0b008dc --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -0,0 +1,3921 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +/* ethtool support for ngbe */ + +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef SIOCETHTOOL +#include + +#include "ngbe.h" +#include "ngbe_hw.h" +#include "ngbe_phy.h" +#ifdef HAVE_ETHTOOL_GET_TS_INFO +#include +#endif + +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif + +#define NGBE_ALL_RAR_ENTRIES 16 + +#ifdef ETHTOOL_OPS_COMPAT +#include "kcompat_ethtool.c" +#endif + +#ifdef HAVE_XDP_SUPPORT +#include +#endif + +#ifdef ETHTOOL_GSTATS +struct ngbe_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define NGBE_NETDEV_STAT(_net_stat) { \ + .stat_string = #_net_stat, \ + .sizeof_stat = sizeof_field(struct net_device_stats, _net_stat), \ + .stat_offset = offsetof(struct net_device_stats, _net_stat) \ +} +static const struct ngbe_stats ngbe_gstrings_net_stats[] = { + NGBE_NETDEV_STAT(rx_packets), + NGBE_NETDEV_STAT(tx_packets), + NGBE_NETDEV_STAT(rx_bytes), + NGBE_NETDEV_STAT(tx_bytes), + NGBE_NETDEV_STAT(rx_errors), + NGBE_NETDEV_STAT(tx_errors), + 
	/* Tail of ngbe_gstrings_net_stats[]: standard struct net_device_stats
	 * counters exported through ethtool (the array head, with the
	 * NGBE_NETDEV_STAT macro and the leading rx/tx counters, is above
	 * this hunk).
	 */
	NGBE_NETDEV_STAT(rx_dropped),
	NGBE_NETDEV_STAT(tx_dropped),
	NGBE_NETDEV_STAT(collisions),
	NGBE_NETDEV_STAT(rx_over_errors),
	NGBE_NETDEV_STAT(rx_crc_errors),
	NGBE_NETDEV_STAT(rx_frame_errors),
	NGBE_NETDEV_STAT(rx_fifo_errors),
	NGBE_NETDEV_STAT(rx_missed_errors),
	NGBE_NETDEV_STAT(tx_aborted_errors),
	NGBE_NETDEV_STAT(tx_carrier_errors),
	NGBE_NETDEV_STAT(tx_fifo_errors),
	NGBE_NETDEV_STAT(tx_heartbeat_errors),
};

/* Describe one driver-private statistic: the ethtool string shown to
 * userspace plus the size and byte offset of the backing field inside
 * struct ngbe_adapter, so the stats dump can copy it generically.
 */
#define NGBE_STAT(_name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = sizeof_field(struct ngbe_adapter, _stat), \
	.stat_offset = offsetof(struct ngbe_adapter, _stat) \
}

/* Driver/hardware statistics reported by ethtool -S (adapter counters,
 * MAC counters mirrored in adapter->stats, and optional LRO/PTP items).
 */
static struct ngbe_stats ngbe_gstrings_stats[] = {
	NGBE_STAT("rx_pkts_nic", stats.gprc),
	NGBE_STAT("tx_pkts_nic", stats.gptc),
	NGBE_STAT("rx_bytes_nic", stats.gorc),
	NGBE_STAT("tx_bytes_nic", stats.gotc),
	NGBE_STAT("lsc_int", lsc_int),
	NGBE_STAT("tx_busy", tx_busy),
	NGBE_STAT("non_eop_descs", non_eop_descs),
	NGBE_STAT("rx_broadcast", stats.bprc),
	NGBE_STAT("tx_broadcast", stats.bptc),
	NGBE_STAT("rx_multicast", stats.mprc),
	NGBE_STAT("tx_multicast", stats.mptc),
	NGBE_STAT("rx_no_buffer_count", stats.rnbc[0]),
	NGBE_STAT("tx_timeout_count", tx_timeout_count),
	NGBE_STAT("tx_restart_queue", restart_queue),
	NGBE_STAT("rx_long_length_count", stats.roc),
	NGBE_STAT("rx_short_length_count", stats.ruc),
	NGBE_STAT("tx_flow_control_xon", stats.lxontxc),
	NGBE_STAT("rx_flow_control_xon", stats.lxonrxc),
	NGBE_STAT("tx_flow_control_xoff", stats.lxofftxc),
	NGBE_STAT("rx_flow_control_xoff", stats.lxoffrxc),
	NGBE_STAT("rx_csum_offload_good_count", hw_csum_rx_good),
	NGBE_STAT("rx_csum_offload_errors", hw_csum_rx_error),
	NGBE_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
	NGBE_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
#ifndef NGBE_NO_LRO
	NGBE_STAT("lro_aggregated", lro_stats.coal),
	NGBE_STAT("lro_flushed", lro_stats.flushed),
#endif /* NGBE_NO_LRO */
	NGBE_STAT("rx_no_dma_resources", hw_rx_no_dma_resources),
	/* NOTE(review): the os2bmc rx/tx string-to-counter pairing below is
	 * inherited as-is; confirm against the ngbe datasheet counter names
	 * (b2ospc vs o2bspc) before relying on the labels.
	 */
	NGBE_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
	NGBE_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
	NGBE_STAT("os2bmc_tx_by_host", stats.o2bspc),
	NGBE_STAT("os2bmc_rx_by_host", stats.b2ogprc),
#ifdef HAVE_PTP_1588_CLOCK
	NGBE_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
	NGBE_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
#endif /* HAVE_PTP_1588_CLOCK */
};

/* ngbe allocates num_tx_queues and num_rx_queues symmetrically so
 * we set the num_rx_queues to evaluate to num_tx_queues. This is
 * used because we do not have a good way to get the max number of
 * rx queues with CONFIG_RPS disabled.
 */
#ifdef HAVE_TX_MQ
#ifdef HAVE_NETDEV_SELECT_QUEUE
#define NGBE_NUM_RX_QUEUES netdev->num_tx_queues
#define NGBE_NUM_TX_QUEUES netdev->num_tx_queues
#else
#define NGBE_NUM_RX_QUEUES adapter->indices
#define NGBE_NUM_TX_QUEUES adapter->indices
#endif /* HAVE_NETDEV_SELECT_QUEUE */
#else /* HAVE_TX_MQ */
#define NGBE_NUM_RX_QUEUES 1
#define NGBE_NUM_TX_QUEUES ( \
	((struct ngbe_adapter *)netdev_priv(netdev))->num_tx_queues)
#endif /* HAVE_TX_MQ */

/* u64 slots consumed by the per-queue stats block */
#define NGBE_QUEUE_STATS_LEN ( \
	(NGBE_NUM_TX_QUEUES + NGBE_NUM_RX_QUEUES) * \
	(sizeof(struct ngbe_queue_stats) / sizeof(u64)))
#define NGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ngbe_gstrings_stats)
#define NGBE_NETDEV_STATS_LEN ARRAY_SIZE(ngbe_gstrings_net_stats)
/* u64 slots consumed by the per-priority packet-buffer XON/XOFF counters */
#define NGBE_PB_STATS_LEN ( \
	(sizeof(((struct ngbe_adapter *)0)->stats.pxonrxc) + \
	sizeof(((struct ngbe_adapter *)0)->stats.pxontxc) + \
	sizeof(((struct ngbe_adapter *)0)->stats.pxoffrxc) + \
	sizeof(((struct ngbe_adapter *)0)->stats.pxofftxc)) \
	/ sizeof(u64))
/* u64 slots for per-VF stats; evaluates netdev from the expansion site */
#define NGBE_VF_STATS_LEN \
	((((struct ngbe_adapter *)netdev_priv(netdev))->num_vfs) * \
	(sizeof(struct vf_stats) / sizeof(u64)))
/* Total count returned for ETH_SS_STATS */
#define NGBE_STATS_LEN (NGBE_GLOBAL_STATS_LEN + \
			NGBE_NETDEV_STATS_LEN + \
			NGBE_PB_STATS_LEN + \
			NGBE_QUEUE_STATS_LEN + \
			NGBE_VF_STATS_LEN)

#endif /* ETHTOOL_GSTATS */
#ifdef ETHTOOL_TEST
static const char
ngbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)", "Eeprom test (offline)",
	"Interrupt test (offline)", "Loopback test (offline)",
	"Link test (on/offline)"
};
#define NGBE_TEST_LEN (sizeof(ngbe_gstrings_test) / ETH_GSTRING_LEN)
#endif /* ETHTOOL_TEST */

#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
/* One ethtool private flag: display string, flag bit, and whether the
 * flag is read-only to userspace.
 */
struct ngbe_priv_flags {
	char flag_string[ETH_GSTRING_LEN];
	u64 flag;
	bool read_only;
};

#define NGBE_PRIV_FLAG(_name, _flag, _read_only) { \
	.flag_string = _name, \
	.flag = _flag, \
	.read_only = _read_only, \
}

static const struct ngbe_priv_flags ngbe_gstrings_priv_flags[] = {
	NGBE_PRIV_FLAG("lldp", NGBE_ETH_PRIV_FLAG_LLDP, 0),
};

#define NGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ngbe_gstrings_priv_flags)

#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */

#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE
/**
 * ngbe_get_link_ksettings - report link settings (ethtool ksettings API)
 * @netdev: network interface
 * @cmd: filled with supported/advertised modes, autoneg, port, speed,
 *       duplex
 *
 * Builds the supported and advertised link-mode masks from the MAC's
 * reported capabilities and the PHY type.  For the yt8521s_sfi PHY the
 * media (UTP-to-RGMII vs fiber) is read from extended register 0xA001
 * under hw->phy_lock; the low 3 bits of that register select the mode.
 * Speed/duplex are taken from a live check_link() call, except in
 * interrupt context where cached adapter state is used (RHEL5 bonding
 * workaround).  Always returns 0.
 */
static int ngbe_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *cmd)
{
	struct ngbe_adapter *adapter = netdev_priv(netdev);
	struct ngbe_hw *hw = &adapter->hw;
	u32 supported_link = 0;
	u32 link_speed = 0;
	bool autoneg = false;
	bool link_up = 0;
	u16 yt_mode = 0;	/* yt8521s 0xA001 media mode; stays 0 for other PHYs */
	unsigned long flags;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);

	/* set the supported link speeds */
	if ((hw->phy.type == ngbe_phy_m88e1512_sfi) ||
	    (hw->phy.type == ngbe_phy_internal_yt8521s_sfi)) {
		/* fiber: 1G (as 1000baseX where the kernel supports it) + 100M */
		if (supported_link & NGBE_LINK_SPEED_1GB_FULL)
#ifndef HAVE_NOT_SUPPORTED_1000baseX_Full
			ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseX_Full);
#else
			ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
#endif

		if (supported_link & NGBE_LINK_SPEED_100_FULL)
			ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full);
	} else if (hw->phy.type == ngbe_phy_yt8521s_sfi) {
		/* media depends on the PHY's strap mode; read it once here,
		 * the value is reused for the advertising and port sections
		 */
		spin_lock_irqsave(&hw->phy_lock, flags);
		ngbe_phy_read_reg_ext_yt8521s(hw, 0xA001, 0, &yt_mode);
		spin_unlock_irqrestore(&hw->phy_lock, flags);
		if ((yt_mode & 7) == 0) { /* utp_to_rgmii */
			if (supported_link & NGBE_LINK_SPEED_1GB_FULL)
				ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);

			if (supported_link & NGBE_LINK_SPEED_100_FULL)
				ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full);

			if (supported_link & NGBE_LINK_SPEED_10_FULL)
				ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full);
		} else {
			if (supported_link & NGBE_LINK_SPEED_1GB_FULL)
#ifndef HAVE_NOT_SUPPORTED_1000baseX_Full
				ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseX_Full);
#else
				ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
#endif

			if (supported_link & NGBE_LINK_SPEED_100_FULL)
				ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full);
		}
	} else if ((hw->phy.type == ngbe_phy_internal) ||
		   (hw->phy.type == ngbe_phy_m88e1512)) {
		/* copper: 10/100/1000 full */
		if (supported_link & NGBE_LINK_SPEED_1GB_FULL)
			ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);

		if (supported_link & NGBE_LINK_SPEED_100_FULL)
			ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full);

		if (supported_link & NGBE_LINK_SPEED_10_FULL)
			ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full);

	} else {
		if (supported_link & NGBE_LINK_SPEED_1GB_FULL)
			ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
	}

	/* set the advertised speeds: from autoneg_advertised when autoneg
	 * is in use, otherwise from the forced speed
	 */
	if ((hw->phy.type == ngbe_phy_m88e1512_sfi) ||
	    (hw->phy.type == ngbe_phy_internal_yt8521s_sfi)) {
		if (hw->phy.autoneg_advertised) {
			if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_100_FULL)
				ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);

			if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_1GB_FULL)
#ifndef HAVE_NOT_SUPPORTED_1000baseX_Full
				ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseX_Full);
#else
				ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
#endif

		} else {
			if (hw->phy.force_speed & NGBE_LINK_SPEED_1GB_FULL)
#ifndef HAVE_NOT_SUPPORTED_1000baseX_Full
				ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseX_Full);
#else
				ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
#endif
			if (hw->phy.force_speed & NGBE_LINK_SPEED_100_FULL)
				ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);

			if (hw->phy.force_speed & NGBE_LINK_SPEED_10_FULL)
				ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
		}
	} else if (hw->phy.type == ngbe_phy_yt8521s_sfi) {
		if ((yt_mode & 7) == 0) {
			if (hw->phy.autoneg_advertised) {
				if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_10_FULL)
					ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);

				if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_100_FULL)
					ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);

				if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_1GB_FULL)
					ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
			} else {
				if (hw->phy.force_speed & NGBE_LINK_SPEED_10_FULL)
					ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);

				if (hw->phy.force_speed & NGBE_LINK_SPEED_100_FULL)
					ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);

				if (hw->phy.force_speed & NGBE_LINK_SPEED_1GB_FULL)
					ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
			}
		} else {
			if (hw->phy.autoneg_advertised) {
				if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_1GB_FULL)
#ifndef HAVE_NOT_SUPPORTED_1000baseX_Full
					ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseX_Full);
#else
					ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
#endif

				if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_100_FULL)
					ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);

				if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_10_FULL)
					ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
			} else {
				if (hw->phy.force_speed & NGBE_LINK_SPEED_1GB_FULL)
#ifndef HAVE_NOT_SUPPORTED_1000baseX_Full
					ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseX_Full);
#else
					ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
#endif

				if (hw->phy.force_speed & NGBE_LINK_SPEED_100_FULL)
					ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);

				if (hw->phy.force_speed & NGBE_LINK_SPEED_10_FULL)
					ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
			}
		}
	} else {
		if (hw->phy.autoneg_advertised) {
			if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_1GB_FULL)
				ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);

			if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_100_FULL)
				ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);

			if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_10_FULL)
				ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
		} else {
			if (hw->phy.force_speed & NGBE_LINK_SPEED_1GB_FULL)
				ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
			if (hw->phy.force_speed & NGBE_LINK_SPEED_100_FULL)
				ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
			if (hw->phy.force_speed & NGBE_LINK_SPEED_10_FULL)
				ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
		}
	}

	/* NOTE(review): Autoneg is added to "supported" unconditionally here
	 * and again inside the if below; the second add is redundant but
	 * harmless.
	 */
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	if (autoneg) {
		ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
		cmd->base.autoneg = AUTONEG_ENABLE;
	} else
		cmd->base.autoneg = AUTONEG_DISABLE;

	/* Determine the remaining settings based on the PHY type. */
	switch (adapter->hw.phy.type) {
	case ngbe_phy_internal:
	case ngbe_phy_m88e1512:
		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
		cmd->base.port = PORT_TP;
		break;
	case ngbe_phy_yt8521s_sfi:
		if ((yt_mode & 7) == 0) { /* utp_to_rgmii */
			ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
			ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
			cmd->base.port = PORT_TP;
		} else {
			ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
			ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
			cmd->base.port = PORT_FIBRE;
		}
		break;
	case ngbe_phy_internal_yt8521s_sfi:
	case ngbe_phy_m88e1512_sfi:
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
		cmd->base.port = PORT_FIBRE;
		break;
	default:
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
		cmd->base.port = PORT_OTHER;
		break;
	}

	if (!in_interrupt()) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	} else {
		/*
		 * this case is a special workaround for RHEL5 bonding
		 * that calls this routine from interrupt context
		 */
		link_speed = adapter->link_speed;
		link_up = adapter->link_up;
	}

	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

	switch (hw->fc.requested_mode) {
	case ngbe_fc_full:
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
		break;
	case ngbe_fc_rx_pause:
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Asym_Pause);
		break;
	case ngbe_fc_tx_pause:
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Asym_Pause);
		break;
	default:
		ethtool_link_ksettings_del_link_mode(cmd, advertising, Pause);
		ethtool_link_ksettings_del_link_mode(cmd, advertising, Asym_Pause);
	}

	if (link_up) {
		switch (link_speed) {
		case NGBE_LINK_SPEED_1GB_FULL:
			cmd->base.speed = SPEED_1000;
			break;
		case NGBE_LINK_SPEED_100_FULL:
			cmd->base.speed = SPEED_100;
			break;
		case NGBE_LINK_SPEED_10_FULL:
			cmd->base.speed = SPEED_10;
			break;
		default:
			break;
		}
		cmd->base.duplex = DUPLEX_FULL;
	} else {
		/* NOTE(review): -1 matches SPEED_UNKNOWN; for duplex (u8)
		 * -1 truncates to 0xff == DUPLEX_UNKNOWN, so the effect is
		 * correct, though the named constants would be clearer.
		 */
		cmd->base.speed = -1;
		cmd->base.duplex = -1;
	}

	return 0;
}
#else /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */
/**
 * ngbe_get_settings - report link settings (legacy ethtool_cmd API)
 * @netdev: network interface
 * @ecmd: filled with supported/advertised masks, autoneg, port, speed,
 *        duplex
 *
 * Legacy-u32 counterpart of ngbe_get_link_ksettings() for kernels
 * without the ksettings API.  Continues past this hunk.
 */
int ngbe_get_settings(struct net_device *netdev,
		      struct ethtool_cmd *ecmd)
{
	struct ngbe_adapter *adapter = netdev_priv(netdev);
	struct ngbe_hw *hw = &adapter->hw;
	u32 supported_link = 0;
	u32 link_speed = 0;
	bool autoneg = false;
	bool link_up = 0;
	u16 value = 0;	/* yt8521s 0xA001 media mode */
	unsigned long flags;

	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);

	/* set the supported link speeds */
	if (supported_link & NGBE_LINK_SPEED_1GB_FULL)
		ecmd->supported |= SUPPORTED_1000baseT_Full;
	if (supported_link & NGBE_LINK_SPEED_100_FULL)
		ecmd->supported |= SUPPORTED_100baseT_Full;
	if (supported_link & NGBE_LINK_SPEED_10_FULL)
		ecmd->supported |= SUPPORTED_10baseT_Full;

	/* set the advertised speeds */
	if (hw->phy.autoneg_advertised) {
		if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_100_FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_1GB_FULL) {
			if (ecmd->supported & SUPPORTED_1000baseKX_Full)
				ecmd->advertising |= ADVERTISED_1000baseKX_Full;
			else
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		}
		if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_10_FULL)
			ecmd->advertising |= ADVERTISED_10baseT_Full;
	} else {
		if (hw->phy.force_speed & NGBE_LINK_SPEED_1GB_FULL)
			ecmd->advertising |= ADVERTISED_1000baseT_Full;
		if (hw->phy.force_speed & NGBE_LINK_SPEED_100_FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (hw->phy.force_speed & NGBE_LINK_SPEED_10_FULL)
			ecmd->advertising |= ADVERTISED_10baseT_Full;
	}

	ecmd->supported |=
SUPPORTED_Autoneg;
	if (autoneg) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		ecmd->autoneg = AUTONEG_ENABLE;
	} else
		ecmd->autoneg = AUTONEG_DISABLE;

	ecmd->transceiver = XCVR_EXTERNAL;

	/* Determine the remaining settings based on the PHY type. */
	switch (adapter->hw.phy.type) {
	case ngbe_phy_internal:
	case ngbe_phy_m88e1512:
		ecmd->supported |= SUPPORTED_TP;
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->port = PORT_TP;
		break;
	case ngbe_phy_sfp_passive_tyco:
	case ngbe_phy_sfp_passive_unknown:
	case ngbe_phy_sfp_ftl:
	case ngbe_phy_sfp_avago:
	case ngbe_phy_sfp_intel:
	case ngbe_phy_sfp_unknown:
		/* port type depends on the detected SFP module */
		switch (adapter->hw.phy.sfp_type) {
		/* SFP+ devices, further checking needed */
		case ngbe_sfp_type_da_cu:
		case ngbe_sfp_type_da_cu_core0:
		case ngbe_sfp_type_da_cu_core1:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_DA;
			break;
		case ngbe_sfp_type_sr:
		case ngbe_sfp_type_lr:
		case ngbe_sfp_type_srlr_core0:
		case ngbe_sfp_type_srlr_core1:
		case ngbe_sfp_type_1g_sx_core0:
		case ngbe_sfp_type_1g_sx_core1:
		case ngbe_sfp_type_1g_lx_core0:
		case ngbe_sfp_type_1g_lx_core1:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_FIBRE;
			break;
		case ngbe_sfp_type_not_present:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_NONE;
			break;
		case ngbe_sfp_type_1g_cu_core0:
		case ngbe_sfp_type_1g_cu_core1:
			ecmd->supported |= SUPPORTED_TP;
			ecmd->advertising |= ADVERTISED_TP;
			ecmd->port = PORT_TP;
			break;
		case ngbe_sfp_type_unknown:
		default:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_OTHER;
			break;
		}
		break;
	case ngbe_phy_yt8521s_sfi:
		/* media depends on PHY extended register 0xA001 (low 3 bits) */
		spin_lock_irqsave(&hw->phy_lock, flags);
		ngbe_phy_read_reg_ext_yt8521s(hw, 0xA001, 0, &value);
		spin_unlock_irqrestore(&hw->phy_lock, flags);
		if ((value & 7) == 0) { /* utp_to_rgmii */
			ecmd->supported |= SUPPORTED_TP;
			ecmd->advertising |= ADVERTISED_TP;
			ecmd->port = PORT_TP;
		} else {
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_FIBRE;
		}
		break;
	case ngbe_phy_internal_yt8521s_sfi:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_FIBRE;
		break;
	case ngbe_phy_unknown:
	case ngbe_phy_generic:
	case ngbe_phy_sfp_unsupported:
	default:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_OTHER;
		break;
	}

	if (!in_interrupt()) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	} else {
		/*
		 * this case is a special workaround for RHEL5 bonding
		 * that calls this routine from interrupt context
		 */
		link_speed = adapter->link_speed;
		link_up = adapter->link_up;
	}

	ecmd->supported |= SUPPORTED_Pause;

	switch (hw->fc.requested_mode) {
	case ngbe_fc_full:
		ecmd->advertising |= ADVERTISED_Pause;
		break;
	case ngbe_fc_rx_pause:
		ecmd->advertising |= ADVERTISED_Pause |
				     ADVERTISED_Asym_Pause;
		break;
	case ngbe_fc_tx_pause:
		ecmd->advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		ecmd->advertising &= ~(ADVERTISED_Pause |
				       ADVERTISED_Asym_Pause);
	}

	if (link_up) {
		switch (link_speed) {
		case NGBE_LINK_SPEED_1GB_FULL:
			ecmd->speed = SPEED_1000;
			break;
		case NGBE_LINK_SPEED_100_FULL:
			ecmd->speed = SPEED_100;
			break;
		case NGBE_LINK_SPEED_10_FULL:
			ecmd->speed = SPEED_10;
			break;
		default:
			break;
		}
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	return 0;
}
#endif /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */

#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE
/**
 * ngbe_set_link_ksettings - apply link settings (ethtool ksettings API)
 * @netdev: network interface
 * @cmd: requested autoneg/speed/duplex/advertising
 *
 * For copper or multispeed-fiber media, limits the advertised speeds (or
 * forces a single full-duplex speed when autoneg is off) and kicks the
 * PHY via setup_link().  Duplex forcing to half is rejected.  For fiber
 * PHYs a temporary copy of @cmd's masks is patched so requests using
 * 1000baseT instead of 1000baseX still validate ("to be compatible with
 * test cases").  Returns 0 on success or -EINVAL on invalid requests or
 * when the interface is down.
 */
static int ngbe_set_link_ksettings(struct net_device *netdev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct ngbe_adapter *adapter = netdev_priv(netdev);
	struct ngbe_hw *hw = &adapter->hw;
	u32 advertised;
	int err = 0;
	struct ethtool_link_ksettings temp_ks;

	if (!netif_running(netdev))
		return -EINVAL;

	if ((hw->phy.media_type == ngbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		memcpy(&temp_ks, cmd, sizeof(struct ethtool_link_ksettings));
		/* To be compatible with test cases */
		if ((hw->phy.type == ngbe_phy_m88e1512_sfi) ||
		    (hw->phy.type == ngbe_phy_yt8521s_sfi) ||
		    (hw->phy.type == ngbe_phy_internal_yt8521s_sfi)) {
			/* accept 1000baseT requests on 1000baseX media */
			if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
								  1000baseT_Full)) {
				ethtool_link_ksettings_add_link_mode(&temp_ks, supported,
								     1000baseT_Full);
#ifndef HAVE_NOT_SUPPORTED_1000baseX_Full
				ethtool_link_ksettings_del_link_mode(&temp_ks, supported,
								     1000baseX_Full);
#endif
			}
		}

		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (!bitmap_subset(cmd->link_modes.advertising,
				   temp_ks.link_modes.supported,
				   __ETHTOOL_LINK_MODE_MASK_NBITS))
			return -EINVAL;

		advertised = 0;

		if (cmd->base.autoneg == AUTONEG_ENABLE) {
			hw->mac.autoneg = true;
#ifndef HAVE_NOT_SUPPORTED_1000baseX_Full
			if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
								  1000baseT_Full) ||
			    ethtool_link_ksettings_test_link_mode(cmd, advertising,
								  1000baseX_Full))
#else
			if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
								  1000baseT_Full))
#endif
				advertised |= NGBE_LINK_SPEED_1GB_FULL;

			if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
								  100baseT_Full))
				advertised |= NGBE_LINK_SPEED_100_FULL;

			if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
								  10baseT_Full))
				advertised |= NGBE_LINK_SPEED_10_FULL;
		} else {
			/* forced mode: full duplex only, one speed */
			if (cmd->base.duplex == DUPLEX_HALF) {
				e_err(probe, "unsupported duplex\n");
				return -EINVAL;
			}

			switch (cmd->base.speed) {
			case SPEED_10:
				advertised = NGBE_LINK_SPEED_10_FULL;
				break;
			case SPEED_100:
				advertised = NGBE_LINK_SPEED_100_FULL;
				break;
			case SPEED_1000:
				advertised = NGBE_LINK_SPEED_1GB_FULL;
				break;
			default:
				e_err(probe, "unsupported speed\n");
				return -EINVAL;
			}

			hw->mac.autoneg = false;
		}

		hw->mac.autotry_restart = true;
		hw->phy.ops.setup_link(hw, advertised, true);
		if (cmd->base.autoneg == AUTONEG_ENABLE) {
			hw->phy.autoneg_advertised = advertised;
		} else {
			hw->phy.autoneg_advertised = NGBE_LINK_SPEED_UNKNOWN;
			hw->phy.force_speed = advertised;
		}
	} else {
		/* in this case we currently only support 1Gb/FULL */
		u32 speed = cmd->base.speed;

		if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
		    (!ethtool_link_ksettings_test_link_mode(cmd, advertising,
							    1000baseT_Full)) ||
		    (speed + cmd->base.duplex != SPEED_1000 + DUPLEX_FULL))
			return -EINVAL;
	}

	return err;
}

#else /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */
/**
 * ngbe_set_settings - apply link settings (legacy ethtool_cmd API)
 * @netdev: network interface
 * @ecmd: requested autoneg/speed/duplex/advertising
 *
 * Legacy-u32 counterpart of ngbe_set_link_ksettings().  Continues past
 * this hunk.
 */
static int ngbe_set_settings(struct net_device *netdev,
			     struct ethtool_cmd *ecmd)
{
	struct ngbe_adapter *adapter = netdev_priv(netdev);
	struct ngbe_hw *hw = &adapter->hw;
	u32 advertised, old;	/* NOTE(review): 'old' is set but never used */
	int err = 0;

	if (!netif_running(netdev))
		return -EINVAL;

	if ((hw->phy.media_type == ngbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (ecmd->advertising & ~ecmd->supported) {
			return -EINVAL;
		}

		old = hw->phy.autoneg_advertised;
		advertised = 0;

		if (ecmd->autoneg == AUTONEG_ENABLE) {
			hw->mac.autoneg = true;
			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
				advertised |= NGBE_LINK_SPEED_1GB_FULL;

			if (ecmd->advertising & ADVERTISED_100baseT_Full)
				advertised |= NGBE_LINK_SPEED_100_FULL;

			if (ecmd->advertising & ADVERTISED_10baseT_Full)
				advertised |= NGBE_LINK_SPEED_10_FULL;


		} else {
			if (ecmd->duplex == DUPLEX_HALF) {
				e_err(probe, "unsupported duplex\n");
				return -EINVAL;
			}

			switch (ecmd->speed) {
			case SPEED_10:
				advertised = NGBE_LINK_SPEED_10_FULL;
				break;
			case
SPEED_100: + advertised = NGBE_LINK_SPEED_100_FULL; + break; + case SPEED_1000: + advertised = NGBE_LINK_SPEED_1GB_FULL; + break; + default: + e_err(probe, "unsupported speed\n"); + return -EINVAL; + } + hw->mac.autoneg = false; + } + + hw->mac.autotry_restart = true; + hw->phy.ops.setup_link(hw, advertised, true); + if (ecmd->autoneg == AUTONEG_ENABLE) { + hw->phy.autoneg_advertised = advertised; + } else { + hw->phy.autoneg_advertised = NGBE_LINK_SPEED_UNKNOWN; + hw->phy.force_speed = advertised; + } + } else { + /* in this case we currently only support 10Gb/FULL */ + u32 speed = ethtool_cmd_speed(ecmd); + if ((ecmd->autoneg == AUTONEG_ENABLE) || + (ecmd->advertising != ADVERTISED_10000baseT_Full) || + (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) + return -EINVAL; + } + + return err; +} +#endif /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ + +static void ngbe_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + + if (!hw->fc.disable_fc_autoneg) + pause->autoneg = 1; + else + pause->autoneg = 0; + + if (hw->fc.current_mode == ngbe_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == ngbe_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == ngbe_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int ngbe_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_fc_info fc = hw->fc; + + fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); + + if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) + fc.requested_mode = ngbe_fc_full; + else if (pause->rx_pause) + fc.requested_mode = ngbe_fc_rx_pause; + else if (pause->tx_pause) + fc.requested_mode = ngbe_fc_tx_pause; + else + fc.requested_mode = ngbe_fc_none; + + /* if the thing changed 
then we'll update and use new autoneg */ + if (memcmp(&fc, &hw->fc, sizeof(struct ngbe_fc_info))) { + hw->fc = fc; + if (netif_running(netdev)) + ngbe_reinit_locked(adapter); + else + ngbe_reset(adapter); + } + + return 0; +} + +static u32 ngbe_get_msglevel(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void ngbe_set_msglevel(struct net_device *netdev, u32 data) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int ngbe_get_regs_len(struct net_device __always_unused *netdev) +{ +#define NGBE_REGS_LEN 4096 + return NGBE_REGS_LEN * sizeof(u32); +} + +#define NGBE_GET_STAT(_A_, _R_) (_A_->stats._R_) + + +static void ngbe_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, + void *p) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u32 i; + u32 id = 0; + + memset(p, 0, NGBE_REGS_LEN * sizeof(u32)); + regs_buff[NGBE_REGS_LEN - 1] = 0x55555555; + + regs->version = hw->revision_id << 16 | + hw->device_id; + + /* Global Registers */ + /* chip control */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_PWR);//0 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_CTL);//1 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_PF_SM);//2 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_RST);//3 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_ST);//4 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_SWSM);//5 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_RST_ST);//6 + /* pvt sensor */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_CTL);//7 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_EN);//8 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_ST);//9 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_ALARM_THRE);//10 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_DALARM_THRE);//11 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_INT_EN);//12 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_ALARM_ST);//13 + /* Fmgr Register */ + regs_buff[id++] = 
NGBE_R32_Q(hw, NGBE_SPI_CMD);//14 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_DATA);//15 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_STATUS);//16 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_USR_CMD);//17 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_CMDCFG0);//18 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_CMDCFG1);//19 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_ILDR_STATUS);//20 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_ILDR_SWPTR);//21 + + /* Port Registers */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_PORT_CTL);//22 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_PORT_ST);//23 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_EX_VTYPE);//24 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_TCP_TIME);//25 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_LED_CTL);//26 + /* GPIO */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_DR);//27 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_DDR);//28 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_CTL);//29 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_INTEN);//30 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_INTMASK);//31 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_INTSTATUS);//32 + /* TX TPH */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_TPH_TDESC);//33 + /* RX TPH */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_TPH_RDESC);//34 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_TPH_RHDR);//35 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_TPH_RPL);//36 + + /* TDMA */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_CTL);//37 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_POOL_TE);//38 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_PB_THRE);//39 + + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_LLQ);//40 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_ETYPE_LB_L);//41 + + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_ETYPE_AS_L);//42 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_MAC_AS_L);//43 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_VLAN_AS_L);//44 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_TCP_FLG_L);//45 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_TCP_FLG_H);//46 + for (i = 0; 
i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_VLAN_INS(i));//47-54 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_ETAG_INS(i));//55-62 + } + /* Transmit QOS */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_PBWARB_CTL);//63 + + /* statistics */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_DRP_CNT);//64 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_SEC_DRP);//65 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_PKT_CNT);//66 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_BYTE_CNT_L);//67 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_BYTE_CNT_H);//68 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_OS2BMC_CNT);//69 + + /* RDMA */ + /* receive control */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_ARB_CTL);//70 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_POOL_RE);//71 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_PF_QDE);//72 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_PF_HIDE);//73 + /* static */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_DRP_PKT);//74 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_PKT_CNT);//75 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_BYTE_CNT_L);//76 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_BYTE_CNT_H);//77 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_BMC2OS_CNT);//78 + + /* RDB */ + /*flow control */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RFCV);//79 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RFCL);//80 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RFCH);//81 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RFCRT);//82 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RFCC);//83 + /* receive packet buffer */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PB_CTL);//84 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PB_SZ);//85 + + /* lli interrupt */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_LLI_THRE);//86 + /* ring assignment */ + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PL_CFG(i));//87-94 + } + for (i = 0; i < 32; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RSSTBL(i));//95-126 + } + for (i = 0; i < 10; 
i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RSSRK(i));//127-136 + } + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RA_CTL);//137 + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_5T_SDP(i));//138-145 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_5T_CTL0(i));//146-153 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_5T_CTL1(i));//154-161 + } + + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_SYN_CLS);//162 + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_ETYPE_CLS(i));//163-170 + } + /* statistics */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_MPCNT);//171 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PKT_CNT);//172 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_REPLI_CNT);//173 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_DRP_CNT);//174 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_LXONTXC);//175 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_LXOFFTXC);//176 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PFCMACDAL);//177 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PFCMACDAH);//178 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_TXSWERR);//179 + + /* PSR */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_CTL);//180 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MAX_SZ);//181 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VLAN_CTL);//182 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VM_CTL);//183 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_PKT_CNT);//184 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MNG_PKT_CNT);//185 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_DBG_DOP_CNT);//186 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MNG_DOP_CNT);//187 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VM_FLP_L);//188 + + /* vm l2 control */ + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VM_L2CTL(i));//189-196 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_ETYPE_SWC(i));//197-204 + } + for (i = 0; i < 128; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, 
NGBE_PSR_MC_TBL(i));//205-332 + } + for (i = 0; i < 128; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_UC_TBL(i));///333-460 + } + for (i = 0; i < 128; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VLAN_TBL(i));//461-588 + } + /* mac switcher */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MAC_SWC_AD_L);//589 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MAC_SWC_AD_H);//590 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MAC_SWC_VM);//591 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MAC_SWC_IDX);//592 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VLAN_SWC);//593 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VLAN_SWC_VM_L);//594 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VLAN_SWC_IDX);//595 + + /* mirror */ + for (i = 0; i < 4; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MR_CTL(i));//596-599 + } + for (i = 0; i < 4; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MR_VLAN_L(i));//600-603 + } + for (i = 0; i < 4; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MR_VM_L(i));//604-607 + } + /* 1588 */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_CTL);//608 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_STMPL);//609 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_STMPH);//610 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_ATTRL);//611 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_ATTRH);//612 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_MSGTYPE);//613 + /* wake up */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_WKUP_CTL);//614 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_WKUP_IPV);//615 + for (i = 0; i < 4; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_WKUP_IP4TBL(i));//616-619 + } + for (i = 0; i < 4; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_WKUP_IP6TBL(i));//620-623 + } + for (i = 0; i < 16; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_LAN_FLEX_DW_L(i));//624-639 + } + for (i = 0; i < 16; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_LAN_FLEX_DW_H(i));//640-655 + } + for (i = 0; i < 16; i++) { + regs_buff[id++] = 
NGBE_R32_Q(hw, NGBE_PSR_LAN_FLEX_MSK(i));//656-671 + } + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_LAN_FLEX_CTL);//672 + + /* TDB */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_TFCS);//673 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_PB_SZ);//674 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_PBRARB_CTL);//675 + /* statistic */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_OUT_PKT_CNT);//676 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_MNG_PKT_CNT);//677 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_LB_PKT_CNT);//678 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_MNG_LARGE_DOP_CNT);//679 + + /* TSEC */ + /* general tsec */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_CTL);//680 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_ST);//681 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_BUF_AF);//682 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_BUF_AE);//683 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_MIN_IFG);//684 + /* 1588 */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_CTL);//685 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_STMPL);//686 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_STMPH);//687 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_SYSTIML);//688 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_SYSTIMH);//689 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_INC);//690 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_ADJL);//691 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_ADJH);//692 + + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_INT_ST);//693 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_INT_EN);//694 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_AUX_CTL);//695 + for (i = 0; i < 4; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_SDP(i));//696-699 + } + + /* RSEC */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RSEC_CTL);//700 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RSEC_ST);//701 + /* mac wrapper */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_TX_CFG);//702 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_RX_CFG);//703 + regs_buff[id++] = 
NGBE_R32_Q(hw, NGBE_MAC_PKT_FLT);//704 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_WDG_TIMEOUT);//705 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_TX_FLOW_CTRL);//706 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_RX_FLOW_CTRL);//707 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_INT_ST);//708 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_INT_EN);//709 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RX_FRAME_CNT_GOOD_BAD_LOW);//710 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TX_FRAME_CNT_GOOD_BAD_LOW);//711 + + /* BAR register */ + /* pf interrupt register */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_MISC_IC);//712 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_MISC_ICS);//713 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_MISC_IEN);//714 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_GPIE);//715 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_IC);//716 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_ICS);//717 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_IMS);//718 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_IMC);//719 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_ISB_ADDR_L);//720 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_ISB_ADDR_H);//721 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_ITRSEL);//722 + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_ITR(i));//723-730 + } + for (i = 0; i < 4; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_IVAR(i));//731-734 + } + + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_MISC_IVAR);//735 + /* pf receive ring register */ + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_RR_BAL(i));//736-743 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_RR_BAH(i));//744-751 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_RR_WP(i));//752-759 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_RR_RP(i));//760-767 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_RR_CFG(i));//768-775 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, 
NGBE_PX_TR_BAL(i));//776-783 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_TR_BAH(i));//784-791 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_TR_WP(i));//792-799 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_TR_RP(i));//800-807 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_TR_CFG(i));//808-815 + } +} + +static int ngbe_get_eeprom_len(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + return adapter->hw.eeprom.word_size * 2; +} + +static int ngbe_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word, eeprom_len; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_len = last_word - first_word + 1; + + eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len, + eeprom_buff); + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < eeprom_len; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int ngbe_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EINVAL; + + max_len = hw->eeprom.word_size
* 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = eeprom_buff; + + if (eeprom->offset & 1) { + /* + * need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = hw->eeprom.ops.read(hw, first_word, + &eeprom_buff[0]); + if (ret_val) + goto err; + + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* + * need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = hw->eeprom.ops.read(hw, last_word, + &eeprom_buff[last_word - first_word]); + if (ret_val) + goto err; + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = hw->eeprom.ops.write_buffer(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + + /* Update the checksum */ + if (ret_val == 0) + hw->eeprom.ops.update_checksum(hw); + +err: + kfree(eeprom_buff); + return ret_val; +} + +static void ngbe_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + strncpy(drvinfo->driver, ngbe_driver_name, + sizeof(drvinfo->driver) - 1); + strncpy(drvinfo->version, ngbe_driver_version, + sizeof(drvinfo->version) - 1); + /* copy at most size - 1 so fw_version stays NUL-terminated, matching + * the other strncpy calls in this function */ + strncpy(drvinfo->fw_version, adapter->eeprom_id, + sizeof(drvinfo->fw_version) - 1); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info) - 1); + if (adapter->num_tx_queues <= NGBE_NUM_RX_QUEUES) { + drvinfo->n_stats = NGBE_STATS_LEN - + (NGBE_NUM_RX_QUEUES - adapter->num_tx_queues)* + (sizeof(struct ngbe_queue_stats) / sizeof(u64))*2; + }else{ + drvinfo->n_stats =
NGBE_STATS_LEN; + } + drvinfo->testinfo_len = NGBE_TEST_LEN; + drvinfo->regdump_len = ngbe_get_regs_len(netdev); +} + +static void ngbe_get_ringparam(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *ringp, + struct netlink_ext_ack *extack) +#else + struct ethtool_ringparam *ring) +#endif +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = NGBE_MAX_RXD; + ring->tx_max_pending = NGBE_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int ngbe_set_ringparam(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *ringp, + struct netlink_ext_ack *extack) +#else + struct ethtool_ringparam *ring) +#endif +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_ring *temp_ring; + int i, err = 0; + u32 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_tx_count = clamp_t(u32, ring->tx_pending, + NGBE_MIN_TXD, NGBE_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, NGBE_REQ_TX_DESCRIPTOR_MULTIPLE); + + new_rx_count = clamp_t(u32, ring->rx_pending, + NGBE_MIN_RXD, NGBE_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, NGBE_REQ_RX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__NGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = 
new_tx_count; + adapter->xdp_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + /* allocate temporary buffer to store rings in */ + i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); + temp_ring = vmalloc(i * sizeof(struct ngbe_ring)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + ngbe_down(adapter); + + /* + * Setup new Tx resources and free the old Tx resources in that order. + * We can then assign the new resources to the rings via a memcpy. + * The advantage to this approach is that we are guaranteed to still + * have resources even in the case of an allocation failure. + */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct ngbe_ring)); + + temp_ring[i].count = new_tx_count; + err = ngbe_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + ngbe_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + ngbe_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct ngbe_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + /* Repeat the process for the Rx rings if needed */ + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct ngbe_ring)); +#ifdef HAVE_XDP_BUFF_RXQ + xdp_rxq_info_unreg(&temp_ring[i].xdp_rxq); +#endif + temp_ring[i].count = new_rx_count; + err = ngbe_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + ngbe_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + + for (i = 0; i < adapter->num_rx_queues; i++) { + ngbe_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct ngbe_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } + +err_setup: + 
ngbe_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__NGBE_RESETTING, &adapter->state); + return err; +} + +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT +static int ngbe_get_stats_count(struct net_device *netdev) +{ + /* adapter was previously used here without being declared */ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + if (adapter->num_tx_queues <= NGBE_NUM_RX_QUEUES) { + return NGBE_STATS_LEN - (NGBE_NUM_RX_QUEUES - adapter->num_tx_queues)* + (sizeof(struct ngbe_queue_stats) / sizeof(u64))*2; + }else{ + return NGBE_STATS_LEN; + } +} + +#else /* HAVE_ETHTOOL_GET_SSET_COUNT */ +static int ngbe_get_sset_count(struct net_device *netdev, int sset) +{ + /* single declaration: a HAVE_TX_MQ/!HAVE_NETDEV_SELECT_QUEUE guarded + * copy previously declared adapter a second time under that config */ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + switch (sset) { + case ETH_SS_TEST: + return NGBE_TEST_LEN; + case ETH_SS_STATS: + if (adapter->num_tx_queues <= NGBE_NUM_RX_QUEUES) { + return NGBE_STATS_LEN - (NGBE_NUM_RX_QUEUES - adapter->num_tx_queues)* + (sizeof(struct ngbe_queue_stats) / sizeof(u64))*2; + }else{ + return NGBE_STATS_LEN; + } + case ETH_SS_PRIV_FLAGS: + return NGBE_PRIV_FLAGS_STR_LEN; + default: + return -EOPNOTSUPP; + } +} + +/** + * ngbe_get_priv_flags - report device private flags + * @dev: network interface device structure + * + * The get string set count and the string set should be matched for each + * flag returned. Add new strings for each flag to the ngbe_gstrings_priv_flags + * array. + * + * Returns a u32 bitmap of flags.
+ **/ +static u32 ngbe_get_priv_flags(struct net_device *dev) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + u32 i , ret_flags = 0; + + for (i = 0; i < NGBE_PRIV_FLAGS_STR_LEN; i++) { + const struct ngbe_priv_flags *priv_flags; + + priv_flags = &ngbe_gstrings_priv_flags[i]; + + if (priv_flags->flag & adapter->eth_priv_flags) + ret_flags |= BIT(i); + } + return ret_flags; +} + +/** + * ngbe_set_priv_flags - set private flags + * @dev: network interface device structure + * @flags: bit flags to be set + **/ +static int ngbe_set_priv_flags(struct net_device *dev, u32 flags) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + u32 orig_flags, new_flags, changed_flags; + u32 i; + int status = 0; + + orig_flags = adapter->eth_priv_flags; + new_flags = orig_flags; + + if (!netif_running(dev)) + return -EINVAL; + + for (i = 0; i < NGBE_PRIV_FLAGS_STR_LEN; i++) { + const struct ngbe_priv_flags *priv_flags; + + priv_flags = &ngbe_gstrings_priv_flags[i]; + + if (flags & BIT(i)) + new_flags |= priv_flags->flag; + else + new_flags &= ~(priv_flags->flag); + + /* If this is a read-only flag, it can't be changed */ + if (priv_flags->read_only && + ((orig_flags ^ new_flags) & ~BIT(i))) + return -EOPNOTSUPP; + } + + changed_flags = orig_flags ^ new_flags; + + if(!changed_flags) return 0; + + if (changed_flags & NGBE_ETH_PRIV_FLAG_LLDP) { + status = ngbe_hic_write_lldp(&adapter->hw, (u32)(new_flags & NGBE_ETH_PRIV_FLAG_LLDP)); + if(!status) + adapter->eth_priv_flags = new_flags; + } + + return status; +} + + +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ +static void ngbe_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *net_stats = &netdev->stats; +#else + struct net_device_stats *net_stats = &adapter->net_stats; +#endif + u64 *queue_stat; + int stat_count, k; +#ifdef HAVE_NDO_GET_STATS64 + unsigned int 
start; +#endif + struct ngbe_ring *ring, *xdp_ring; + int i, j; + char *p; + + ngbe_update_stats(adapter); + + for (i = 0; i < NGBE_NETDEV_STATS_LEN; i++) { + p = (char *)net_stats + ngbe_gstrings_net_stats[i].stat_offset; + data[i] = (ngbe_gstrings_net_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < NGBE_GLOBAL_STATS_LEN; j++, i++) { + p = (char *)adapter + ngbe_gstrings_stats[j].stat_offset; + data[i] = (ngbe_gstrings_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < adapter->num_tx_queues; j++) { + ring = adapter->tx_ring[j]; + xdp_ring = adapter->xdp_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; +#ifdef BP_EXTENDED_STATS + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; +#endif + continue; + } + +#ifdef HAVE_NDO_GET_STATS64 + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); +#endif + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; +#ifdef HAVE_NDO_GET_STATS64 + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +#endif + if (xdp_ring) { +#ifdef HAVE_NDO_GET_STATS64 + do { + start = u64_stats_fetch_begin_irq(&xdp_ring->syncp); +#endif + data[i] += xdp_ring->stats.packets; + data[i+1] += xdp_ring->stats.bytes; +#ifdef HAVE_NDO_GET_STATS64 + } while (u64_stats_fetch_retry_irq(&xdp_ring->syncp, start)); +#endif + } + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i+1] = ring->stats.misses; + data[i + 2] = ring->stats.cleaned; + if (xdp_ring) { + data[i] += xdp_ring->stats.yields; + data[i+1] += xdp_ring->stats.misses; + data[i + 2] += xdp_ring->stats.cleaned; + } + i += 3; +#endif + } + + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; +#ifdef BP_EXTENDED_STATS + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; +#endif + continue; + } + +#ifdef HAVE_NDO_GET_STATS64 + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); +#endif + data[i] = 
ring->stats.packets; + data[i+1] = ring->stats.bytes; +#ifdef HAVE_NDO_GET_STATS64 + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +#endif + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i+1] = ring->stats.misses; + data[i+2] = ring->stats.cleaned; + i += 3; +#endif + } + + for (j = 0; j < NGBE_MAX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxontxc[j]; + data[i++] = adapter->stats.pxofftxc[j]; + } + for (j = 0; j < NGBE_MAX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxonrxc[j]; + data[i++] = adapter->stats.pxoffrxc[j]; + } + + stat_count = sizeof(struct vf_stats) / sizeof(u64); + for (j = 0; j < adapter->num_vfs; j++) { + queue_stat = (u64 *)&adapter->vfinfo[j].vfstats; + for (k = 0; k < stat_count; k++) + data[i + k] = queue_stat[k]; + queue_stat = (u64 *)&adapter->vfinfo[j].saved_rst_vfstats; + for (k = 0; k < stat_count; k++) + data[i + k] += queue_stat[k]; + i += k; + } +} + +static void ngbe_get_priv_flag_strings(struct net_device *netdev, u8 *data) +{ + char *p = (char *)data; + unsigned int i; + + for (i = 0; i < NGBE_PRIV_FLAGS_STR_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + ngbe_gstrings_priv_flags[i].flag_string); + p += ETH_GSTRING_LEN; + } +} + +static void ngbe_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + char *p = (char *)data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *ngbe_gstrings_test, + NGBE_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < NGBE_NETDEV_STATS_LEN; i++) { + memcpy(p, ngbe_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < NGBE_GLOBAL_STATS_LEN; i++) { + memcpy(p, ngbe_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_tx_queues; i++) { /*temp setting2*/ + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, 
"tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; +#ifdef BP_EXTENDED_STATS + sprintf(p, "tx_queue_%u_bp_napi_yield", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bp_misses", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bp_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* BP_EXTENDED_STATS */ + } + for (i = 0; i < adapter->num_rx_queues; i++) { /*temp setting2*/ + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; +#ifdef BP_EXTENDED_STATS + sprintf(p, "rx_queue_%u_bp_poll_yield", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bp_misses", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bp_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* BP_EXTENDED_STATS */ + } + for (i = 0; i < NGBE_MAX_PACKET_BUFFERS; i++) { + sprintf(p, "tx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < NGBE_MAX_PACKET_BUFFERS; i++) { + sprintf(p, "rx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_vfs; i++) { + sprintf(p, "VF %d Rx Packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %d Rx Bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %d Tx Packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %d Tx Bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %d MC Packets", i); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != NGBE_STATS_LEN * ETH_GSTRING_LEN); */ + break; + case ETH_SS_PRIV_FLAGS: + ngbe_get_priv_flag_strings(netdev, data); + break; + } +} + +static int ngbe_link_test(struct ngbe_adapter *adapter, u64 *data) +{ + struct ngbe_hw *hw = &adapter->hw; + bool link_up = 0; + u32 link_speed = 0; + + if (NGBE_REMOVED(hw->hw_addr)) { + *data = 1; + return 1; + } + *data = 0; + hw->mac.ops.check_link(hw, &link_speed, &link_up, true); + if (link_up) + return *data; + else + *data = 1; + return *data; +} + +/* ethtool 
register test data */ +struct ngbe_reg_test { + u32 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. + */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + + +/* default sapphire register test */ +static struct ngbe_reg_test reg_test_sapphire[] = { + { NGBE_RDB_RFCL, 1, PATTERN_TEST, 0x8007FFE0, 0x8007FFE0 }, + { NGBE_RDB_RFCH, 1, PATTERN_TEST, 0x8007FFE0, 0x8007FFE0 }, + { NGBE_PSR_VLAN_CTL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, + { NGBE_PX_RR_BAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { NGBE_PX_RR_BAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { NGBE_PX_RR_CFG(0), 4, WRITE_NO_TEST, 0, NGBE_PX_RR_CFG_RR_EN }, + { NGBE_RDB_RFCH, 1, PATTERN_TEST, 0x8007FFE0, 0x8007FFE0 }, + { NGBE_RDB_RFCV, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 }, + { NGBE_PX_TR_BAL(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { NGBE_PX_TR_BAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { NGBE_RDB_PB_CTL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, + { NGBE_PSR_MC_TBL(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { .reg = 0 } +}; + + +static bool reg_pattern_test(struct ngbe_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 pat, val, before; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + + if (NGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = rd32(&adapter->hw, 
reg); + wr32(&adapter->hw, reg, test_pattern[pat] & write); + val = rd32(&adapter->hw, reg); + if (val != (test_pattern[pat] & write & mask)) { + e_err(drv, + "pattern test reg %04X failed: got 0x%08X " + "expected 0x%08X\n", + reg, val, test_pattern[pat] & write & mask); + *data = reg; + wr32(&adapter->hw, reg, before); + return true; + } + wr32(&adapter->hw, reg, before); + } + return false; +} + +static bool reg_set_and_check(struct ngbe_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 val, before; + + if (NGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + before = rd32(&adapter->hw, reg); + wr32(&adapter->hw, reg, write & mask); + val = rd32(&adapter->hw, reg); + if ((write & mask) != (val & mask)) { + e_err(drv, + "set/check reg %04X test failed: got 0x%08X expected" + "0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; + wr32(&adapter->hw, reg, before); + return true; + } + wr32(&adapter->hw, reg, before); + return false; +} + + + + +static bool ngbe_reg_test(struct ngbe_adapter *adapter, u64 *data) +{ + struct ngbe_reg_test *test; + struct ngbe_hw *hw = &adapter->hw; + u32 i; + + if (NGBE_REMOVED(hw->hw_addr)) { + e_err(drv, "Adapter removed - register test blocked\n"); + *data = 1; + return true; + } + + test = reg_test_sapphire; + + /* + * Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. 
+ */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + bool b = false; + + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + wr32(hw, test->reg + (i * 0x40), + test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + (test->reg + 4) + (i * 8), + test->mask, + test->write); + break; + } + if (b) + return true; + } + test++; + } + + *data = 0; + return false; +} + +static bool ngbe_eeprom_test(struct ngbe_adapter *adapter, u64 *data) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 devcap; + + if (hw->eeprom.ops.eeprom_chksum_cap_st(hw, NGBE_CALSUM_COMMAND, &devcap)) { + *data = 1; + return true; + } else { + *data = 0; + return false; + } +} + +static irqreturn_t ngbe_test_intr(int __always_unused irq, void *data) +{ + struct net_device *netdev = (struct net_device *) data; + struct ngbe_adapter *adapter = netdev_priv(netdev); + u64 icr; + + /* get misc interrupt, as cannot get ring interrupt status */ + icr = ngbe_misc_isb(adapter, NGBE_ISB_VEC1); + icr <<= 32; + icr |= ngbe_misc_isb(adapter, NGBE_ISB_VEC0); + + adapter->test_icr = icr; + + return IRQ_HANDLED; +} + + + +static int ngbe_intr_test(struct ngbe_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u64 mask; + u32 i = 0, shared_int = true; + u32 irq = adapter->pdev->irq; + + if (NGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return -1; + } + *data = 0; + + /* Hook up test interrupt handler just for this test */ + if (adapter->msix_entries) { + 
/* NOTE: we don't test MSI-X interrupts here, yet */ + return 0; + } else if (adapter->flags & NGBE_FLAG_MSI_ENABLED) { + shared_int = false; + if (request_irq(irq, &ngbe_test_intr, 0, netdev->name, + netdev)) { + *data = 1; + return -1; + } + } else if (!request_irq(irq, &ngbe_test_intr, IRQF_PROBE_SHARED, + netdev->name, netdev)) { + shared_int = false; + } else if (request_irq(irq, &ngbe_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", + (shared_int ? "shared" : "unshared")); + + /* Disable all the interrupts */ + ngbe_irq_disable(adapter); + NGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Test each interrupt */ + for (; i < 1; i++) { + /* Interrupt to test */ + mask = 1ULL << i; + + if (!shared_int) { + /* + * Disable the interrupts to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ngbe_intr_disable(&adapter->hw, ~mask); + ngbe_intr_trigger(&adapter->hw, mask); + NGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* + * Enable the interrupt to be reported in the cause + * register and then force the same interrupt and see + * if one gets posted. If an interrupt was not posted + * to the bus, the test failed. 
+ */ + adapter->test_icr = 0; + ngbe_intr_disable(&adapter->hw, NGBE_INTR_ALL); + ngbe_intr_trigger(&adapter->hw, mask); + NGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (!(adapter->test_icr & mask)) { + *data = 0; + break; + } + } + + /* Disable all the interrupts */ + ngbe_intr_disable(&adapter->hw, NGBE_INTR_ALL); + NGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void ngbe_free_desc_rings(struct ngbe_adapter *adapter) +{ + struct ngbe_ring *tx_ring = &adapter->test_tx_ring; + struct ngbe_ring *rx_ring = &adapter->test_rx_ring; + struct ngbe_hw *hw = &adapter->hw; + + /* shut down the DMA engines now so they can be reinitialized later */ + + /* first Rx */ + hw->mac.ops.disable_rx(hw); + ngbe_disable_rx_queue(adapter, rx_ring); + + /* now Tx */ + wr32(hw, NGBE_PX_TR_CFG(tx_ring->reg_idx), 0); + + wr32m(hw, NGBE_TDM_CTL, NGBE_TDM_CTL_TE, 0); + + ngbe_reset(adapter); + + ngbe_free_tx_resources(&adapter->test_tx_ring); + ngbe_free_rx_resources(&adapter->test_rx_ring); +} + +static int ngbe_setup_desc_rings(struct ngbe_adapter *adapter) +{ + struct ngbe_ring *tx_ring = &adapter->test_tx_ring; + struct ngbe_ring *rx_ring = &adapter->test_rx_ring; + struct ngbe_hw *hw = &adapter->hw; + int ret_val; + int err; + + hw->mac.ops.setup_rxpba(hw, 0, 0, PBA_STRATEGY_EQUAL); + + /* Setup Tx descriptor ring and Tx buffers */ + tx_ring->count = NGBE_DEFAULT_TXD; + tx_ring->queue_index = 0; + tx_ring->dev = pci_dev_to_dev(adapter->pdev); + tx_ring->netdev = adapter->netdev; + tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; + + err = ngbe_setup_tx_resources(tx_ring); + if (err) + return 1; + + wr32m(&adapter->hw, NGBE_TDM_CTL, + NGBE_TDM_CTL_TE, NGBE_TDM_CTL_TE); + wr32m(hw, NGBE_TSEC_CTL, 0x2, 0); + wr32m(hw, NGBE_RSEC_CTL, 0x2, 0); + ngbe_configure_tx_ring(adapter, tx_ring); + + + /* enable mac transmitter */ + wr32m(hw, NGBE_MAC_TX_CFG, + 
NGBE_MAC_TX_CFG_TE | NGBE_MAC_TX_CFG_SPEED_MASK, + NGBE_MAC_TX_CFG_TE | NGBE_MAC_TX_CFG_SPEED_1G); + + /* Setup Rx Descriptor ring and Rx buffers */ + rx_ring->count = NGBE_DEFAULT_RXD; + rx_ring->queue_index = 0; + rx_ring->dev = pci_dev_to_dev(adapter->pdev); + rx_ring->netdev = adapter->netdev; + rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + rx_ring->rx_buf_len = NGBE_RXBUFFER_2K; +#endif + + err = ngbe_setup_rx_resources(rx_ring); + if (err) { + ret_val = 4; + goto err_nomem; + } + + hw->mac.ops.disable_rx(hw); + ngbe_configure_rx_ring(adapter, rx_ring); + hw->mac.ops.enable_rx(hw); + + return 0; + +err_nomem: + ngbe_free_desc_rings(adapter); + return ret_val; +} + +static int ngbe_setup_loopback_test(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 reg_data; + + /* Setup MAC loopback */ + wr32m(hw, NGBE_MAC_RX_CFG, + NGBE_MAC_RX_CFG_LM, NGBE_MAC_RX_CFG_LM); + + reg_data = rd32(hw, NGBE_PSR_CTL); + reg_data |= NGBE_PSR_CTL_BAM | NGBE_PSR_CTL_UPE | + NGBE_PSR_CTL_MPE | NGBE_PSR_CTL_TPE; + wr32(hw, NGBE_PSR_CTL, reg_data); + + wr32(hw, 0x17000, + (rd32(hw, 0x17000 )| + 0x00000040U) & ~0x1U); + + wr32(hw, 0x17204, 0x4); + wr32(hw, NGBE_PSR_VLAN_CTL, + rd32(hw, NGBE_PSR_VLAN_CTL) & + ~NGBE_PSR_VLAN_CTL_VFE); + + NGBE_WRITE_FLUSH(hw); + usleep_range(10000, 20000); + + return 0; +} + +static void ngbe_loopback_cleanup(struct ngbe_adapter *adapter) +{ + wr32m(&adapter->hw, NGBE_MAC_RX_CFG, + NGBE_MAC_RX_CFG_LM, ~NGBE_MAC_RX_CFG_LM); +} + + +static void ngbe_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size >>= 1; + memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size + 10], 0xBE, 1); + memset(&skb->data[frame_size + 12], 0xAF, 1); +} + +static bool ngbe_check_lbtest_frame(struct ngbe_rx_buffer *rx_buffer, + unsigned int frame_size) +{ + unsigned char *data; + bool match = true; + + 
frame_size >>= 1; + +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + data = rx_buffer->skb->data; +#else + data = kmap(rx_buffer->page) + rx_buffer->page_offset; +#endif + + if (data[3] != 0xFF || + data[frame_size + 10] != 0xBE || + data[frame_size + 12] != 0xAF) + match = false; + +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + kunmap(rx_buffer->page); + +#endif + return match; +} + +static u16 ngbe_clean_test_rings(struct ngbe_ring *rx_ring, + struct ngbe_ring *tx_ring, + unsigned int size) +{ + union ngbe_rx_desc *rx_desc; + struct ngbe_rx_buffer *rx_buffer; + struct ngbe_tx_buffer *tx_buffer; +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + const int bufsz = rx_ring->rx_buf_len; +#else + const int bufsz = ngbe_rx_bufsz(rx_ring); +#endif + u16 rx_ntc, tx_ntc, count = 0; + + /* initialize next to clean and descriptor values */ + rx_ntc = rx_ring->next_to_clean; + tx_ntc = tx_ring->next_to_clean; + rx_desc = NGBE_RX_DESC(rx_ring, rx_ntc); + + while (ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_DD)) { + /* unmap buffer on Tx side */ + tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; + ngbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); + + /* check Rx buffer */ + rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; + +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + /* sync Rx buffer for CPU read */ + dma_sync_single_for_cpu(rx_ring->dev, + rx_buffer->page_dma, + bufsz, + DMA_FROM_DEVICE); +#else + /* sync Rx buffer for CPU read */ + dma_sync_single_for_cpu(rx_ring->dev, + rx_buffer->dma, + bufsz, + DMA_FROM_DEVICE); +#endif + /* verify contents of skb */ + if (ngbe_check_lbtest_frame(rx_buffer, size)) + count++; + +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + /* sync Rx buffer for device write */ + dma_sync_single_for_device(rx_ring->dev, + rx_buffer->page_dma, + bufsz, + DMA_FROM_DEVICE); +#else + dma_sync_single_for_device(rx_ring->dev, + rx_buffer->dma, + bufsz, + DMA_FROM_DEVICE); +#endif + /* increment Rx/Tx next to clean counters */ + rx_ntc++; + if (rx_ntc == rx_ring->count) + rx_ntc = 0; + 
tx_ntc++; + if (tx_ntc == tx_ring->count) + tx_ntc = 0; + + /* fetch next descriptor */ + rx_desc = NGBE_RX_DESC(rx_ring, rx_ntc); + } + + /* re-map buffers to ring, store next to clean values */ + ngbe_alloc_rx_buffers(rx_ring, count); + rx_ring->next_to_clean = rx_ntc; + tx_ring->next_to_clean = tx_ntc; + + return count; +} + +static int ngbe_run_loopback_test(struct ngbe_adapter *adapter) +{ + struct ngbe_ring *tx_ring = &adapter->test_tx_ring; + struct ngbe_ring *rx_ring = &adapter->test_rx_ring; + int i, j, lc, good_cnt, ret_val = 0; + unsigned int size = 1024; + netdev_tx_t tx_ret_val; + struct sk_buff *skb; + u32 flags_orig = adapter->flags; + + //struct ngbe_hw *hw = &adapter->hw; + + + /* DCB can modify the frames on Tx */ + adapter->flags &= ~NGBE_FLAG_DCB_ENABLED; + + /* allocate test skb */ + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return 11; + + /* place data into test skb */ + ngbe_create_lbtest_frame(skb, size); + skb_put(skb, size); + + /* + * Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + for (j = 0; j <= lc; j++) { + /* reset count of good packets */ + good_cnt = 0; + + /* place 64 packets on the transmit queue*/ + for (i = 0; i < 64; i++) { + skb_get(skb); + tx_ret_val = ngbe_xmit_frame_ring(skb, + adapter, + tx_ring); + if (tx_ret_val == NETDEV_TX_OK) + good_cnt++; + } + + msleep(10); + //e_dev_info("====hw_cnt = %d====\n", rd32(hw, 0x18308)); + + + if (good_cnt != 64) { + ret_val = 12; + // e_dev_err("====tran_cnt = %d====\n", good_cnt); + break; + } + + /* allow 200 milliseconds for packets to go from Tx to Rx */ + msleep(200); + + good_cnt = ngbe_clean_test_rings(rx_ring, tx_ring, size); + if (good_cnt != 64) { + ret_val = 13; + break; + } + } + + /* free the original skb */ + 
kfree_skb(skb); + adapter->flags = flags_orig; + + return ret_val; +} + +static int ngbe_loopback_test(struct ngbe_adapter *adapter, u64 *data) +{ + *data = ngbe_setup_desc_rings(adapter); + if (*data) + goto out; + *data = ngbe_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = ngbe_run_loopback_test(adapter); + if (*data) + e_info(hw, "mac loopback testing failed\n"); + ngbe_loopback_cleanup(adapter); + +err_loopback: + ngbe_free_desc_rings(adapter); +out: + return *data; +} + +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT +static int ngbe_diag_test_count(struct net_device __always_unused *netdev) +{ + return NGBE_TEST_LEN; +} + +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ +static void ngbe_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + struct ngbe_hw *hw = &adapter->hw; + + e_dev_info("ngbe_diag_test: start test\n"); + + + if (NGBE_REMOVED(hw->hw_addr)) { + e_err(hw, "Adapter removed - test blocked\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + data[4] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } + set_bit(__NGBE_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) { + int i; + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[i].clear_to_send) { + e_warn(drv, "Please take active VFS " + "offline and restart the " + "adapter before running NIC " + "diagnostics\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + data[4] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(__NGBE_TESTING, + &adapter->state); + goto skip_ol_tests; + } + } + } + + /* Offline tests */ + e_info(hw, "offline testing starting\n"); + + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result */ + if (ngbe_link_test(adapter, &data[4])) + eth_test->flags |= 
ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + ngbe_close(netdev); + else{ + msleep(20); + ngbe_reset(adapter); + } + + + e_info(hw, "register testing starting\n"); + + + if (ngbe_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + msleep(20); + ngbe_reset(adapter); + e_info(hw, "eeprom testing starting\n"); + if (ngbe_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + msleep(20); + ngbe_reset(adapter); + e_info(hw, "interrupt testing starting\n"); + if (ngbe_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (!hw->ncsi_enabled){ + /* If SRIOV or VMDq is enabled then skip MAC + * loopback diagnostic. */ + if (adapter->flags & (NGBE_FLAG_SRIOV_ENABLED | + NGBE_FLAG_VMDQ_ENABLED)) { + e_info(hw, "skip MAC loopback diagnostic in VT mode\n"); + data[3] = 0; + goto skip_loopback; + } + + e_info(hw, "loopback testing starting\n"); + ngbe_loopback_test(adapter, &data[3]); + } + + + data[3] = 0; + +skip_loopback: + msleep(20); + ngbe_reset(adapter); + + /* clear testing bit and return adapter to previous state */ + clear_bit(__NGBE_TESTING, &adapter->state); + if (if_running) + ngbe_open(netdev); + } else { + e_info(hw, "online testing starting\n"); + + /* Online tests */ + if (ngbe_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Offline tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__NGBE_TESTING, &adapter->state); + } + +skip_ol_tests: + msleep_interruptible(4 * 1000); +} + +static int ngbe_wol_exclusion(struct ngbe_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + int retval = 0; + + /* WOL not supported for all devices */ + if (!ngbe_wol_supported(adapter)) { + retval = 1; + wol->supported = 0; + } + + return retval; +} + +static void ngbe_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + struct 
ngbe_hw *hw = &adapter->hw; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + + if (ngbe_wol_exclusion(adapter, wol) || + !device_can_wakeup(pci_dev_to_dev(adapter->pdev))) + return; + + if (adapter->wol & NGBE_PSR_WKUP_CTL_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & NGBE_PSR_WKUP_CTL_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & NGBE_PSR_WKUP_CTL_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & NGBE_PSR_WKUP_CTL_MAG) + wol->wolopts |= WAKE_MAGIC; + + if ( !((hw->subsystem_device_id & WOL_SUP_MASK) == WOL_SUP)) + wol->wolopts = 0; +} + +static int ngbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 slot = hw->bus.lan_id; + u16 value; + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (ngbe_wol_exclusion(adapter, wol)) + return wol->wolopts ? -EOPNOTSUPP : 0; + if ( !((hw->subsystem_device_id & WOL_SUP_MASK) == WOL_SUP)) + return -EOPNOTSUPP; + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= NGBE_PSR_WKUP_CTL_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= NGBE_PSR_WKUP_CTL_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= NGBE_PSR_WKUP_CTL_BC; + if (wol->wolopts & WAKE_MAGIC){ + adapter->wol |= NGBE_PSR_WKUP_CTL_MAG; + hw->wol_enabled = !!(adapter->wol); + wr32(hw, NGBE_PSR_WKUP_CTL, adapter->wol); + ngbe_read_ee_hostif(hw, 0x7FE, &value); + /*enable wol in shadow ram*/ + ngbe_write_ee_hostif(hw, 0x7FE, value | ( 1 << slot) ); + ngbe_write_ee_hostif(hw, 0x7FF, 0x5a5a); + device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol); + return 0; + } + + ngbe_read_ee_hostif(hw, 0x7FE, &value); + /*disable wol in shadow ram*/ + ngbe_write_ee_hostif(hw, 0x7FE, value & ~( 1 << slot )); + ngbe_write_ee_hostif(hw, 0x7FF, 0x5a5a); + return 0; +} + +static int ngbe_nway_reset(struct net_device *netdev) 
+{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + ngbe_reinit_locked(adapter); + + return 0; +} + +#ifdef HAVE_ETHTOOL_SET_PHYS_ID +static int ngbe_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + + if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi || + adapter->hw.phy.type == ngbe_phy_internal_yt8521s_sfi) { + ngbe_phy_read_reg_ext_yt8521s(hw, 0xA00B, 0, (u16*)&adapter->led_reg); + } else if (adapter->hw.phy.type == ngbe_phy_m88e1512 || + adapter->hw.phy.type == ngbe_phy_m88e1512_sfi) { + hw->phy.ops.write_reg_mdi(hw, 22, 0, 3); + hw->phy.ops.read_reg_mdi(hw, 16, 0, (u16*)&adapter->led_reg); + } else + adapter->led_reg = rd32(hw, NGBE_CFG_LED_CTL); + return 2; + + case ETHTOOL_ID_ON: + if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi || + adapter->hw.phy.type == ngbe_phy_internal_yt8521s_sfi) { + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA00B, 0, adapter->led_reg | 0x140); + } else if (adapter->hw.phy.type == ngbe_phy_m88e1512 || + adapter->hw.phy.type == ngbe_phy_m88e1512_sfi) { + hw->phy.ops.write_reg_mdi(hw, 22, 0, 3); + hw->phy.ops.write_reg_mdi(hw, 16, 0, (adapter->led_reg & ~0xF) | 0x9); + } else + hw->mac.ops.led_on(hw, NGBE_LED_LINK_1G); + break; + + case ETHTOOL_ID_OFF: + if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi || + adapter->hw.phy.type == ngbe_phy_internal_yt8521s_sfi) { + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA00B, 0, adapter->led_reg | 0x100); + } else if (adapter->hw.phy.type == ngbe_phy_m88e1512 || + adapter->hw.phy.type == ngbe_phy_m88e1512_sfi) { + hw->phy.ops.write_reg_mdi(hw, 22, 0, 3); + hw->phy.ops.write_reg_mdi(hw, 16, 0, (adapter->led_reg & ~0xF) | 0x8); + } else + hw->mac.ops.led_off(hw, NGBE_LED_LINK_100M | NGBE_LED_LINK_1G); + break; + + case ETHTOOL_ID_INACTIVE: + /* Restore LED settings */ + if (adapter->hw.phy.type == 
ngbe_phy_yt8521s_sfi || + adapter->hw.phy.type == ngbe_phy_internal_yt8521s_sfi) { + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA00B, 0, adapter->led_reg); + } else if (adapter->hw.phy.type == ngbe_phy_m88e1512 || + adapter->hw.phy.type == ngbe_phy_m88e1512_sfi) { + hw->phy.ops.write_reg_mdi(hw, 22, 0, 3); + hw->phy.ops.write_reg_mdi(hw, 16, 0, adapter->led_reg); + } else + wr32(&adapter->hw, NGBE_CFG_LED_CTL, + adapter->led_reg); + break; + } + + return 0; +} +#else +static int ngbe_phys_id(struct net_device *netdev, u32 data) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 led_reg; + u32 i; + + if (!data || data > 300) + data = 300; + + if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi || + adapter->hw.phy.type == ngbe_phy_internal_yt8521s_sfi) { + ngbe_phy_read_reg_ext_yt8521s(hw, 0xA00B, 0, (u16*)&led_reg); + for (i = 0; i < (data * 1000); i += 400) { + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA00B, 0, led_reg | 0x140); + msleep_interruptible(200); + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA00B, 0, led_reg | 0x100); + msleep_interruptible(200); + } + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA00B, 0, led_reg); + } else if (adapter->hw.phy.type == ngbe_phy_m88e1512 || + adapter->hw.phy.type == ngbe_phy_m88e1512_sfi) { + hw->phy.ops.write_reg_mdi(hw, 22, 0, 3); + hw->phy.ops.read_reg_mdi(hw, 16, 0, (u16*)&led_reg); + for (i = 0; i < (data * 1000); i += 400) { + hw->phy.ops.write_reg_mdi(hw, 16, 0, (adapter->led_reg & ~0xF) | 0x9); + msleep_interruptible(200); + hw->phy.ops.write_reg_mdi(hw, 16, 0, (adapter->led_reg & ~0xF) | 0x8); + msleep_interruptible(200); + } + hw->phy.ops.write_reg_mdi(hw, 16, 0, led_reg); + } else { + led_reg = rd32(hw, NGBE_CFG_LED_CTL); + for (i = 0; i < (data * 1000); i += 400) { + hw->mac.ops.led_on(hw, NGBE_LED_LINK_1G); + msleep_interruptible(200); + hw->mac.ops.led_off(hw, NGBE_LED_LINK_100M | NGBE_LED_LINK_1G); + msleep_interruptible(200); + } + /* Restore LED settings */ + wr32(hw, NGBE_CFG_LED_CTL, 
led_reg); + } + + + return 0; +} +#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ + +static int ngbe_get_coalesce(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else + struct ethtool_coalesce *ec) +#endif +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; + /* only valid if in constant ITR mode */ + if (adapter->rx_itr_setting <= 1) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + /* if in mixed tx/rx queues per vector mode, report only rx settings */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + return 0; + + /* only valid if in constant ITR mode */ + if (adapter->tx_itr_setting <= 1) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + + return 0; +} + +static int ngbe_set_coalesce(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else + struct ethtool_coalesce *ec) +#endif +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_q_vector *q_vector; + int i; + u16 tx_itr_param, rx_itr_param; + u16 tx_itr_prev; + bool need_reset = false; + + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) { + /* reject Tx specific changes in case of mixed RxTx vectors */ + if (ec->tx_coalesce_usecs) + return -EINVAL; + tx_itr_prev = adapter->rx_itr_setting; + } else { + tx_itr_prev = adapter->tx_itr_setting; + } + + if (ec->tx_max_coalesced_frames_irq) + adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; + + if ((ec->rx_coalesce_usecs > (NGBE_MAX_EITR >> 2)) || + (ec->tx_coalesce_usecs > (NGBE_MAX_EITR >> 2))) + return -EINVAL; + + if 
(ec->rx_coalesce_usecs > 1) + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + + if (adapter->rx_itr_setting == 1) + rx_itr_param = NGBE_20K_ITR; + else + rx_itr_param = adapter->rx_itr_setting; + + if (ec->tx_coalesce_usecs > 1) + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + + if (adapter->tx_itr_setting == 1) + tx_itr_param = NGBE_20K_ITR; + else + tx_itr_param = adapter->tx_itr_setting; + + /* mixed Rx/Tx */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + adapter->tx_itr_setting = adapter->rx_itr_setting; + + /* detect ITR changes that require update of TXDCTL.WTHRESH */ + if ((adapter->tx_itr_setting != 1) && + (adapter->tx_itr_setting < NGBE_70K_ITR)) { + if ((tx_itr_prev == 1) || + (tx_itr_prev >= NGBE_70K_ITR)) + need_reset = true; + } else { + if ((tx_itr_prev != 1) && + (tx_itr_prev < NGBE_70K_ITR)) + need_reset = true; + } + + /* hw->mac.ops.dmac_config is null*/ + if (hw->mac.ops.dmac_config && + adapter->hw.mac.dmac_config.watchdog_timer && + (!adapter->rx_itr_setting && !adapter->tx_itr_setting)) { + e_info(probe, + "Disabling DMA coalescing because interrupt throttling " + "is disabled\n"); + adapter->hw.mac.dmac_config.watchdog_timer = 0; + hw->mac.ops.dmac_config(hw); + } + + for (i = 0; i < adapter->num_q_vectors; i++) { + q_vector = adapter->q_vector[i]; + q_vector->tx.work_limit = adapter->tx_work_limit; + q_vector->rx.work_limit = adapter->rx_work_limit; + if (q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->itr = tx_itr_param; + else + /* rx only or mixed */ + q_vector->itr = rx_itr_param; + ngbe_write_eitr(q_vector); + } + + /* + * do reset here at the end to make sure EITR==0 case is handled + * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings + * also locks in RSC enable/disable which requires reset + */ + if (need_reset) + ngbe_do_reset(netdev); 
+ + return 0; +} + +#ifndef HAVE_NDO_SET_FEATURES +static u32 ngbe_get_rx_csum(struct net_device *netdev) +{ + return !!(netdev->features & NETIF_F_RXCSUM); +} + +static int ngbe_set_rx_csum(struct net_device *netdev, u32 data) +{ +#ifdef HAVE_VXLAN_RX_OFFLOAD + struct ngbe_adapter *adapter = netdev_priv(netdev); +#endif + bool need_reset = false; + + if (data) + netdev->features |= NETIF_F_RXCSUM; + else + netdev->features &= ~NETIF_F_RXCSUM; + + if (!data && (netdev->features & NETIF_F_LRO)) { + netdev->features &= ~NETIF_F_LRO; + } + +#ifdef HAVE_VXLAN_RX_OFFLOAD + if (adapter->flags & NGBE_FLAG_VXLAN_OFFLOAD_CAPABLE && data) { + netdev->hw_enc_features |= NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM; + if (!need_reset) + adapter->flags2 |= NGBE_FLAG2_VXLAN_REREG_NEEDED; + } else { + netdev->hw_enc_features &= ~(NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM); + ngbe_clear_vxlan_port(adapter); + } +#endif /* HAVE_VXLAN_RX_OFFLOAD */ + + if (need_reset) + ngbe_do_reset(netdev); + + return 0; +} + +static int ngbe_set_tx_csum(struct net_device *netdev, u32 data) +{ +#ifdef NETIF_F_IPV6_CSUM + u32 feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; +#else + u32 feature_list = NETIF_F_IP_CSUM; +#endif + + +#ifdef HAVE_ENCAP_TSO_OFFLOAD + if (data) + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; + else + netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL; + feature_list |= NETIF_F_GSO_UDP_TUNNEL; +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + feature_list |= NETIF_F_SCTP_CSUM; + + + if (data) + netdev->features |= feature_list; + else + netdev->features &= ~feature_list; + + return 0; +} + +#ifdef NETIF_F_TSO +static int ngbe_set_tso(struct net_device *netdev, u32 data) +{ +#ifdef NETIF_F_TSO6 + u32 feature_list = NETIF_F_TSO | NETIF_F_TSO6; +#else + u32 feature_list = NETIF_F_TSO; +#endif + + if (data) + netdev->features |= feature_list; + else + netdev->features &= ~feature_list; + +#ifndef HAVE_NETDEV_VLAN_FEATURES + if (!data) { + struct 
ngbe_adapter *adapter = netdev_priv(netdev); + struct net_device *v_netdev; + int i; + + /* disable TSO on all VLANs if they're present */ + if (!adapter->vlgrp) + goto tso_out; + + for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { + v_netdev = vlan_group_get_device(adapter->vlgrp, i); + if (!v_netdev) + continue; + + v_netdev->features &= ~feature_list; + vlan_group_set_device(adapter->vlgrp, i, v_netdev); + } + } + +tso_out: + +#endif /* HAVE_NETDEV_VLAN_FEATURES */ + return 0; +} + +#endif /* NETIF_F_TSO */ +#ifdef ETHTOOL_GFLAGS +static int ngbe_set_flags(struct net_device *netdev, u32 data) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN; +#ifndef HAVE_VLAN_RX_REGISTER + u32 changed = netdev->features ^ data; +#endif + bool need_reset = false; + int rc; + +#ifndef HAVE_VLAN_RX_REGISTER + if ((adapter->flags & NGBE_FLAG_DCB_ENABLED) && + !(data & ETH_FLAG_RXVLAN)) + return -EINVAL; + +#endif + supported_flags |= ETH_FLAG_LRO; + +#ifdef ETHTOOL_GRXRINGS + + supported_flags |= ETH_FLAG_NTUPLE; + + +#endif +#ifdef NETIF_F_RXHASH + supported_flags |= ETH_FLAG_RXHASH; + +#endif + rc = ethtool_op_set_flags(netdev, data, supported_flags); + if (rc) + return rc; + +#ifndef HAVE_VLAN_RX_REGISTER + if (changed & ETH_FLAG_RXVLAN) + ngbe_vlan_mode(netdev, netdev->features); + +#endif + +#ifdef HAVE_VXLAN_CHECKS + if (adapter->flags & NGBE_FLAG_VXLAN_OFFLOAD_CAPABLE && + netdev->features & NETIF_F_RXCSUM) { + vxlan_get_rx_port(netdev); + else + ngbe_clear_vxlan_port(adapter); + } +#endif /* HAVE_VXLAN_RX_OFFLOAD */ + +#ifdef ETHTOOL_GRXRINGS + /* + * Check if Flow Director n-tuple support was enabled or disabled. If + * the state changed, we need to reset. 
+ */ + switch (netdev->features & NETIF_F_NTUPLE) { + case NETIF_F_NTUPLE: + /* turn off ATR, enable perfect filters and reset */ + if (!(adapter->flags & NGBE_FLAG_FDIR_PERFECT_CAPABLE)) + need_reset = true; + break; + default: + /* turn off perfect filters, enable ATR and reset */ + if (adapter->flags & NGBE_FLAG_FDIR_PERFECT_CAPABLE) + need_reset = true; + + /* We cannot enable ATR if VMDq is enabled */ + if (adapter->flags & NGBE_FLAG_VMDQ_ENABLED) + break; + + /* We cannot enable ATR if we have 2 or more traffic classes */ + if (netdev_get_num_tc(netdev) > 1) + break; + + /* We cannot enable ATR if RSS is disabled */ + if (adapter->ring_feature[RING_F_RSS].limit <= 1) + break; + + /* A sample rate of 0 indicates ATR disabled */ + if (!adapter->atr_sample_rate) + break; + + break; + } + +#endif /* ETHTOOL_GRXRINGS */ + if (need_reset) + ngbe_do_reset(netdev); + + return 0; +} + +#endif /* ETHTOOL_GFLAGS */ +#endif /* HAVE_NDO_SET_FEATURES */ +#ifdef ETHTOOL_GRXRINGS + +static int ngbe_get_rss_hash_opts(struct ngbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on ngbe */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V4_FLOW: + if (adapter->flags2 & NGBE_FLAG2_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V6_FLOW: + if (adapter->flags2 & NGBE_FLAG2_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ngbe_get_rxnfc(struct 
net_device *dev, struct ethtool_rxnfc *cmd, +#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS + void *rule_locs) +#else + u32 *rule_locs) +#endif +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + break; + case ETHTOOL_GRXCLSRLALL: + break; + case ETHTOOL_GRXFH: + ret = ngbe_get_rss_hash_opts(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +#ifdef ETHTOOL_SRXNTUPLE +/* + * We need to keep this around for kernels 2.6.33 - 2.6.39 in order to avoid + * a null pointer dereference as it was assumend if the NETIF_F_NTUPLE flag + * was defined that this function was present. + */ +static int ngbe_set_rx_ntuple(struct net_device __always_unused *dev, + struct ethtool_rx_ntuple __always_unused *cmd) +{ + return -EOPNOTSUPP; +} + +#endif +#define UDP_RSS_FLAGS (NGBE_FLAG2_RSS_FIELD_IPV4_UDP | \ + NGBE_FLAG2_RSS_FIELD_IPV6_UDP) +static int ngbe_set_rss_hash_opt(struct ngbe_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags2 = adapter->flags2; + + /* + * RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags2 &= ~NGBE_FLAG2_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags2 |= NGBE_FLAG2_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if 
(!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags2 &= ~NGBE_FLAG2_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags2 |= NGBE_FLAG2_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags2 != adapter->flags2) { + struct ngbe_hw *hw = &adapter->hw; + u32 mrqc; + + mrqc = rd32(hw, NGBE_RDB_RA_CTL); + + if ((flags2 & UDP_RSS_FLAGS) && + !(adapter->flags2 & UDP_RSS_FLAGS)) + e_warn(drv, "enabling UDP RSS: fragmented packets" + " may arrive out of order to the stack above\n"); + + adapter->flags2 = flags2; + + /* Perform hash on these packet types */ + mrqc |= NGBE_RDB_RA_CTL_RSS_IPV4 + | NGBE_RDB_RA_CTL_RSS_IPV4_TCP + | NGBE_RDB_RA_CTL_RSS_IPV6 + | NGBE_RDB_RA_CTL_RSS_IPV6_TCP; + + mrqc &= ~(NGBE_RDB_RA_CTL_RSS_IPV4_UDP | + NGBE_RDB_RA_CTL_RSS_IPV6_UDP); + + if (flags2 & NGBE_FLAG2_RSS_FIELD_IPV4_UDP) + mrqc |= NGBE_RDB_RA_CTL_RSS_IPV4_UDP; + + if (flags2 & NGBE_FLAG2_RSS_FIELD_IPV6_UDP) + mrqc |= NGBE_RDB_RA_CTL_RSS_IPV6_UDP; + + wr32(hw, NGBE_RDB_RA_CTL, mrqc); + } + + return 0; +} + +static int ngbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + break; + case ETHTOOL_SRXCLSRLDEL: + break; + case ETHTOOL_SRXFH: + ret = ngbe_set_rss_hash_opt(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) +static 
int ngbe_rss_indir_tbl_max(struct ngbe_adapter *adapter) +{ + return 64; +} + +static u32 ngbe_get_rxfh_key_size(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + return sizeof(adapter->rss_key); +} + +static u32 ngbe_rss_indir_size(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + return ngbe_rss_indir_tbl_entries(adapter); +} + +static void ngbe_get_reta(struct ngbe_adapter *adapter, u32 *indir) +{ + int i, reta_size = ngbe_rss_indir_tbl_entries(adapter); + + for (i = 0; i < reta_size; i++) + indir[i] = adapter->rss_indir_tbl[i]; +} + +#ifdef HAVE_RXFH_HASHFUNC +static int ngbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +#else /* HAVE_RXFH_HASHFUNC */ +//#ifdef HAVE_RXFH_HASHKEY +static int ngbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +//#else +//static int ngbe_get_rxfh(struct net_device *netdev, u32 *indir) +//#endif +#endif /* HAVE_RXFH_HASHFUNC */ +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; +#endif + + if (indir) + ngbe_get_reta(adapter, indir); +//#ifdef HAVE_RXFH_HASHKEY + if (key) + memcpy(key, adapter->rss_key, ngbe_get_rxfh_key_size(netdev)); +//#endif + + return 0; +} + +#ifdef HAVE_RXFH_HASHFUNC +static int ngbe_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +#else +#ifdef HAVE_RXFH_NONCONST +//#ifdef HAVE_RXFH_HASHKEY +static int ngbe_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +//#else +//static int ngbe_set_rxfh(struct net_device *netdev, u32 *indir) +//#endif +#else /* HAVE_RXFH_NONCONST */ +//#ifdef HAVE_RXFH_HASHKEY +static int ngbe_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key) +//#else +//static int ngbe_set_rxfh(struct net_device *netdev, const u32 *indir) +//#endif +#endif /* HAVE_RXFH_NONCONST */ +#endif /* HAVE_RXFH_HASHFUNC */ +{ + struct ngbe_adapter *adapter = 
netdev_priv(netdev); + int i; + u32 reta_entries = ngbe_rss_indir_tbl_entries(adapter); + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc) + return -EINVAL; +#endif + + /* Fill out the redirection table */ + if (indir) { + int max_queues = min_t(int, adapter->num_rx_queues, + ngbe_rss_indir_tbl_max(adapter)); + + /*Allow at least 2 queues w/ SR-IOV.*/ + if ((adapter->flags & NGBE_FLAG_SRIOV_ENABLED) && + (max_queues < 2)) + max_queues = 2; + + /* Verify user input. */ + for (i = 0; i < reta_entries; i++) + if (indir[i] >= max_queues) + return -EINVAL; + + for (i = 0; i < reta_entries; i++) + adapter->rss_indir_tbl[i] = indir[i]; + } + +//#ifdef HAVE_RXFH_HASHKEY + /* Fill out the rss hash key */ + if (key) + memcpy(adapter->rss_key, key, ngbe_get_rxfh_key_size(netdev)); +//#endif + + ngbe_store_reta(adapter); + + return 0; +} +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ + +#ifdef HAVE_ETHTOOL_GET_TS_INFO +static int ngbe_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + + /* we always support timestamping disabled */ + info->rx_filters = 1 << HWTSTAMP_FILTER_NONE; + +#ifdef HAVE_PTP_1588_CLOCK + + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters |= + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) 
| + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); + +#endif /* HAVE_PTP_1588_CLOCK */ + return 0; +} +#endif /* HAVE_ETHTOOL_GET_TS_INFO */ + +#endif /* ETHTOOL_GRXRINGS */ +#ifdef ETHTOOL_SCHANNELS +static unsigned int ngbe_max_channels(struct ngbe_adapter *adapter) +{ + unsigned int max_combined; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (!(adapter->flags & NGBE_FLAG_MSIX_ENABLED)) { + /* We only support one q_vector without MSI-X */ + max_combined = 1; + } else if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) { + /* SR-IOV currently only allows one queue on the PF */ + max_combined = 1; + } else if (tcs > 1) { + /* For DCB report channels per traffic class */ + if (tcs > 4) { + /* 8 TC w/ 8 queues per TC */ + max_combined = 8; + } else { + /* 4 TC w/ 16 queues per TC */ + max_combined = 16; + } + } else if (adapter->atr_sample_rate) { + /* support up to 64 queues with ATR */ + max_combined = NGBE_MAX_FDIR_INDICES; + } else { + /* support up to max allowed queues with RSS */ + max_combined = ngbe_max_rss_indices(adapter); + } + if (adapter->xdp_prog) + return max_combined = NGBE_MAX_RSS_INDICES / 2; + + return max_combined; +} + +static void ngbe_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + + /* report maximum channels */ + ch->max_combined = ngbe_max_channels(adapter); + + /* report info for other vector */ + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + /* record RSS queues */ + ch->combined_count = adapter->ring_feature[RING_F_RSS].indices; + if (adapter->xdp_prog) + ch->combined_count = min(ch->combined_count, (u32)(NGBE_MAX_RSS_INDICES / 2)); + + /* nothing else to report if RSS is disabled */ + if (ch->combined_count == 1) + return; + + /* we do not support ATR queueing if SR-IOV is enabled */ + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) + return; + + /* 
same thing goes for being DCB enabled */ + if (netdev_get_num_tc(dev) > 1) + return; + + /* if ATR is disabled we can exit */ + if (!adapter->atr_sample_rate) + return; + +} + +static int ngbe_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + unsigned int count = ch->combined_count; + u8 max_rss_indices = ngbe_max_rss_indices(adapter); + + /* verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* verify other_count has not changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* verify the number of channels does not exceed hardware limits */ + if (count > ngbe_max_channels(adapter)) + return -EINVAL; + + /* cap RSS limit */ + if (count > max_rss_indices) + count = max_rss_indices; + adapter->ring_feature[RING_F_RSS].limit = count; + + /* use setup TC to update any traffic class queue mapping */ + return ngbe_setup_tc(dev, netdev_get_num_tc(dev), 0); +} +#endif /* ETHTOOL_SCHANNELS */ + +#if 0 +#ifdef ETHTOOL_GMODULEINFO +static int ngbe_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) + + +static int ngbe_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, u8 *data) +#endif /* ETHTOOL_GMODULEINFO */ +#endif + +#ifdef ETHTOOL_GEEE +static int ngbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + return 0; +} +#endif /* ETHTOOL_GEEE */ + +#ifdef ETHTOOL_SEEE +static int ngbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + struct ethtool_eee eee_data; + int ret_val; + + if (!(hw->mac.ops.setup_eee && + (adapter->flags2 & NGBE_FLAG2_EEE_CAPABLE))) + return -EOPNOTSUPP; + + memset(&eee_data, 0, sizeof(struct ethtool_eee)); + + ret_val = ngbe_get_eee(netdev, &eee_data); + if (ret_val) + return ret_val; + + if (eee_data.eee_enabled && 
!edata->eee_enabled) { + if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) { + e_dev_err("Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) { + e_dev_err("Setting EEE Tx LPI timer is not " + "supported\n"); + return -EINVAL; + } + + if (eee_data.advertised != edata->advertised) { + e_dev_err("Setting EEE advertised speeds is not " + "supported\n"); + return -EINVAL; + } + + } + + if (eee_data.eee_enabled != edata->eee_enabled) { + + if (edata->eee_enabled) + adapter->flags2 |= NGBE_FLAG2_EEE_ENABLED; + else + adapter->flags2 &= ~NGBE_FLAG2_EEE_ENABLED; + + /* reset link */ + if (netif_running(netdev)) + ngbe_reinit_locked(adapter); + else + ngbe_reset(adapter); + } + + return 0; +} +#endif /* ETHTOOL_SEEE */ + +static int ngbe_set_flash(struct net_device *netdev, struct ethtool_flash *ef) +{ + int ret; + const struct firmware *fw; + struct ngbe_adapter *adapter = netdev_priv(netdev); + + ret = request_firmware(&fw, ef->data, &netdev->dev); + if (ret < 0) + return ret; + + if (ef->region == 0) { + ret = ngbe_upgrade_flash(&adapter->hw, ef->region, + fw->data, fw->size); + } else { + if (ngbe_mng_present(&adapter->hw)) { + ret = ngbe_upgrade_flash_hostif(&adapter->hw, ef->region, + fw->data, fw->size); + } else + ret = -EOPNOTSUPP; + } + + release_firmware(fw); + if (!ret) + dev_info(&netdev->dev, + "loaded firmware %s, reboot to make firmware work\n", ef->data); + return ret; +} + + +static struct ethtool_ops ngbe_ethtool_ops = { +#ifdef ETHTOOL_COALESCE_USECS + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, +#endif +#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE + .get_link_ksettings = ngbe_get_link_ksettings, + .set_link_ksettings = ngbe_set_link_ksettings, +#else + .get_settings = ngbe_get_settings, + .set_settings = ngbe_set_settings, +#endif + .get_drvinfo = ngbe_get_drvinfo, + .get_regs_len = ngbe_get_regs_len, + .get_regs = ngbe_get_regs, + .get_wol = ngbe_get_wol, + .set_wol = 
ngbe_set_wol, + .nway_reset = ngbe_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = ngbe_get_eeprom_len, + .get_eeprom = ngbe_get_eeprom, + .set_eeprom = ngbe_set_eeprom, + .get_ringparam = ngbe_get_ringparam, + .set_ringparam = ngbe_set_ringparam, + .get_pauseparam = ngbe_get_pauseparam, + .set_pauseparam = ngbe_set_pauseparam, + .get_msglevel = ngbe_get_msglevel, + .set_msglevel = ngbe_set_msglevel, +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT + .self_test_count = ngbe_diag_test_count, +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + .self_test = ngbe_diag_test, + .get_strings = ngbe_get_strings, +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#ifdef HAVE_ETHTOOL_SET_PHYS_ID + .set_phys_id = ngbe_set_phys_id, +#else + .phys_id = ngbe_phys_id, +#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT + .get_stats_count = ngbe_get_stats_count, +#else /* HAVE_ETHTOOL_GET_SSET_COUNT */ + .get_sset_count = ngbe_get_sset_count, + .get_priv_flags = ngbe_get_priv_flags, + .set_priv_flags = ngbe_set_priv_flags, +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + .get_ethtool_stats = ngbe_get_ethtool_stats, +#ifdef HAVE_ETHTOOL_GET_PERM_ADDR + .get_perm_addr = ethtool_op_get_perm_addr, +#endif + .get_coalesce = ngbe_get_coalesce, + .set_coalesce = ngbe_set_coalesce, +#ifndef HAVE_NDO_SET_FEATURES + .get_rx_csum = ngbe_get_rx_csum, + .set_rx_csum = ngbe_set_rx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = ngbe_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, +#ifdef NETIF_F_TSO + .get_tso = ethtool_op_get_tso, + .set_tso = ngbe_set_tso, +#endif +#ifdef ETHTOOL_GFLAGS + .get_flags = ethtool_op_get_flags, + .set_flags = ngbe_set_flags, +#endif +#endif /* HAVE_NDO_SET_FEATURES */ +#ifdef ETHTOOL_GRXRINGS + .get_rxnfc = ngbe_get_rxnfc, + .set_rxnfc = ngbe_set_rxnfc, +#ifdef ETHTOOL_SRXNTUPLE + .set_rx_ntuple = ngbe_set_rx_ntuple, +#endif +#endif /* ETHTOOL_GRXRINGS */ +#ifndef 
HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT + + +#if 0 +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) +#ifdef HAVE_RXFH_HASHKEY + .get_rxfh_indir_size = ngbe_rss_indir_size, + .get_rxfh_key_size = ngbe_get_rxfh_key_size, + .get_rxfh = ngbe_get_rxfh, + .set_rxfh = ngbe_set_rxfh, +#else/* HAVE_RXFH_HASHKEY */ + .get_rxfh_indir_size = ngbe_rss_indir_size, + .get_rxfh_indir = ngbe_get_rxfh, + .set_rxfh_indir = ngbe_set_rxfh, +#endif /* HAVE_RXFH_HASHKEY */ +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +#endif + +#ifdef ETHTOOL_GEEE + .get_eee = ngbe_get_eee, +#endif /* ETHTOOL_GEEE */ +#ifdef ETHTOOL_SEEE + .set_eee = ngbe_set_eee, +#endif /* ETHTOOL_SEEE */ +#ifdef ETHTOOL_SCHANNELS + .get_channels = ngbe_get_channels, + .set_channels = ngbe_set_channels, +#endif +#if 0 +#ifdef ETHTOOL_GMODULEINFO + .get_module_info = ngbe_get_module_info, + .get_module_eeprom = ngbe_get_module_eeprom, +#endif +#endif +#ifdef HAVE_ETHTOOL_GET_TS_INFO + .get_ts_info = ngbe_get_ts_info, +#endif +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_indir_size = ngbe_rss_indir_size, + .get_rxfh_key_size = ngbe_get_rxfh_key_size, + .get_rxfh = ngbe_get_rxfh, + .set_rxfh = ngbe_set_rxfh, +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ + .flash_device = ngbe_set_flash, +}; + +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +static const struct ethtool_ops_ext ngbe_ethtool_ops_ext = { + .size = sizeof(struct ethtool_ops_ext), + .get_ts_info = ngbe_get_ts_info, + .set_phys_id = ngbe_set_phys_id, + .get_channels = ngbe_get_channels, + .set_channels = ngbe_set_channels, +#if 0 +#ifdef ETHTOOL_GMODULEINFO + .get_module_info = ngbe_get_module_info, + .get_module_eeprom = ngbe_get_module_eeprom, +#endif +#endif +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_indir_size = ngbe_rss_indir_size, + .get_rxfh_key_size = ngbe_get_rxfh_key_size, + .get_rxfh = ngbe_get_rxfh, + .set_rxfh = ngbe_set_rxfh, +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH 
*/ +#ifdef ETHTOOL_GEEE + .get_eee = ngbe_get_eee, +#endif /* ETHTOOL_GEEE */ +#ifdef ETHTOOL_SEEE + .set_eee = ngbe_set_eee, +#endif /* ETHTOOL_SEEE */ +}; +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ + +void ngbe_set_ethtool_ops(struct net_device *netdev) +{ +#ifndef ETHTOOL_OPS_COMPAT + netdev->ethtool_ops = &ngbe_ethtool_ops; +#else + SET_ETHTOOL_OPS(netdev, &ngbe_ethtool_ops); +#endif + +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT + set_ethtool_ops_ext(netdev, &ngbe_ethtool_ops_ext); +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +} +#endif /* SIOCETHTOOL */ + diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c new file mode 100644 index 000000000000..f304dadb8589 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c @@ -0,0 +1,4953 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + */ + +#include "ngbe_type.h" +#include "ngbe_hw.h" +#include "ngbe_phy.h" +#include "ngbe.h" + +#define NGBE_SP_MAX_TX_QUEUES 8 +#define NGBE_SP_MAX_RX_QUEUES 8 +#define NGBE_SP_RAR_ENTRIES 32 +#define NGBE_SP_MC_TBL_SIZE 128 +#define NGBE_SP_VFT_TBL_SIZE 128 +#define NGBE_SP_RX_PB_SIZE 42 + +u32 ngbe_rd32_epcs(struct ngbe_hw *hw, u32 addr) +{ + unsigned int portRegOffset; + u32 data; + /* Set the LAN port indicator to portRegOffset[1] */ + /* 1st, write the regOffset to IDA_ADDR register */ + portRegOffset = NGBE_XPCS_IDA_ADDR; + wr32(hw, portRegOffset, addr); + + /* 2nd, read the data from IDA_DATA register */ + portRegOffset = NGBE_XPCS_IDA_DATA; + data = rd32(hw, portRegOffset); + + return data; +} + + +void ngbe_wr32_ephy(struct ngbe_hw *hw, u32 addr, u32 data) +{ + unsigned int portRegOffset; + + /* Set the LAN port indicator to portRegOffset[1] */ + /* 1st, write the regOffset to IDA_ADDR register */ + portRegOffset = NGBE_ETHPHY_IDA_ADDR; + wr32(hw, portRegOffset, addr); + + /* 2nd, read the data from IDA_DATA register */ + portRegOffset = NGBE_ETHPHY_IDA_DATA; + wr32(hw, portRegOffset, data); +} + +void ngbe_wr32_epcs(struct ngbe_hw *hw, u32 addr, u32 data) +{ + unsigned int portRegOffset; + + /* Set the LAN port indicator to portRegOffset[1] */ + /* 1st, write the regOffset to IDA_ADDR register */ + portRegOffset = NGBE_XPCS_IDA_ADDR; + wr32(hw, portRegOffset, addr); + + /* 2nd, read the data from IDA_DATA register */ + portRegOffset = NGBE_XPCS_IDA_DATA; + wr32(hw, portRegOffset, data); +} + + + +/** + * ngbe_get_pcie_msix_count - Gets MSI-X vector count + * @hw: pointer to hardware structure + * + * Read PCIe configuration space, and get the MSI-X vector count from + * the capabilities table. + **/ +u16 ngbe_get_pcie_msix_count(struct ngbe_hw *hw) +{ + u16 msix_count = 1; + u16 max_msix_count; + u32 pos; + + /* ??? 
max_msix_count for emerald */ + max_msix_count = NGBE_MAX_MSIX_VECTORS_EMERALD; + pos = pci_find_capability(((struct ngbe_adapter *)hw->back)->pdev, + PCI_CAP_ID_MSIX); + if (!pos) + return msix_count; + pci_read_config_word(((struct ngbe_adapter *)hw->back)->pdev, + pos + PCI_MSIX_FLAGS, &msix_count); + + if (NGBE_REMOVED(hw->hw_addr)) + msix_count = 0; + msix_count &= NGBE_PCIE_MSIX_TBL_SZ_MASK; + + /* MSI-X count is zero-based in HW */ + msix_count++; + + if (msix_count > max_msix_count) + msix_count = max_msix_count; + + return msix_count; +} + +/** + * ngbe_init_hw - Generic hardware initialization + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting the hardware, filling the bus info + * structure and media type, clears all on chip counters, initializes receive + * address registers, multicast table, VLAN filter table, calls routine to set + * up link and flow control settings, and leaves transmit and receive units + * disabled and uninitialized + **/ +int ngbe_init_hw(struct ngbe_hw *hw) +{ + int status; + + /* Reset the hardware */ + status = hw->mac.ops.reset_hw(hw); + + if (status == 0) + /* Start the HW */ + status = hw->mac.ops.start_hw(hw); + + return status; +} + + +/** + * ngbe_clear_hw_cntrs - Generic clear hardware counters + * @hw: pointer to hardware structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. + **/ +int ngbe_clear_hw_cntrs(struct ngbe_hw *hw) +{ + u16 i = 0; + + rd32(hw, NGBE_RX_CRC_ERROR_FRAMES_LOW); + rd32(hw, NGBE_RX_LEN_ERROR_FRAMES_LOW); + rd32(hw, NGBE_RDB_LXONTXC); + rd32(hw, NGBE_RDB_LXOFFTXC); + /* ??? 1e0c not found */ + /* rd32(hw, NGBE_MAC_LXONRXC); */ + rd32(hw, NGBE_MAC_LXOFFRXC); + + for (i = 0; i < 8; i++) { + /* ??? move 16? 
*/ + wr32m(hw, NGBE_MMC_CONTROL, NGBE_MMC_CONTROL_UP, i<<16); + rd32(hw, NGBE_MAC_PXOFFRXC); + } + + for (i = 0; i < 8; i++) { + wr32(hw, NGBE_PX_MPRC(i), 0); + } + /* BPRC */ + + rd32(hw, NGBE_PX_GPRC); + rd32(hw, NGBE_PX_GPTC); + rd32(hw, NGBE_PX_GORC_MSB); + rd32(hw, NGBE_PX_GOTC_MSB); + + rd32(hw, NGBE_RX_BC_FRAMES_GOOD_LOW); + rd32(hw, NGBE_RX_UNDERSIZE_FRAMES_GOOD); + rd32(hw, NGBE_RX_OVERSIZE_FRAMES_GOOD); + rd32(hw, NGBE_RX_FRAME_CNT_GOOD_BAD_LOW); + rd32(hw, NGBE_TX_FRAME_CNT_GOOD_BAD_LOW); + rd32(hw, NGBE_TX_MC_FRAMES_GOOD_LOW); + rd32(hw, NGBE_TX_BC_FRAMES_GOOD_LOW); + rd32(hw, NGBE_RDM_DRP_PKT); + return 0; +} + + + +/** + * ngbe_setup_fc - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + **/ +int ngbe_setup_fc(struct ngbe_hw *hw) +{ + int ret_val = 0; + u16 pcap_backplane = 0; + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ngbe_fc_rx_pause) { + ERROR_REPORT1(NGBE_ERROR_UNSUPPORTED, + "ngbe_fc_rx_pause not valid in strict IEEE mode\n"); + ret_val = NGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* + * gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. + */ + if (hw->fc.requested_mode == ngbe_fc_default) + hw->fc.requested_mode = ngbe_fc_full; + + /* + * The possible values of fc.requested_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.requested_mode) { + case ngbe_fc_none: + /* Flow control completely disabled by software override. 
*/ + break; + case ngbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + if (hw->phy.type != ngbe_phy_m88e1512_sfi && + hw->phy.type != ngbe_phy_yt8521s_sfi) + pcap_backplane |= NGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM; + else + pcap_backplane |= 0x100; + break; + case ngbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + case ngbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + if (hw->phy.type != ngbe_phy_m88e1512_sfi && + hw->phy.type != ngbe_phy_yt8521s_sfi) + pcap_backplane |= NGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM | + NGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM; + else + pcap_backplane |= 0x80; + break; + default: + ERROR_REPORT1(NGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = NGBE_ERR_CONFIG; + goto out; + } + + /* + * AUTOC restart handles negotiation of 1G on backplane + * and copper. 
+ */ + if ((hw->phy.media_type == ngbe_media_type_copper) && + !((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA)) { + ret_val = hw->phy.ops.set_adv_pause(hw, pcap_backplane); + } + +out: + return ret_val; +} + + +/** + * ngbe_get_mac_addr - Generic get MAC address + * @hw: pointer to hardware structure + * @mac_addr: Adapter MAC address + * + * Reads the adapter's MAC address from first Receive Address Register (RAR0) + * A reset of the adapter must be performed prior to calling this function + * in order for the MAC address to have been loaded from the EEPROM into RAR0 + **/ +int ngbe_get_mac_addr(struct ngbe_hw *hw, u8 *mac_addr) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + wr32(hw, NGBE_PSR_MAC_SWC_IDX, 0); + rar_high = rd32(hw, NGBE_PSR_MAC_SWC_AD_H); + rar_low = rd32(hw, NGBE_PSR_MAC_SWC_AD_L); + + for (i = 0; i < 2; i++) + mac_addr[i] = (u8)(rar_high >> (1 - i) * 8); + + for (i = 0; i < 4; i++) + mac_addr[i + 2] = (u8)(rar_low >> (3 - i) * 8); + + return 0; +} + +/** + * ngbe_set_pci_config_data - Generic store PCI bus info + * @hw: pointer to hardware structure + * @link_status: the link status returned by the PCI config space + * + * Stores the PCI bus info (speed, width, type) within the ngbe_hw structure + **/ +void ngbe_set_pci_config_data(struct ngbe_hw *hw, u16 link_status) +{ + if (hw->bus.type == ngbe_bus_type_unknown) + hw->bus.type = ngbe_bus_type_pci_express; + + switch (link_status & NGBE_PCI_LINK_WIDTH) { + case NGBE_PCI_LINK_WIDTH_1: + hw->bus.width = PCIE_LNK_X1; + break; + case NGBE_PCI_LINK_WIDTH_2: + hw->bus.width = PCIE_LNK_X2; + break; + case NGBE_PCI_LINK_WIDTH_4: + hw->bus.width = PCIE_LNK_X4; + break; + case NGBE_PCI_LINK_WIDTH_8: + hw->bus.width = PCIE_LNK_X8; + break; + default: + hw->bus.width = PCIE_LNK_WIDTH_UNKNOWN; + break; + } + + switch (link_status & NGBE_PCI_LINK_SPEED) { + case NGBE_PCI_LINK_SPEED_2500: + hw->bus.speed = PCIE_SPEED_2_5GT; + break; + case NGBE_PCI_LINK_SPEED_5000: + hw->bus.speed = PCIE_SPEED_5_0GT; + 
break; + case NGBE_PCI_LINK_SPEED_8000: + hw->bus.speed = PCIE_SPEED_8_0GT; + break; + default: + hw->bus.speed = PCI_SPEED_UNKNOWN; + break; + } +} + +/** + * ngbe_get_bus_info - Generic set PCI bus info + * @hw: pointer to hardware structure + * + * Gets the PCI bus info (speed, width, type) then calls helper function to + * store this data within the ngbe_hw structure. + **/ +int ngbe_get_bus_info(struct ngbe_hw *hw) +{ + u16 link_status; + + /* Get the negotiated link width and speed from PCI config space */ + link_status = NGBE_READ_PCIE_WORD(hw, NGBE_PCI_LINK_STATUS); + + ngbe_set_pci_config_data(hw, link_status); + + return 0; +} + +/** + * ngbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. + **/ +void ngbe_set_lan_id_multi_port_pcie(struct ngbe_hw *hw) +{ + struct ngbe_bus_info *bus = &hw->bus; + u32 reg = 0; + + reg = rd32(hw, NGBE_CFG_PORT_ST); + bus->lan_id = NGBE_CFG_PORT_ST_LAN_ID(reg); + bus->func = bus->lan_id; +} + +/** + * ngbe_stop_adapter - Generic stop Tx/Rx units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within ngbe_hw struct. Clears interrupts, + * disables transmit and receive units. The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. 
+ **/ +int ngbe_stop_adapter(struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = hw->back; + u16 i; + + /* + * Set the adapter_stopped flag so other driver functions stop touching + * the hardware + */ + hw->adapter_stopped = true; + + /* Disable the receive unit */ + hw->mac.ops.disable_rx(hw); + + /* Set interrupt mask to stop interrupts from being generated */ + ngbe_intr_disable(hw, NGBE_INTR_ALL); + + /* Clear any pending interrupts, flush previous writes */ + wr32(hw, NGBE_PX_MISC_IC, 0xffffffff); + + /* ??? 0bit RW->RO */ + wr32(hw, NGBE_BME_CTL, 0x3); + + + /* Disable the transmit unit. Each queue must be disabled. */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + wr32m(hw, NGBE_PX_TR_CFG(i), + NGBE_PX_TR_CFG_SWFLSH | NGBE_PX_TR_CFG_ENABLE, + NGBE_PX_TR_CFG_SWFLSH); + } + + /* Disable the receive unit by stopping each queue */ + for (i = 0; i < hw->mac.max_rx_queues; i++) { + wr32m(hw, NGBE_PX_RR_CFG(i), + NGBE_PX_RR_CFG_RR_EN, 0); + } + + /* flush all queues disables */ + NGBE_WRITE_FLUSH(hw); + msec_delay(2); + + /* + * Prevent the PCI-E bus from hanging by disabling PCI-E master + * access and verify no pending requests + */ + if (!(adapter->flags2 & NGBE_FLAG2_ECC_ERR_RESET)) + return ngbe_disable_pcie_master(hw); + else + return 0; +} + +/** + * ngbe_led_on - Turns on the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn on + **/ +int ngbe_led_on(struct ngbe_hw *hw, u32 index) +{ + u32 led_reg = rd32(hw, NGBE_CFG_LED_CTL); + + /* ??? */ + /* To turn on the LED, set mode to ON. */ + led_reg |= index | (index << NGBE_CFG_LED_CTL_LINK_OD_SHIFT); + wr32(hw, NGBE_CFG_LED_CTL, led_reg); + NGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ngbe_led_off - Turns off the software controllable LEDs. 
+ * @hw: pointer to hardware structure + * @index: led number to turn off + **/ +int ngbe_led_off(struct ngbe_hw *hw, u32 index) +{ + u32 led_reg = rd32(hw, NGBE_CFG_LED_CTL); + + /* To turn off the LED, set mode to OFF. */ + led_reg &= ~(index << NGBE_CFG_LED_CTL_LINK_OD_SHIFT); + led_reg |= index; + wr32(hw, NGBE_CFG_LED_CTL, led_reg); + NGBE_WRITE_FLUSH(hw); + return 0; +} + +/** + * ngbe_release_eeprom_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function clears hardware semaphore bits. + **/ +static void ngbe_release_eeprom_semaphore(struct ngbe_hw *hw) +{ + if (ngbe_check_mng_access(hw)) { + wr32m(hw, NGBE_MIS_SWSM, + NGBE_MIS_SWSM_SMBI, 0); + NGBE_WRITE_FLUSH(hw); + } +} + +/** + * ngbe_get_eeprom_semaphore - Get hardware semaphore + * @hw: pointer to hardware structure + * + * Sets the hardware semaphores so EEPROM access can occur for bit-bang method + **/ +static int ngbe_get_eeprom_semaphore(struct ngbe_hw *hw) +{ + int status = NGBE_ERR_EEPROM; + u32 timeout = 2000; + u32 i; + u32 swsm; + + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = rd32(hw, NGBE_MIS_SWSM); + if (!(swsm & NGBE_MIS_SWSM_SMBI)) { + status = 0; + break; + } + usec_delay(50); + } + + if (i == timeout) { + DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore " + "not granted.\n"); + /* + * this release is particularly important because our attempts + * above to get the semaphore may have succeeded, and if there + * was a timeout, we should unconditionally clear the semaphore + * bits to free the driver to make progress + */ + ngbe_release_eeprom_semaphore(hw); + + usec_delay(50); + /* + * one last try + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = rd32(hw, NGBE_MIS_SWSM); + if (!(swsm & NGBE_MIS_SWSM_SMBI)) + 
status = 0;
	}

	return status;
}

/**
 * ngbe_validate_mac_addr - Validate MAC address
 * @mac_addr: pointer to MAC address.
 *
 * Tests a MAC address to ensure it is a valid Individual Address:
 * rejects multicast, broadcast and the all-zero address.
 * Returns 0 if valid, NGBE_ERR_INVALID_MAC_ADDR otherwise.
 **/
int ngbe_validate_mac_addr(u8 *mac_addr)
{
	int status = 0;

	/* Make sure it is not a multicast address */
	if (NGBE_IS_MULTICAST(mac_addr)) {
		DEBUGOUT("MAC address is multicast\n");
		status = NGBE_ERR_INVALID_MAC_ADDR;
	/* Not a broadcast address */
	} else if (NGBE_IS_BROADCAST(mac_addr)) {
		DEBUGOUT("MAC address is broadcast\n");
		status = NGBE_ERR_INVALID_MAC_ADDR;
	/* Reject the zero address */
	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
		DEBUGOUT("MAC address is all zeros\n");
		status = NGBE_ERR_INVALID_MAC_ADDR;
	}
	return status;
}

/**
 * ngbe_set_rar - Set Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @pools: VMDq "set" or "pool" bitmap (lower 32 bits are written to SWC_VM)
 * @enable_addr: set flag that address is active
 *
 * Puts an ethernet address into a receive address register.
 **/
int ngbe_set_rar(struct ngbe_hw *hw, u32 index, u8 *addr, u64 pools,
		 u32 enable_addr)
{
	u32 rar_low, rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		ERROR_REPORT2(NGBE_ERROR_ARGUMENT,
			      "RAR index %d is out of range.\n", index);
		return NGBE_ERR_INVALID_ARGUMENT;
	}

	/* select the MAC address */
	wr32(hw, NGBE_PSR_MAC_SWC_IDX, index);

	/* setup VMDq pool mapping */
	wr32(hw, NGBE_PSR_MAC_SWC_VM, pools & 0xFFFFFFFF);

	/*
	 * HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 *
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_low = ((u32)addr[5] |
		  ((u32)addr[4] << 8) |
		  ((u32)addr[3] << 16) |
		  ((u32)addr[2] << 24));
	rar_high = ((u32)addr[1] |
		   ((u32)addr[0] << 8));
	if (enable_addr != 0)
		rar_high |= NGBE_PSR_MAC_SWC_AD_H_AV;

	wr32(hw, NGBE_PSR_MAC_SWC_AD_L, rar_low);
	wr32m(hw, NGBE_PSR_MAC_SWC_AD_H,
	      (NGBE_PSR_MAC_SWC_AD_H_AD(~0) |
	       NGBE_PSR_MAC_SWC_AD_H_ADTYPE(~0) |
	       NGBE_PSR_MAC_SWC_AD_H_AV),
	      rar_high);

	return 0;
}

/**
 * ngbe_clear_rar - Remove Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to clear
 *
 * Clears an ethernet address from a receive address register.
 **/
int ngbe_clear_rar(struct ngbe_hw *hw, u32 index)
{
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		ERROR_REPORT2(NGBE_ERROR_ARGUMENT,
			      "RAR index %d is out of range.\n", index);
		return NGBE_ERR_INVALID_ARGUMENT;
	}

	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	wr32(hw, NGBE_PSR_MAC_SWC_IDX, index);

	wr32(hw, NGBE_PSR_MAC_SWC_VM, 0);
	wr32(hw, NGBE_PSR_MAC_SWC_AD_L, 0);
	wr32m(hw, NGBE_PSR_MAC_SWC_AD_H,
	      (NGBE_PSR_MAC_SWC_AD_H_AD(~0) |
	       NGBE_PSR_MAC_SWC_AD_H_ADTYPE(~0) |
	       NGBE_PSR_MAC_SWC_AD_H_AV),
	      0);

	return 0;
}

/**
 * ngbe_init_rx_addrs - Initializes receive address filters.
 * @hw: pointer to hardware structure
 *
 * Places the MAC address in receive address register 0 and clears the rest
 * of the receive address registers. Clears the multicast table. Assumes
 * the receiver is in reset when the routine is called.
 **/
int ngbe_init_rx_addrs(struct ngbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;
	u32 psrctl;

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (ngbe_validate_mac_addr(hw->mac.addr) ==
	    NGBE_ERR_INVALID_MAC_ADDR) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2], hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);
	} else {
		/* Setup the receive address. */
		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2], hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0,
				    NGBE_PSR_MAC_SWC_AD_H_AV);
	}
	hw->addr_ctrl.overflow_promisc = 0;

	/* RAR[0] now holds the station address */
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		wr32(hw, NGBE_PSR_MAC_SWC_IDX, i);
		wr32(hw, NGBE_PSR_MAC_SWC_AD_L, 0);
		wr32(hw, NGBE_PSR_MAC_SWC_AD_H, 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	psrctl = rd32(hw, NGBE_PSR_CTL);
	psrctl &= ~(NGBE_PSR_CTL_MO | NGBE_PSR_CTL_MFE);
	psrctl |= hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT;
	wr32(hw, NGBE_PSR_CTL, psrctl);
	DEBUGOUT(" Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		wr32(hw, NGBE_PSR_MC_TBL(i), 0);

	hw->mac.ops.init_uta_tables(hw);

	return 0;
}

/**
 * ngbe_add_uc_addr - Adds a secondary unicast address.
 * @hw: pointer to hardware structure
 * @addr: new address
 * @vmdq: VMDq pool to associate with the address
 *
 * Adds it to unused receive address register or goes into promiscuous mode.
 **/
static void ngbe_add_uc_addr(struct ngbe_hw *hw, u8 *addr, u32 vmdq)
{
	u32 rar_entries = hw->mac.num_rar_entries;
	u32 rar;

	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	/*
	 * Place this address in the RAR if there is room,
	 * else put the controller into promiscuous mode
	 */
	if (hw->addr_ctrl.rar_used_count < rar_entries) {
		rar = hw->addr_ctrl.rar_used_count;
		hw->mac.ops.set_rar(hw, rar, addr, vmdq,
				    NGBE_PSR_MAC_SWC_AD_H_AV);
		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
		hw->addr_ctrl.rar_used_count++;
	} else {
		/* no room left: caller enables unicast promisc from this count */
		hw->addr_ctrl.overflow_promisc++;
	}

	DEBUGOUT("ngbe_add_uc_addr Complete\n");
}

/**
 * ngbe_update_uc_addr_list - Updates MAC list of secondary addresses
 * @hw: pointer to hardware structure
 * @addr_list: the list of new addresses
 * @addr_count: number of addresses
 * @next: iterator function to walk the address list
 *
 * The given list replaces any existing list. Clears the secondary addrs from
 * receive address registers. Uses unused receive address registers for the
 * first secondary addresses, and falls back to promiscuous mode as needed.
 *
 * Drivers using secondary unicast addresses must set user_set_promisc when
 * manually putting the device into promiscuous mode.
 **/
int ngbe_update_uc_addr_list(struct ngbe_hw *hw, u8 *addr_list,
			     u32 addr_count, ngbe_mc_addr_itr next)
{
	u8 *addr;
	u32 i;
	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
	u32 uc_addr_in_use;
	u32 vmdq;

	/*
	 * Clear accounting of old secondary address list,
	 * don't count RAR[0]
	 */
	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
	hw->addr_ctrl.overflow_promisc = 0;

	/* Zero out the other receive addresses */
	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use + 1);
	for (i = 0; i < uc_addr_in_use; i++) {
		wr32(hw, NGBE_PSR_MAC_SWC_IDX, 1 + i);
		wr32(hw, NGBE_PSR_MAC_SWC_AD_L, 0);
		wr32(hw, NGBE_PSR_MAC_SWC_AD_H, 0);
	}

	/* Add the new addresses */
	for (i = 0; i < addr_count; i++) {
		DEBUGOUT(" Adding the secondary addresses:\n");
		addr = next(hw, &addr_list, &vmdq);
		ngbe_add_uc_addr(hw, addr, vmdq);
	}

	if (hw->addr_ctrl.overflow_promisc) {
		/* enable promisc if not already in overflow or set by user */
		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Entering address overflow promisc mode\n");
			wr32m(hw, NGBE_PSR_CTL,
			      NGBE_PSR_CTL_UPE, NGBE_PSR_CTL_UPE);
		}
	} else {
		/* only disable if set by overflow, not by user */
		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Leaving address overflow promisc mode\n");
			wr32m(hw, NGBE_PSR_CTL,
			      NGBE_PSR_CTL_UPE, 0);
		}
	}

	DEBUGOUT("ngbe_update_uc_addr_list Complete\n");
	return 0;
}

/**
 * ngbe_mta_vector - Determines bit-vector in multicast table to set
 * @hw: pointer to hardware structure
 * @mc_addr: the multicast address
 *
 * Extracts the 12 bits, from a multicast address, to determine which
 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
 * incoming rx multicast addresses, to determine the bit-vector to check in
 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
 * by the MO field of the MCSTCTRL. The MO field is set during initialization
 * to mc_filter_type.
 **/
static int ngbe_mta_vector(struct ngbe_hw *hw, u8 *mc_addr)
{
	u32 vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		DEBUGOUT("MC filter type param set incorrectly\n");
		ASSERT(0);
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}

/**
 * ngbe_set_mta - Set bit-vector in multicast table
 * @hw: pointer to hardware structure
 * @mc_addr: multicast address to hash into the table
 *
 * Sets the bit-vector in the shadow multicast table (written to HW by the
 * caller) and bumps the in-use counter.
 **/
static void ngbe_set_mta(struct ngbe_hw *hw, u8 *mc_addr)
{
	u32 vector;
	u32 vector_bit;
	u32 vector_reg;

	hw->addr_ctrl.mta_in_use++;

	vector = ngbe_mta_vector(hw, mc_addr);
	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);

	/*
	 * The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits. We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value. The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register are determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}

/**
 * ngbe_update_mc_addr_list - Updates MAC list of multicast addresses
 * @hw: pointer to hardware structure
 * @mc_addr_list: the list of new multicast addresses
 * @mc_addr_count: number of addresses
 * @next: iterator function to walk the multicast address list
 * @clear: flag, when set clears the table beforehand
 *
 * When the clear flag is set, the given list replaces any existing list.
 * Hashes the given addresses into the multicast table.
 **/
int ngbe_update_mc_addr_list(struct ngbe_hw *hw, u8 *mc_addr_list,
			     u32 mc_addr_count, ngbe_mc_addr_itr next,
			     bool clear)
{
	u32 i;
	u32 vmdq;
	u32 psrctl;

	/*
	 * Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
	hw->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	if (clear) {
		DEBUGOUT(" Clearing MTA\n");
		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
	}

	/* Update mta_shadow */
	for (i = 0; i < mc_addr_count; i++) {
		DEBUGOUT(" Adding the multicast addresses:\n");
		ngbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
	}

	/* Enable mta */
	for (i = 0; i < hw->mac.mcft_size; i++)
		wr32a(hw, NGBE_PSR_MC_TBL(0), i,
		      hw->mac.mta_shadow[i]);

	if (hw->addr_ctrl.mta_in_use > 0) {
		psrctl = rd32(hw, NGBE_PSR_CTL);
		psrctl &= ~(NGBE_PSR_CTL_MO | NGBE_PSR_CTL_MFE);
		psrctl |= NGBE_PSR_CTL_MFE |
			  (hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT);
		wr32(hw, NGBE_PSR_CTL, psrctl);
	}

	DEBUGOUT("ngbe_update_mc_addr_list Complete\n");
	return 0;
}

/**
 * ngbe_enable_mc - Enable multicast address in RAR
 * @hw: pointer to hardware structure
 *
 * Enables multicast address in RAR and the use of the multicast hash table.
 **/
int ngbe_enable_mc(struct ngbe_hw *hw)
{
	struct ngbe_addr_filter_info *a = &hw->addr_ctrl;
	u32 psrctl;

	if (a->mta_in_use > 0) {
		psrctl = rd32(hw, NGBE_PSR_CTL);
		psrctl &= ~(NGBE_PSR_CTL_MO | NGBE_PSR_CTL_MFE);
		psrctl |= NGBE_PSR_CTL_MFE |
			  (hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT);
		wr32(hw, NGBE_PSR_CTL, psrctl);
	}

	return 0;
}

/**
 * ngbe_disable_mc - Disable multicast address in RAR
 * @hw: pointer to hardware structure
 *
 * Disables multicast address in RAR and the use of the multicast hash table.
 **/
int ngbe_disable_mc(struct ngbe_hw *hw)
{
	struct ngbe_addr_filter_info *a = &hw->addr_ctrl;
	u32 psrctl;

	if (a->mta_in_use > 0) {
		psrctl = rd32(hw, NGBE_PSR_CTL);
		psrctl &= ~(NGBE_PSR_CTL_MO | NGBE_PSR_CTL_MFE);
		psrctl |= hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT;
		wr32(hw, NGBE_PSR_CTL, psrctl);
	}

	return 0;
}

/**
 * ngbe_fc_enable - Enable flow control
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to the current settings.
 **/
int ngbe_fc_enable(struct ngbe_hw *hw)
{
	int ret_val = 0;
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 fcrtl, fcrth;

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = NGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	if ((hw->fc.current_mode & ngbe_fc_tx_pause) && hw->fc.high_water) {
		if (!hw->fc.low_water || hw->fc.low_water >= hw->fc.high_water) {
			DEBUGOUT("Invalid water mark configuration\n");
			ret_val = NGBE_ERR_INVALID_LINK_SETTINGS;
			goto out;
		}
	}

	/* Negotiate the fc mode to use */
	ngbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = rd32(hw, NGBE_MAC_RX_FLOW_CTRL);
	mflcn_reg &= ~NGBE_MAC_RX_FLOW_CTRL_RFE;

	fccfg_reg = rd32(hw, NGBE_RDB_RFCC);
	fccfg_reg &= ~NGBE_RDB_RFCC_RFCE_802_3X;

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ngbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ngbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE. Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= NGBE_MAC_RX_FLOW_CTRL_RFE;
		break;
	case ngbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= NGBE_RDB_RFCC_RFCE_802_3X;
		break;
	case ngbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= NGBE_MAC_RX_FLOW_CTRL_RFE;
		fccfg_reg |= NGBE_RDB_RFCC_RFCE_802_3X;
		break;
	default:
		ERROR_REPORT1(NGBE_ERROR_ARGUMENT,
			      "Flow control param set incorrectly\n");
		ret_val = NGBE_ERR_CONFIG;
		goto out;
	}

	/* Set 802.3x based flow control settings. */
	wr32(hw, NGBE_MAC_RX_FLOW_CTRL, mflcn_reg);
	wr32(hw, NGBE_RDB_RFCC, fccfg_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	if ((hw->fc.current_mode & ngbe_fc_tx_pause) &&
	    hw->fc.high_water) {
		/* 32Byte granularity */
		fcrtl = (hw->fc.low_water << 10) |
			NGBE_RDB_RFCL_XONE;
		wr32(hw, NGBE_RDB_RFCL, fcrtl);
		fcrth = (hw->fc.high_water << 10) |
			NGBE_RDB_RFCH_XOFFE;
	} else {
		wr32(hw, NGBE_RDB_RFCL, 0);
		/*
		 * In order to prevent Tx hangs when the internal Tx
		 * switch is enabled we must set the high water mark
		 * to the Rx packet buffer size - 24KB. This allows
		 * the Tx switch to function even under heavy Rx
		 * workloads.
		 */
		fcrth = rd32(hw, NGBE_RDB_PB_SZ) - 24576;
	}

	wr32(hw, NGBE_RDB_RFCH, fcrth);

	/*
	 * Configure pause time (2 TCs per register)
	 * NOTE(review): multiplying by 0x00010000 programs only the upper
	 * 16 bits of RFCV (ixgbe-family code uses 0x00010001 to set both
	 * halves) - confirm against the ngbe datasheet that only one TC
	 * field is used here.
	 */
	reg = hw->fc.pause_time * 0x00010000;
	wr32(hw, NGBE_RDB_RFCV, reg);

	/* Configure flow control refresh threshold value */
	wr32(hw, NGBE_RDB_RFCRT, hw->fc.pause_time / 2);

out:
	return ret_val;
}

/**
 * ngbe_negotiate_fc - Negotiate flow control
 * @hw: pointer to hardware structure
 * @adv_reg: flow control advertised settings
 * @lp_reg: link partner's flow control settings
 * @adv_sym: symmetric pause bit in advertisement
 * @adv_asm: asymmetric pause bit in advertisement
 * @lp_sym: symmetric pause bit in link partner advertisement
 * @lp_asm: asymmetric pause bit in link partner advertisement
 *
 * Find the intersection between advertised settings and link partner's
 * advertised settings
 **/
static int ngbe_negotiate_fc(struct ngbe_hw *hw, u32 adv_reg, u32 lp_reg,
			     u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
	if ((!(adv_reg)) || (!(lp_reg))) {
		ERROR_REPORT3(NGBE_ERROR_UNSUPPORTED,
			      "Local or link partner's advertised flow control "
			      "settings are NULL. Local: %x, link partner: %x\n",
			      adv_reg, lp_reg);
		return NGBE_ERR_FC_NOT_NEGOTIATED;
	}

	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
		/*
		 * Now we need to check if the user selected Rx ONLY
		 * of pause frames. In this case, we had to advertise
		 * FULL flow control because we could not advertise RX
		 * ONLY. Hence, we must now check to see if we need to
		 * turn OFF the TRANSMISSION of PAUSE frames.
		 */
		if (hw->fc.requested_mode == ngbe_fc_full) {
			hw->fc.current_mode = ngbe_fc_full;
			DEBUGOUT("Flow Control = FULL.\n");
		} else {
			hw->fc.current_mode = ngbe_fc_rx_pause;
			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
		}
	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		hw->fc.current_mode = ngbe_fc_tx_pause;
		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		hw->fc.current_mode = ngbe_fc_rx_pause;
		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
	} else {
		hw->fc.current_mode = ngbe_fc_none;
		DEBUGOUT("Flow Control = NONE.\n");
	}
	return 0;
}

/**
 * ngbe_fc_autoneg_copper - Enable flow control IEEE clause 37
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37.
 **/
static int ngbe_fc_autoneg_copper(struct ngbe_hw *hw)
{
	u8 technology_ability_reg = 0;
	u8 lp_technology_ability_reg = 0;

	/* RGMII FPGA parts have no PHY to query; both sides stay 0 */
	if (!((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA)) {
		hw->phy.ops.get_adv_pause(hw, &technology_ability_reg);
		hw->phy.ops.get_lp_adv_pause(hw, &lp_technology_ability_reg);
	}
	return ngbe_negotiate_fc(hw, (u32)technology_ability_reg,
				 (u32)lp_technology_ability_reg,
				 NGBE_TAF_SYM_PAUSE, NGBE_TAF_ASM_PAUSE,
				 NGBE_TAF_SYM_PAUSE, NGBE_TAF_ASM_PAUSE);
}

/**
 * ngbe_fc_autoneg - Configure flow control
 * @hw: pointer to hardware structure
 *
 * Compares our advertised flow control capabilities to those advertised by
 * our link partner, and determines the proper flow control mode to use.
 **/
void ngbe_fc_autoneg(struct ngbe_hw *hw)
{
	int ret_val = NGBE_ERR_FC_NOT_NEGOTIATED;
	u32 speed;
	bool link_up = 0;

	/*
	 * AN should have completed when the cable was plugged in.
	 * Look for reasons to bail out. Bail out if:
	 * - FC autoneg is disabled, or if
	 * - link is not up.
	 */
	if (hw->fc.disable_fc_autoneg) {
		ERROR_REPORT1(NGBE_ERROR_UNSUPPORTED,
			      "Flow control autoneg is disabled");
		goto out;
	}

	hw->mac.ops.check_link(hw, &speed, &link_up, false);
	if (!link_up) {
		ERROR_REPORT1(NGBE_ERROR_SOFTWARE, "The link is down");
		goto out;
	}

	switch (hw->phy.media_type) {
	/* Autoneg flow control on fiber adapters */
	case ngbe_media_type_fiber:
		break;

	/* Autoneg flow control on copper adapters */
	case ngbe_media_type_copper:
		ret_val = ngbe_fc_autoneg_copper(hw);
		break;

	default:
		break;
	}

out:
	if (ret_val == NGBE_OK) {
		hw->fc.fc_was_autonegged = true;
	} else {
		/* negotiation failed or skipped: fall back to requested mode */
		hw->fc.fc_was_autonegged = false;
		hw->fc.current_mode = hw->fc.requested_mode;
	}
}

/**
 * ngbe_disable_pcie_master - Disable PCI-express master access
 * @hw: pointer to hardware structure
 *
 * Disables PCI-Express master access and verifies there are no pending
 * requests. NGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
 * bit hasn't caused the master requests to be disabled, else 0
 * is returned signifying master requests disabled.
 **/
int ngbe_disable_pcie_master(struct ngbe_hw *hw)
{
	struct ngbe_adapter *adapter = hw->back;
	int status = 0;
	u32 i;
	u16 vid = 0;
	u16 cmd = 0;
	u32 reg32 = 0;

	/* Always set this bit to ensure any future transactions are blocked */
	pci_clear_master(((struct ngbe_adapter *)hw->back)->pdev);

	/* Exit if master requests are blocked */
	if (!(rd32(hw, NGBE_PX_TRANSACTION_PENDING)) ||
	    NGBE_REMOVED(hw->hw_addr))
		goto out;

	/* Poll for master request bit to clear */
	for (i = 0; i < NGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(rd32(hw, NGBE_PX_TRANSACTION_PENDING)))
			goto out;
	}

	ERROR_REPORT1(NGBE_ERROR_POLLING,
		      "PCIe transaction pending bit did not clear.\n");
	status = NGBE_ERR_MASTER_REQUESTS_PENDING;

	/* print out PCI configuration space value */
	ngbe_print_tx_hang_status(adapter);
	pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &vid);
	ERROR_REPORT1(NGBE_ERROR_POLLING, "PCI VID is 0x%x\n", vid);

	pci_read_config_word(adapter->pdev, PCI_COMMAND, &cmd);
	ERROR_REPORT1(NGBE_ERROR_POLLING, "PCI COMMAND value is 0x%x.\n", cmd);

	/* NOTE(review): 0x10000 is a magic register offset dumped for
	 * diagnostics only - confirm its meaning against the datasheet.
	 */
	reg32 = rd32(hw, 0x10000);
	ERROR_REPORT1(NGBE_ERROR_POLLING, "read 0x10000 value is 0x%08x\n", reg32);

out:
	return status;
}

/**
 * ngbe_acquire_swfw_sync - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
int ngbe_acquire_swfw_sync(struct ngbe_hw *hw, u32 mask)
{
	u32 gssr = 0;
	u32 swmask = mask;
	u32 fwmask = mask << 16;   /* FW owns the mirrored upper 16 bits */
	u32 timeout = 200;
	u32 i;

	for (i = 0; i < timeout; i++) {
		/*
		 * SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ngbe_get_eeprom_semaphore(hw))
			return NGBE_ERR_SWFW_SYNC;

		if (ngbe_check_mng_access(hw)) {
			gssr = rd32(hw, NGBE_MNG_SWFW_SYNC);
			if (!(gssr & (fwmask | swmask))) {
				gssr |= swmask;
				wr32(hw, NGBE_MNG_SWFW_SYNC, gssr);
				ngbe_release_eeprom_semaphore(hw);
				return 0;
			} else {
				/* Resource is currently in use by FW or SW */
				ngbe_release_eeprom_semaphore(hw);
				msec_delay(5);
			}
		}
	}

	ERROR_REPORT1(NGBE_ERROR_POLLING,
		      "ngbe_acquire_swfw_sync: i = %u, gssr = %u\n", i, gssr);

	/* If time expired clear the bits holding the lock and retry */
	if (gssr & (fwmask | swmask))
		ngbe_release_swfw_sync(hw, gssr & (fwmask | swmask));

	msec_delay(5);
	return NGBE_ERR_SWFW_SYNC;
}

/**
 * ngbe_release_swfw_sync - Release SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to release
 *
 * Releases the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
void ngbe_release_swfw_sync(struct ngbe_hw *hw, u32 mask)
{
	ngbe_get_eeprom_semaphore(hw);
	if (ngbe_check_mng_access(hw))
		wr32m(hw, NGBE_MNG_SWFW_SYNC, mask, 0);

	ngbe_release_eeprom_semaphore(hw);
}

/**
 * ngbe_disable_sec_rx_path - Stops the receive data path
 * @hw: pointer to hardware structure
 *
 * Stops the receive data path and waits for the HW to internally empty
 * the Rx security block
 **/
int ngbe_disable_sec_rx_path(struct ngbe_hw *hw)
{
#define NGBE_MAX_SECRX_POLL 40

	int i;
	int secrxreg;

	wr32m(hw, NGBE_RSEC_CTL,
	      NGBE_RSEC_CTL_RX_DIS, NGBE_RSEC_CTL_RX_DIS);
	for (i = 0; i < NGBE_MAX_SECRX_POLL; i++) {
		secrxreg = rd32(hw, NGBE_RSEC_ST);
		if (secrxreg & NGBE_RSEC_ST_RSEC_RDY)
			break;
		else
			/* Use interrupt-safe sleep just in case */
			usec_delay(1000);
	}

	/* For informational purposes only */
	if (i >= NGBE_MAX_SECRX_POLL)
		DEBUGOUT("Rx unit being enabled before security "
			 "path fully disabled. Continuing with init.\n");

	return 0;
}

/**
 * ngbe_enable_sec_rx_path - Enables the receive data path
 * @hw: pointer to hardware structure
 *
 * Enables the receive data path.
 **/
int ngbe_enable_sec_rx_path(struct ngbe_hw *hw)
{
	wr32m(hw, NGBE_RSEC_CTL,
	      NGBE_RSEC_CTL_RX_DIS, 0);
	NGBE_WRITE_FLUSH(hw);

	return 0;
}

/**
 * ngbe_insert_mac_addr - Find a RAR for this mac address
 * @hw: pointer to hardware structure
 * @addr: Address to put into receive address register
 * @vmdq: VMDq pool to assign
 *
 * Puts an ethernet address into a receive address register, or
 * finds the rar that it is aleady in; adds to the pool list.
 * Returns the RAR index used, or NGBE_ERR_INVALID_MAC_ADDR on overflow.
 **/
int ngbe_insert_mac_addr(struct ngbe_hw *hw, u8 *addr, u32 vmdq)
{
	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
	u32 rar;
	u32 rar_low, rar_high;
	u32 addr_low, addr_high;

	/* swap bytes for HW little endian */
	addr_low = addr[5] | (addr[4] << 8)
			   | (addr[3] << 16)
			   | (addr[2] << 24);
	addr_high = addr[1] | (addr[0] << 8);

	/*
	 * Either find the mac_id in rar or find the first empty space.
	 * rar_highwater points to just after the highest currently used
	 * rar in order to shorten the search. It grows when we add a new
	 * rar to the top.
	 */
	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
		wr32(hw, NGBE_PSR_MAC_SWC_IDX, rar);
		rar_high = rd32(hw, NGBE_PSR_MAC_SWC_AD_H);

		if (((NGBE_PSR_MAC_SWC_AD_H_AV & rar_high) == 0)
		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
			first_empty_rar = rar;
		} else if ((rar_high & 0xFFFF) == addr_high) {
			rar_low = rd32(hw, NGBE_PSR_MAC_SWC_AD_L);
			if (rar_low == addr_low)
				break;    /* found it already in the rars */
		}
	}

	if (rar < hw->mac.rar_highwater) {
		/* address already present in a RAR: nothing to program */
	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
		/* stick it into first empty RAR slot we found */
		rar = first_empty_rar;
		hw->mac.ops.set_rar(hw, rar, addr, vmdq,
				    NGBE_PSR_MAC_SWC_AD_H_AV);
	} else if (rar == hw->mac.rar_highwater) {
		/* add it to the top of the list and inc the highwater mark */
		hw->mac.ops.set_rar(hw, rar, addr, vmdq,
				    NGBE_PSR_MAC_SWC_AD_H_AV);
		hw->mac.rar_highwater++;
	} else if (rar >= hw->mac.num_rar_entries) {
		return NGBE_ERR_INVALID_MAC_ADDR;
	}

	return rar;
}

/**
 * ngbe_clear_vmdq - Disassociate a VMDq pool index from a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to disassociate
 * @vmdq: VMDq pool index to remove from the rar (unused here)
 **/
int ngbe_clear_vmdq(struct ngbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 mpsar_lo;
	u32 rar_entries = hw->mac.num_rar_entries;

	UNREFERENCED_PARAMETER(vmdq);

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		ERROR_REPORT2(NGBE_ERROR_ARGUMENT,
			      "RAR index %d is out of range.\n", rar);
		return NGBE_ERR_INVALID_ARGUMENT;
	}

	wr32(hw, NGBE_PSR_MAC_SWC_IDX, rar);
	mpsar_lo = rd32(hw, NGBE_PSR_MAC_SWC_VM);

	if (NGBE_REMOVED(hw->hw_addr))
		goto done;

	if (!mpsar_lo)
		goto done;

	/* was that the last pool using this rar?
	 * NOTE(review): this branch is unreachable - the "!mpsar_lo" test
	 * above already jumped to done when mpsar_lo == 0, and nothing
	 * clears the pool bit in between (the ixgbe-family original clears
	 * the vmdq bit from mpsar before this check). clear_rar is never
	 * invoked from here; confirm intended behavior against the
	 * reference driver.
	 */
	if (mpsar_lo == 0 && rar != 0)
		hw->mac.ops.clear_rar(hw, rar);
done:
	return 0;
}

/**
 * ngbe_set_vmdq - Associate a VMDq pool index with a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to associate with a VMDq index
 * @pool: VMDq pool index (unused; only the rar bound is validated)
 **/
int ngbe_set_vmdq(struct ngbe_hw *hw, u32 rar, u32 pool)
{
	u32 rar_entries = hw->mac.num_rar_entries;

	UNREFERENCED_PARAMETER(pool);

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		ERROR_REPORT2(NGBE_ERROR_ARGUMENT,
			      "RAR index %d is out of range.\n", rar);
		return NGBE_ERR_INVALID_ARGUMENT;
	}

	return 0;
}

/**
 * This function should only be involved in the IOV mode.
 * In IOV mode, Default pool is next pool after the number of
 * VFs advertized and not 0.
 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
 *
 * ngbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
 * @hw: pointer to hardware struct
 * @vmdq: VMDq pool index
 **/
int ngbe_set_vmdq_san_mac(struct ngbe_hw *hw, u32 vmdq)
{
	u32 rar = hw->mac.san_mac_rar_index;

	/* NOTE(review): original comment here was just "???". The bound
	 * presumably rejects pool indexes beyond the 32-bit SWC_VM bitmap,
	 * but "> 32" admits vmdq == 32, which would shift out of range -
	 * confirm whether ">= 32" was intended.
	 */
	if (vmdq > 32)
		return -1;

	wr32(hw, NGBE_PSR_MAC_SWC_IDX, rar);
	wr32(hw, NGBE_PSR_MAC_SWC_VM, 1 << vmdq);

	return 0;
}

/**
 * ngbe_init_uta_tables - Initialize the Unicast Table Array
 * @hw: pointer to hardware structure
 **/
int ngbe_init_uta_tables(struct ngbe_hw *hw)
{
	int i;

	DEBUGOUT(" Clearing UTA\n");

	for (i = 0; i < 128; i++)
		wr32(hw, NGBE_PSR_UC_TBL(i), 0);

	return 0;
}

/**
 * ngbe_find_vlvf_slot - find the vlanid or the first empty slot
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 *
 * return the VLVF index where this VLAN id should be placed
 *
 **/
int ngbe_find_vlvf_slot(struct ngbe_hw *hw, u32 vlan)
{
	u32 bits = 0;
	u32 first_empty_slot = 0;   /* 0 doubles as "none found": slot 0 is reserved */
	int regindex;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/*
	 * Search for the vlan id in the VLVF entries. Save off the first empty
	 * slot found along the way
	 */
	for (regindex = 1; regindex < NGBE_PSR_VLAN_SWC_ENTRIES; regindex++) {
		wr32(hw, NGBE_PSR_VLAN_SWC_IDX, regindex);
		bits = rd32(hw, NGBE_PSR_VLAN_SWC);
		if (!bits && !(first_empty_slot))
			first_empty_slot = regindex;
		else if ((bits & 0x0FFF) == vlan)
			break;
	}

	/*
	 * If regindex is less than NGBE_VLVF_ENTRIES, then we found the vlan
	 * in the VLVF. Else use the first empty VLVF register for this
	 * vlan id.
	 */
	if (regindex >= NGBE_PSR_VLAN_SWC_ENTRIES) {
		if (first_empty_slot)
			regindex = first_empty_slot;
		else {
			ERROR_REPORT1(NGBE_ERROR_SOFTWARE,
				      "No space in VLVF.\n");
			regindex = NGBE_ERR_NO_SPACE;
		}
	}

	return regindex;
}

/**
 * ngbe_set_vfta - Set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 **/
int ngbe_set_vfta(struct ngbe_hw *hw, u32 vlan, u32 vind,
		  bool vlan_on)
{
	int regindex;
	u32 bitindex;
	u32 vfta;
	u32 targetbit;
	int ret_val = 0;
	bool vfta_changed = false;

	if (vlan > 4095)
		return NGBE_ERR_PARAM;

	/*
	 * this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regindex = (vlan >> 5) & 0x7F;
	bitindex = vlan & 0x1F;
	targetbit = (1 << bitindex);
	/* errata 5 */
	vfta = hw->mac.vft_shadow[regindex];
	if (vlan_on) {
		if (!(vfta & targetbit)) {
			vfta |= targetbit;
			vfta_changed = true;
		}
	} else {
		if ((vfta & targetbit)) {
			vfta &= ~targetbit;
			vfta_changed = true;
		}
	}

	/* Part 2
	 * Call ngbe_set_vlvf to set VLVFB and VLVF
	 */
	ret_val = ngbe_set_vlvf(hw, vlan, vind, vlan_on,
				&vfta_changed);
	if (ret_val != 0)
		return ret_val;

	if (vfta_changed)
		wr32(hw, NGBE_PSR_VLAN_TBL(regindex), vfta);
	/* errata 5 */
	hw->mac.vft_shadow[regindex] = vfta;
	return 0;
}

/**
 * ngbe_set_vlvf - Set VLAN Pool Filter
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
 * @vfta_changed: pointer to boolean flag which indicates whether VFTA
 *                should be changed
 *
 * Turn on/off specified bit in VLVF table.
 **/
int ngbe_set_vlvf(struct ngbe_hw *hw, u32 vlan, u32 vind,
		  bool vlan_on, bool *vfta_changed)
{
	u32 vt;

	if (vlan > 4095)
		return NGBE_ERR_PARAM;

	/* If VT Mode is set
	 *   Either vlan_on
	 *     make sure the vlan is in VLVF
	 *     set the vind bit in the matching VLVFB
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	vt = rd32(hw, NGBE_CFG_PORT_CTL);
	if (vt & NGBE_CFG_PORT_CTL_NUM_VT_MASK) {
		int vlvf_index;
		u32 bits = 0;

		vlvf_index = ngbe_find_vlvf_slot(hw, vlan);
		if (vlvf_index < 0)
			return vlvf_index;

		wr32(hw, NGBE_PSR_VLAN_SWC_IDX, vlvf_index);
		if (vlan_on) {
			/* set the pool bit */
			if (vind < 32) {
				bits = rd32(hw,
					    NGBE_PSR_VLAN_SWC_VM_L);
				bits |= (1 << vind);
				wr32(hw,
				     NGBE_PSR_VLAN_SWC_VM_L,
				     bits);
			}
		} else {
			/* clear the pool bit */
			if (vind < 32) {
				bits = rd32(hw,
					    NGBE_PSR_VLAN_SWC_VM_L);
				bits &= ~(1 << vind);
				wr32(hw,
				     NGBE_PSR_VLAN_SWC_VM_L,
				     bits);
			} else {
				/* NOTE(review): "|=" on a zero-initialized
				 * local is effectively "=" here; bits is only
				 * read below to decide whether pools remain.
				 */
				bits |= rd32(hw,
					     NGBE_PSR_VLAN_SWC_VM_L);
			}
		}

		/*
		 * If there are still bits set in the VLVFB registers
		 * for the VLAN ID indicated we need to see if the
		 * caller is requesting that we clear the VFTA entry bit.
		 * If the caller has requested that we clear the VFTA
		 * entry bit but there are still pools/VFs using this VLAN
		 * ID entry then ignore the request. We're not worried
		 * about the case where we're turning the VFTA VLAN ID
		 * entry bit on, only when requested to turn it off as
		 * there may be multiple pools and/or VFs using the
		 * VLAN ID entry. In that case we cannot clear the
		 * VFTA bit until all pools/VFs using that VLAN ID have also
		 * been cleared. This will be indicated by "bits" being
		 * zero.
		 */
		if (bits) {
			wr32(hw, NGBE_PSR_VLAN_SWC,
			     (NGBE_PSR_VLAN_SWC_VIEN | vlan));
			if ((!vlan_on) && (vfta_changed != NULL)) {
				/* someone wants to clear the vfta entry
				 * but some pools/VFs are still using it.
				 * Ignore it.
				 */
				*vfta_changed = false;
			}
		} else
			wr32(hw, NGBE_PSR_VLAN_SWC, 0);
	}

	return 0;
}

/**
 * ngbe_clear_vfta - Clear VLAN filter table
 * @hw: pointer to hardware structure
 *
 * Clears the VLAN filer table, and the VMDq index associated with the filter
 **/
int ngbe_clear_vfta(struct ngbe_hw *hw)
{
	u32 offset;

	for (offset = 0; offset < hw->mac.vft_size; offset++) {
		wr32(hw, NGBE_PSR_VLAN_TBL(offset), 0);
		/* errata 5 */
		hw->mac.vft_shadow[offset] = 0;
	}

	for (offset = 0; offset < NGBE_PSR_VLAN_SWC_ENTRIES; offset++) {
		wr32(hw, NGBE_PSR_VLAN_SWC_IDX, offset);
		wr32(hw, NGBE_PSR_VLAN_SWC, 0);
		wr32(hw, NGBE_PSR_VLAN_SWC_VM_L, 0);
	}

	return 0;
}

/**
 * ngbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for anti-spoofing
 * @pf: Physical Function pool - do not enable anti-spoofing for the PF
 *
 **/
void ngbe_set_mac_anti_spoofing(struct ngbe_hw *hw, bool enable, int pf)
{
	u64 pfvfspoof = 0;

	if (enable) {
		/*
		 * The PF should be allowed to spoof so that it can support
		 * emulation mode NICs. Do not set the bits assigned to the PF
		 * Remaining pools belong to the PF so they do not need to have
		 * anti-spoofing enabled.
		 */
		pfvfspoof = (1 << pf) - 1;   /* bits [0, pf): all VF pools below the PF */
		wr32(hw, NGBE_TDM_MAC_AS_L,
		     pfvfspoof & 0xff);
	} else {
		wr32(hw, NGBE_TDM_MAC_AS_L, 0);
	}
}

/**
 * ngbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for VLAN anti-spoofing
 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
 *
 **/
void ngbe_set_vlan_anti_spoofing(struct ngbe_hw *hw, bool enable, int vf)
{
	u32 pfvfspoof;

	/* NOTE(review): "vf > 8" admits vf == 8 (bit 8); the MAC
	 * anti-spoofing path above masks to 8 bits - confirm the valid
	 * pool range against the datasheet.
	 */
	if (vf > 8)
		return;

	pfvfspoof = rd32(hw, NGBE_TDM_VLAN_AS_L);
	if (enable)
		pfvfspoof |= (1 << vf);
	else
		pfvfspoof &= ~(1 << vf);
	wr32(hw, NGBE_TDM_VLAN_AS_L, pfvfspoof);
}

/**
 * ngbe_set_ethertype_anti_spoofing - Enable/Disable Ethertype anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for Ethertype anti-spoofing
 * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
 *
 **/
void ngbe_set_ethertype_anti_spoofing(struct ngbe_hw *hw,
				      bool enable, int vf)
{
	u32 pfvfspoof;

	if (vf <= 8) {
		pfvfspoof = rd32(hw, NGBE_TDM_ETYPE_AS_L);
		if (enable)
			pfvfspoof |= (1 << vf);
		else
			pfvfspoof &= ~(1 << vf);
		wr32(hw, NGBE_TDM_ETYPE_AS_L, pfvfspoof);
	}
}

/**
 * ngbe_get_device_caps - Get additional device capabilities
 * @hw: pointer to hardware structure
 * @device_caps: the EEPROM word with the extra device capabilities
 *
 * This function will read the EEPROM location for the device capabilities,
 * and return the word through device_caps.
 **/
int ngbe_get_device_caps(struct ngbe_hw *hw, u16 *device_caps)
{
	hw->eeprom.ops.read(hw,
		hw->eeprom.sw_region_offset + NGBE_DEVICE_CAPS, device_caps);

	return 0;
}

/**
 * ngbe_calculate_checksum - Calculate checksum for buffer
 * @buffer: pointer to EEPROM
 * @length: size of EEPROM to calculate a checksum for
 *
 * Calculates the checksum for some buffer on a specified length. The
 * checksum calculated is returned (two's complement of the byte sum,
 * so that summing the buffer plus the checksum yields zero).
 **/
u8 ngbe_calculate_checksum(u8 *buffer, u32 length)
{
	u32 i;
	u8 sum = 0;

	if (!buffer)
		return 0;

	for (i = 0; i < length; i++)
		sum += buffer[i];

	return (u8) (0 - sum);
}

/**
 * ngbe_host_interface_command - Issue command to manageability block
 * @hw: pointer to the HW structure
 * @buffer: contains the command to write and where the return status will
 *  be placed
 * @length: length of buffer, must be multiple of 4 bytes
 * @timeout: time in ms to wait for command completion
 * @return_data: read and return data from the buffer (true) or not (false)
 *  Needed because FW structures are big endian and decoding of
 *  these fields can be 8 bit or 16 bit based on command. Decoding
 *  is not easily understood without making a table of commands.
 *  So we will leave this up to the caller to read back the data
 *  in these cases.
 *
 * Communicates with the manageability block. On success return 0
 * else return NGBE_ERR_HOST_INTERFACE_COMMAND.
 **/
int ngbe_host_interface_command(struct ngbe_hw *hw, u32 *buffer,
				u32 length, u32 timeout, bool return_data)
{
	u32 hicr, i, bi;
	u32 hdr_size = sizeof(struct ngbe_hic_hdr);
	u16 buf_len;
	u32 dword_len;
	int status = 0;
	u32 buf[64] = {};

	if (length == 0 || length > NGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
		return NGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	if (hw->mac.ops.acquire_swfw_sync(hw, NGBE_MNG_SWFW_SYNC_SW_MB)
	    != 0) {
		return NGBE_ERR_SWFW_SYNC;
	}

	/* Calculate length in DWORDs.
We must be DWORD aligned */ + if ((length % (sizeof(u32))) != 0) { + DEBUGOUT("Buffer length failure, not aligned to dword"); + status = NGBE_ERR_INVALID_ARGUMENT; + goto rel_out; + } + + /*read to clean all status*/ + if (ngbe_check_mng_access(hw)) { + hicr = rd32(hw, NGBE_MNG_MBOX_CTL); + if ((hicr & NGBE_MNG_MBOX_CTL_FWRDY)) + ERROR_REPORT1(NGBE_ERROR_CAUTION, + "fwrdy is set before command.\n"); + } + + dword_len = length >> 2; + + /* The device driver writes the relevant command block + * into the ram area. + */ + for (i = 0; i < dword_len; i++) { + if (ngbe_check_mng_access(hw)) + wr32a(hw, NGBE_MNG_MBOX, + i, NGBE_CPU_TO_LE32(buffer[i])); + else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } + /* Setting this bit tells the ARC that a new command is pending. */ + if (ngbe_check_mng_access(hw)) + wr32m(hw, NGBE_MNG_MBOX_CTL, + NGBE_MNG_MBOX_CTL_SWRDY, NGBE_MNG_MBOX_CTL_SWRDY); + else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + + for (i = 0; i < timeout; i++) { + if (ngbe_check_mng_access(hw)) { + hicr = rd32(hw, NGBE_MNG_MBOX_CTL); + if ((hicr & NGBE_MNG_MBOX_CTL_FWRDY)) + break; + } + msec_delay(1); + } + + buf[0] = rd32(hw, NGBE_MNG_MBOX); + /* Check command completion */ + if (timeout != 0 && i == timeout) { + ERROR_REPORT1(NGBE_ERROR_CAUTION, + "Command has failed with no status valid.\n"); + printk("===%x= %x=\n", buffer[0] & 0xff, (~buf[0] >> 24)); + printk("===%08x\n", rd32(hw, 0x1e100)); + printk("===%08x\n", rd32(hw, 0x1e104)); + printk("===%08x\n", rd32(hw, 0x1e108)); + printk("===%08x\n", rd32(hw, 0x1e10c)); + printk("===%08x\n", rd32(hw, 0x1e044)); + printk("===%08x\n", rd32(hw, 0x10000)); + if( (buffer[0] & 0xff) != (~buf[0] >> 24)) { + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; + } + } + + if (!return_data) + goto rel_out; + + /* Calculate length in DWORDs */ + dword_len = hdr_size >> 2; + + /* first pull in the header so we know the buffer length */ + for (bi = 0; bi < dword_len; bi++) { + if 
(ngbe_check_mng_access(hw)) { + buffer[bi] = rd32a(hw, NGBE_MNG_MBOX, + bi); + NGBE_LE32_TO_CPUS(&buffer[bi]); + } else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } + + /* If there is any thing in data position pull it in */ + buf_len = ((struct ngbe_hic_hdr *)buffer)->buf_len; + if (buf_len == 0) + goto rel_out; + + if (length < buf_len + hdr_size) { + DEBUGOUT("Buffer not large enough for reply message.\n"); + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; + } + + /* Calculate length in DWORDs, add 3 for odd lengths */ + dword_len = (buf_len + 3) >> 2; + + /* Pull in the rest of the buffer (bi is where we left off) */ + for (; bi <= dword_len; bi++) { + if (ngbe_check_mng_access(hw)) { + buffer[bi] = rd32a(hw, NGBE_MNG_MBOX, + bi); + NGBE_LE32_TO_CPUS(&buffer[bi]); + } else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } + +rel_out: + hw->mac.ops.release_swfw_sync(hw, NGBE_MNG_SWFW_SYNC_SW_MB); + return status; +} + +/** + * ngbe_set_fw_drv_ver - Sends driver version to firmware + * @hw: pointer to the HW structure + * @maj: driver version major number + * @min: driver version minor number + * @build: driver version build number + * @sub: driver version sub build number + * + * Sends driver version number to firmware through the manageability + * block. On success return 0 + * else returns NGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or NGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
+ **/ +int ngbe_set_fw_drv_ver(struct ngbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub) +{ + struct ngbe_hic_drv_info fw_cmd; + int i; + int ret_val = 0; + + fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; + fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; + fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + fw_cmd.port_num = (u8)hw->bus.func; + fw_cmd.ver_maj = maj; + fw_cmd.ver_min = min; + fw_cmd.ver_build = build; + fw_cmd.ver_sub = sub; + fw_cmd.hdr.checksum = 0; + fw_cmd.hdr.checksum = ngbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); + fw_cmd.pad = 0; + fw_cmd.pad2 = 0; + + usec_delay(5000); + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + ret_val = ngbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(fw_cmd), + NGBE_HI_COMMAND_TIMEOUT, + true); + if (ret_val != 0) + continue; + + if (fw_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + ret_val = 0; + else + ret_val = NGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + return ret_val; +} + +/** + * ngbe_reset_hostif - send reset cmd to fw + * @hw: pointer to hardware structure + * + * Sends reset cmd to firmware through the manageability + * block. On success return 0 + * else returns NGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or NGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
+ **/ +int ngbe_reset_hostif(struct ngbe_hw *hw) +{ + struct ngbe_hic_reset reset_cmd; + int i; + int status = 0; + + reset_cmd.hdr.cmd = FW_RESET_CMD; + reset_cmd.hdr.buf_len = FW_RESET_LEN; + reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + reset_cmd.lan_id = hw->bus.lan_id; + reset_cmd.reset_type = (u16)hw->reset_type; + reset_cmd.hdr.checksum = 0; + reset_cmd.hdr.checksum = ngbe_calculate_checksum((u8 *)&reset_cmd, + (FW_CEM_HDR_LEN + reset_cmd.hdr.buf_len)); + + /* send reset request to FW and wait for response */ + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + status = ngbe_host_interface_command(hw, (u32 *)&reset_cmd, + sizeof(reset_cmd), + NGBE_HI_COMMAND_TIMEOUT, + true); + msleep(1); + if (status != 0) + continue; + + if (reset_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + return status; +} + +static u16 ngbe_crc16_ccitt(const u8 *buf, int size) +{ + u16 crc = 0; + int i; + while (--size >= 0) { + crc ^= (u16)*buf++ << 8; + for (i = 0; i < 8; i++) { + if (crc & 0x8000) + crc = crc << 1 ^ 0x1021; + else + crc <<= 1; + } + } + return crc; +} + +int ngbe_upgrade_flash_hostif(struct ngbe_hw *hw, u32 region, + const u8 *data, u32 size) +{ + struct ngbe_hic_upg_start start_cmd; + struct ngbe_hic_upg_write write_cmd; + struct ngbe_hic_upg_verify verify_cmd; + u32 offset; + int status = 0; + + start_cmd.hdr.cmd = FW_FLASH_UPGRADE_START_CMD; + start_cmd.hdr.buf_len = FW_FLASH_UPGRADE_START_LEN; + start_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + start_cmd.module_id = (u8)region; + start_cmd.hdr.checksum = 0; + start_cmd.hdr.checksum = ngbe_calculate_checksum((u8 *)&start_cmd, + (FW_CEM_HDR_LEN + start_cmd.hdr.buf_len)); + start_cmd.pad2 = 0; + start_cmd.pad3 = 0; + + status = ngbe_host_interface_command(hw, (u32 *)&start_cmd, + sizeof(start_cmd), + NGBE_HI_FLASH_ERASE_TIMEOUT, + true); + + if (start_cmd.hdr.cmd_or_resp.ret_status == 
FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else { + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + return status; + } + + for (offset = 0; offset < size;) { + write_cmd.hdr.cmd = FW_FLASH_UPGRADE_WRITE_CMD; + if (size - offset > 248) { + write_cmd.data_len = 248 / 4; + write_cmd.eof_flag = 0; + } else { + write_cmd.data_len = (u8)((size - offset) / 4); + write_cmd.eof_flag = 1; + } + memcpy((u8 *)write_cmd.data, &data[offset], write_cmd.data_len * 4); + write_cmd.hdr.buf_len = (write_cmd.data_len + 1) * 4; + write_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + write_cmd.check_sum = ngbe_crc16_ccitt((u8 *)write_cmd.data, + write_cmd.data_len * 4); + + status = ngbe_host_interface_command(hw, (u32 *)&write_cmd, + sizeof(write_cmd), + NGBE_HI_FLASH_UPDATE_TIMEOUT, + true); + if (start_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else { + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + return status; + } + offset += write_cmd.data_len * 4; + } + + verify_cmd.hdr.cmd = FW_FLASH_UPGRADE_VERIFY_CMD; + verify_cmd.hdr.buf_len = FW_FLASH_UPGRADE_VERIFY_LEN; + verify_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + switch (region) { + case NGBE_MODULE_EEPROM: + verify_cmd.action_flag = NGBE_RELOAD_EEPROM; + break; + case NGBE_MODULE_FIRMWARE: + verify_cmd.action_flag = NGBE_RESET_FIRMWARE; + break; + case NGBE_MODULE_HARDWARE: + verify_cmd.action_flag = NGBE_RESET_LAN; + break; + default: + ERROR_REPORT1(NGBE_ERROR_ARGUMENT, + "ngbe_upgrade_flash_hostif: region err %x\n", region); + return status; + } + + verify_cmd.hdr.checksum = ngbe_calculate_checksum((u8 *)&verify_cmd, + (FW_CEM_HDR_LEN + verify_cmd.hdr.buf_len)); + + status = ngbe_host_interface_command(hw, (u32 *)&verify_cmd, + sizeof(verify_cmd), + NGBE_HI_FLASH_VERIFY_TIMEOUT, + true); + + if (verify_cmd.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else { + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + } + return status; +} + +/* cmd_addr is used for 
some special command: + * 1. to be sector address, when implemented erase sector command + * 2. to be flash address when implemented read, write flash address + */ +static int ngbe_fmgr_cmd_op(struct ngbe_hw *hw, u32 cmd, u32 cmd_addr) +{ + u32 cmd_val = 0, timeout = 0; + + cmd_val = NGBE_SPI_CMD_CMD(cmd) | + NGBE_SPI_CMD_CLK(SPI_CLK_DIV) | + cmd_addr; + wr32(hw, NGBE_SPI_CMD, cmd_val); + + while (1) { + if (rd32(hw, SPI_H_STA_REG_ADDR) & 0x1) + break; + + if (timeout == SPI_TIME_OUT_VALUE) + return -ETIMEDOUT; + + timeout = timeout + 1; + udelay(5); + } + + return 0; +} + +static int fmgr_usr_cmd_op(struct ngbe_hw *hw, u32 usr_cmd) +{ + int status = 0; + + wr32(hw, SPI_H_USR_CMD_REG_ADDR, usr_cmd); + status = ngbe_fmgr_cmd_op(hw, SPI_CMD_USER_CMD, 0); + + return status; +} + +static int flash_erase_chip(struct ngbe_hw *hw) +{ + return ngbe_fmgr_cmd_op(hw, SPI_CMD_ERASE_CHIP, 0); +} + +static int flash_erase_sector(struct ngbe_hw *hw, u32 sec_addr) +{ + return ngbe_fmgr_cmd_op(hw, SPI_CMD_ERASE_SECTOR, sec_addr); +} + +static int ngbe_flash_write_dword(struct ngbe_hw *hw, u32 addr, u32 dword) +{ + int status = 0; + u32 data; + + wr32(hw, SPI_H_DAT_REG_ADDR, dword); + status = ngbe_fmgr_cmd_op(hw, SPI_CMD_WRITE_DWORD, addr); + if (status) + return status; + + ngbe_flash_read_dword(hw, addr, &data); + if (dword != data) + return -EIO; + + return 0; +} + +int ngbe_flash_read_dword(struct ngbe_hw *hw, u32 addr, u32 *data) +{ + int ret = 0; + + ret = ngbe_fmgr_cmd_op(hw, SPI_CMD_READ_DWORD, addr); + if (ret < 0) + return ret; + + *data = rd32(hw, SPI_H_DAT_REG_ADDR); + + return ret; +} + +static int ngbe_flash_write_unlock(struct ngbe_hw *hw) +{ + int status; + struct ngbe_hic_read_shadow_ram buffer; + + buffer.hdr.req.cmd = 0x40; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = 0; + buffer.hdr.req.checksum = 0xFF; + + /* convert offset from words to bytes */ + buffer.address = 0; + /* one word */ + buffer.length = 0; + + status = 
ngbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), 5000,false); + + return status; +} + +static int check_image_version(struct ngbe_hw *hw, const u8 *data) +{ + u32 image_v = 0x0; + u32 f_chip_v = 0x0; + u8 rdata_2; + u8 rdata_3, rdata_4; + u32 f_sub_id; + u8 wol = 0, ncsi = 0; + printk("===========check_image_version============\n"); + + //read image version + image_v = data[0x13a] | data[0x13b] << 8 | + data[0x13c] << 16 | data[0x13d] << 24; + printk("check_image_version=image_v: %x\n", image_v); + + //read subsytem id to check ncsi and wol + f_sub_id = data[0xfffdc] << 8 | data[0xfffdd]; + printk("The image's sub_id : %04x\n", f_sub_id); + if ((f_sub_id & 0x8000) == 0x8000) + ncsi = 1; + if ((f_sub_id & 0x4000) == 0x4000) + wol = 1; + printk("=2=ncsi : %x - wol : %x\n", ncsi, wol); + + rdata_2 = data[0xfffd8]; + printk("check_image_version=rdata_2-fffdc: %x\n", rdata_2); + rdata_3 = data[0xbc]; + printk("check_image_version=rdata_3-bc: %x\n", rdata_3); + rdata_4 = data[0x3c]; + printk("check_image_version=rdata_4-3c: %x\n", rdata_4); + + //check card's chip version + if ((image_v < 0x10015) && (image_v != 0x10012) && (image_v != 0x10013)) { + f_chip_v = 0x41;//'A' + } else if (image_v > 0x10015) { + f_chip_v = rdata_2 & 0xff; + } else if ((image_v == 0x10012) || (image_v == 0x10013) || (image_v == 0x10015)) { + if (wol == 1 || ncsi == 1) { + if (rdata_3 == 0x02) + f_chip_v = 0x41; + else + f_chip_v = 0x42; + } else { + if (rdata_4 == 0x80) + f_chip_v = 0x42; + else + f_chip_v = 0x41; + } + } + + printk("===========check_image_version============\n"); + return f_chip_v; +} + +int ngbe_upgrade_flash(struct ngbe_hw *hw, u32 region, + const u8 *data, u32 size) +{ + u32 sector_num = 0; + u32 read_data = 0; + u8 status = 0; + u8 skip = 0; + u32 i = 0,k = 0, n = 0; + u8 flash_vendor = 0; + u32 num[128] = {0}; + u32 chip_v = 0, image_v = 0; + u32 mac_addr0_dword0_t, mac_addr0_dword1_t; + u32 mac_addr1_dword0_t, mac_addr1_dword1_t; + u32 
mac_addr2_dword0_t, mac_addr2_dword1_t; + u32 mac_addr3_dword0_t, mac_addr3_dword1_t; + u32 serial_num_dword0_t, serial_num_dword1_t, serial_num_dword2_t; + u32 sn[24]; + u8 sn_str[40]; + u8 sn_is_str = true; + u8 vpd_tend[256]; + u32 curadr = 0; + u32 vpdadr = 0; + u8 id_str_len, pn_str_len, sn_str_len, rv_str_len; + u16 vpd_ro_len; + u32 chksum = 0; + u16 vpd_offset, vpd_end; + + read_data = rd32(hw, 0x10200); + if (read_data & 0x80000000) { + printk("The flash has been successfully upgraded once, please reboot to make it work.\n"); + return -EOPNOTSUPP; + } + + + chip_v = (rd32(hw, 0x10010) & BIT(16)) ? 0x41 : 0x42; + image_v = check_image_version(hw, data); + + printk("Checking chip/image version .......\n"); + printk("The image chip_v is %c\n", image_v); + printk("The nic chip_v is %c\n", chip_v); + if (chip_v != image_v) + { + printk("====The Gigabit image is not match the Gigabit card (chip version)====\n"); + printk("====Please check your image====\n"); + return -EOPNOTSUPP; + } + + /*check sub_id*/; + printk("Checking sub_id .......\n"); + printk("The card's sub_id : %04x\n", hw->subsystem_device_id); + printk("The image's sub_id : %04x\n", data[0xfffdc] << 8 | data[0xfffdd]); + if ((hw->subsystem_device_id & 0xffff) == + ((data[0xfffdc] << 8 | data[0xfffdd]) & 0xffff)){ + printk("It is a right image\n"); + } else if (hw->subsystem_device_id == 0xffff){ + printk("update anyway\n"); + } else { + printk("====The Gigabit image is not match the Gigabit card====\n"); + printk("====Please check your image====\n"); + return -EOPNOTSUPP; + } + + /*check dev_id*/ + printk("Checking dev_id .......\n"); + printk("The image's dev_id : %04x\n", data[0xfffde] << 8 | data[0xfffdf]); + printk("The card's dev_id : %04x\n", hw->device_id); + if (!((hw->device_id & 0xffff) == ((data[0xfffde] << 8 | data[0xfffdf]) & 0xffff)) + && !(hw->device_id == 0xffff)) + { + printk("====The Gigabit image is not match the Gigabit card====\n"); + printk("====Please check your 
image====\n"); + return -EOPNOTSUPP; + } + + // unlock flash write protect + ngbe_release_eeprom_semaphore(hw); + ngbe_flash_write_unlock(hw); + + wr32(hw,0x10114,0x9f050206); + wr32(hw,0x10194,0x9f050206); + + msleep(1000); + + ngbe_flash_read_dword(hw, MAC_ADDR0_WORD0_OFFSET_1G, &mac_addr0_dword0_t); + ngbe_flash_read_dword(hw, MAC_ADDR0_WORD1_OFFSET_1G, &mac_addr0_dword1_t); + mac_addr0_dword1_t = mac_addr0_dword1_t & U16_MAX; + ngbe_flash_read_dword(hw, MAC_ADDR1_WORD0_OFFSET_1G, &mac_addr1_dword0_t); + ngbe_flash_read_dword(hw, MAC_ADDR1_WORD1_OFFSET_1G, &mac_addr1_dword1_t); + mac_addr1_dword1_t = mac_addr1_dword1_t & U16_MAX; + ngbe_flash_read_dword(hw, MAC_ADDR2_WORD0_OFFSET_1G, &mac_addr2_dword0_t); + ngbe_flash_read_dword(hw, MAC_ADDR2_WORD1_OFFSET_1G, &mac_addr2_dword1_t); + mac_addr2_dword1_t = mac_addr2_dword1_t & U16_MAX; + ngbe_flash_read_dword(hw, MAC_ADDR3_WORD0_OFFSET_1G, &mac_addr3_dword0_t); + ngbe_flash_read_dword(hw, MAC_ADDR3_WORD1_OFFSET_1G, &mac_addr3_dword1_t); + + mac_addr3_dword1_t = mac_addr3_dword1_t & U16_MAX; + for (i = 0; i < 24; i++) { + ngbe_flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 4 * i, &sn[i]); + } + if (sn[23] == U32_MAX) + sn_is_str = false; + + ngbe_flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G, &serial_num_dword0_t); + ngbe_flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G+4, &serial_num_dword1_t); + ngbe_flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G+8, &serial_num_dword2_t); + printk("Old: MAC Address0 is: 0x%04x%08x\n", mac_addr0_dword1_t, mac_addr0_dword0_t); + printk(" MAC Address1 is: 0x%04x%08x\n", mac_addr1_dword1_t, mac_addr1_dword0_t); + printk(" MAC Address2 is: 0x%04x%08x\n", mac_addr2_dword1_t, mac_addr2_dword0_t); + printk(" MAC Address3 is: 0x%04x%08x\n", mac_addr3_dword1_t, mac_addr3_dword0_t); + + for (k=0; k<(512/4); k++) + ngbe_flash_read_dword(hw, 0xfe000 + k*4, &num[k]); + + status = fmgr_usr_cmd_op(hw, 0x6); // write enable + status = fmgr_usr_cmd_op(hw, 0x98); // global protection 
un-lock + msleep(1000); // 1 s + + //rebuild vpd + vpd_offset = (data[0x35] << 8) | data[0x34]; + if (vpd_offset == 0x60) + vpd_end = 0xc0; + else if (vpd_offset == 0x170) + vpd_end = 0x200; + else + return 1; + + memset(vpd_tend, 0xff, sizeof(vpd_tend)); + curadr = vpd_offset + 1; + id_str_len = data[curadr] | data[curadr + 1] << 8; + curadr += (7 + id_str_len); + pn_str_len = data[curadr]; + curadr += 1 + pn_str_len; + + for (i = 0; i < curadr - vpd_offset; i++) { + vpd_tend[i] = data[vpd_offset + i]; + } + + memset(sn_str, 0x0, sizeof(sn_str)); + if (sn_is_str) { + for(i = 0; i < 24; i++) { + sn_str[i] = sn[23-i]; + } + sn_str_len = strlen(sn_str); + } else { + sn_str_len = 0x12; + sprintf(sn_str ,"%02x%08x%08x",(serial_num_dword2_t & 0xff), serial_num_dword1_t, serial_num_dword0_t); + } + + vpdadr = curadr - vpd_offset; + + if (data[curadr] == 'S' && data[curadr + 1] == 'N') { + if (data[curadr + 2]) { + for (i = sn_str_len; i < data[curadr + 2]; i++) + sn_str[i] = 0x20; + sn_str_len = data[curadr + 2]; + } + curadr += 3 + data[curadr + 2]; + rv_str_len = data[2 + curadr]; + } else { + rv_str_len = data[2 + curadr]; + } + + vpd_tend[vpdadr] = 'S'; + vpd_tend[vpdadr + 1] = 'N'; + vpd_tend[vpdadr + 2] = sn_str_len; + + for (i = 0; i < sn_str_len; i++) + vpd_tend[vpdadr + 3 + i] = sn_str[i]; + + vpdadr = vpdadr+ 3 + sn_str_len; + + for (i = 0; i < 3; i++) + vpd_tend[vpdadr + i] = data [curadr + i]; + + vpdadr += 3; + for (i = 0; i < rv_str_len; i++) + vpd_tend[vpdadr + i] = 0x0; + + vpdadr += rv_str_len; + vpd_ro_len = pn_str_len + sn_str_len + rv_str_len + 9; + vpd_tend[4 + id_str_len] = vpd_ro_len & 0xff; + vpd_tend[5 + id_str_len] = (vpd_ro_len >> 8) & 0xff; + + for (i = 0; i < vpdadr; i++) + chksum += vpd_tend[i]; + chksum = ~(chksum & 0xff) + 1; + vpd_tend[vpdadr - rv_str_len] = chksum; + vpd_tend[vpdadr] = 0x78; + // Note: for Spanish FLASH, first 8 sectors (4KB) in sector0 (64KB) need to use a special erase command (4K sector erase) + if (flash_vendor == 1) 
{ + wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c720); + for (i = 0; i < 8; i++) { + flash_erase_sector(hw, i*128); + msleep(20); // 20 ms + } + wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c7d8); + } + + /* Winbond Flash, erase chip command is okay, but erase sector doestn't work*/ + sector_num = size / SPI_SECTOR_SIZE; + if (flash_vendor == 2) { + status = flash_erase_chip(hw); + printk("Erase chip command, return status = %0d\n", status); + msleep(1000); // 1 s + } else { + wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c720); + for (i=0; i= PRODUCT_SERIAL_NUM_OFFSET_1G && i*4 <= PRODUCT_SERIAL_NUM_OFFSET_1G + 92) || + (i * 4 >= vpd_offset && i * 4 < vpd_end) || + (i * 4 == 0x15c)); + if (read_data != 0xffffffff && !skip) { + status = ngbe_flash_write_dword(hw, i*4, read_data); + if (status) { + printk("ERROR: Program 0x%08x @addr: 0x%08x is failed !!\n", read_data, i*4); + ngbe_flash_read_dword(hw, i*4, &read_data); + printk(" Read data from Flash is: 0x%08x\n", read_data); + return 1; + } + } + if (i%1024 == 0) { + printk("\b\b\b\b%3d%%", (int)(i*4 * 100 / size)); + } + } + + for (i = 0; i < (vpd_end - vpd_offset) / 4; i++) { + read_data = vpd_tend[4 * i + 3] << 24 | vpd_tend[4 * i + 2] << 16 | vpd_tend[4 * i + 1] << 8 | vpd_tend[4 * i]; + read_data = __le32_to_cpu(read_data); + if (read_data != U32_MAX) { + status = ngbe_flash_write_dword(hw, vpd_offset + i * 4, read_data); + if (status) { + printk("ERROR: Program 0x%08x @addr: 0x%08x is failed !!\n", read_data, i * 4); + ngbe_flash_read_dword(hw, i * 4, &read_data); + printk(" Read data from Flash is: 0x%08x\n", read_data); + return 1; + } + } + } + + chksum = 0; + for (i = 0; i < 0x400; i += 2) { + if (i >= vpd_offset && i < vpd_end) { + chksum += (vpd_tend[i - vpd_offset + 1] << 8 | vpd_tend[i - vpd_offset]); + } else if (i == 0x15e) { + continue; + } else { + chksum += (data[i + 1] << 8 | data[i]); + } + } + chksum = 0xbaba - chksum; + chksum &= 0xffff; + status = ngbe_flash_write_dword(hw, 0x15e, 0xffff0000 | chksum); + + 
ngbe_flash_write_dword(hw, MAC_ADDR0_WORD0_OFFSET_1G, mac_addr0_dword0_t); + ngbe_flash_write_dword(hw, MAC_ADDR0_WORD1_OFFSET_1G, (mac_addr0_dword1_t | 0x80000000));//lan0 + ngbe_flash_write_dword(hw, MAC_ADDR1_WORD0_OFFSET_1G, mac_addr1_dword0_t); + ngbe_flash_write_dword(hw, MAC_ADDR1_WORD1_OFFSET_1G, (mac_addr1_dword1_t | 0x80000000));//lan1 + ngbe_flash_write_dword(hw, MAC_ADDR2_WORD0_OFFSET_1G, mac_addr2_dword0_t); + ngbe_flash_write_dword(hw, MAC_ADDR2_WORD1_OFFSET_1G, (mac_addr2_dword1_t | 0x80000000));//lan2 + ngbe_flash_write_dword(hw, MAC_ADDR3_WORD0_OFFSET_1G, mac_addr3_dword0_t); + ngbe_flash_write_dword(hw, MAC_ADDR3_WORD1_OFFSET_1G, (mac_addr3_dword1_t | 0x80000000));//lan3 + if (sn_is_str) { + for (i = 0; i < 24; i++) { + ngbe_flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 4 * i, sn[i]); + } + } else { + ngbe_flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G, serial_num_dword0_t); + ngbe_flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G+4, serial_num_dword1_t); + ngbe_flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G+8, serial_num_dword2_t); + } + for (n=0; n < 512/4; n++) { + if(!(num[n] == 0xffffffff)) + ngbe_flash_write_dword(hw, 0xfe000 + n*4, num[n]); + } + wr32(hw, 0x10200, rd32(hw, 0x10200) | 0x80000000); + + return 0; +} + +/** + * ngbe_set_rxpba - Initialize Rx packet buffer + * @hw: pointer to hardware structure + * @num_pb: number of packet buffers to allocate + * @headroom: reserve n KB of headroom + * @strategy: packet buffer allocation strategy + **/ +void ngbe_set_rxpba(struct ngbe_hw *hw, int num_pb, u32 headroom, + int strategy) +{ + u32 pbsize = hw->mac.rx_pb_size; + u32 rxpktsize, txpktsize, txpbthresh; + + /* Reserve headroom */ + pbsize -= headroom; + + if (!num_pb) + num_pb = 1; + + /* Divide remaining packet buffer space amongst the number of packet + * buffers requested using supplied strategy. 
+ */ + switch (strategy) { + case PBA_STRATEGY_EQUAL: + rxpktsize = (pbsize / num_pb) << NGBE_RDB_PB_SZ_SHIFT; + wr32(hw, NGBE_RDB_PB_SZ, rxpktsize); + break; + default: + break; + } + + /* Only support an equally distributed Tx packet buffer strategy. */ + txpktsize = NGBE_TDB_PB_SZ_MAX / num_pb; + txpbthresh = (txpktsize / NGBE_KB_TO_B) - NGBE_TXPKT_SIZE_MAX; + + wr32(hw, NGBE_TDB_PB_SZ, txpktsize); + wr32(hw, NGBE_TDM_PB_THRE, txpbthresh); +} + +/** + * ngbe_get_thermal_sensor_data - Gathers thermal sensor data + * @hw: pointer to hardware structure + * @data: pointer to the thermal sensor data structure + * + * algorithm: + * T = (-4.8380E+01)N^0 + (3.1020E-01)N^1 + (-1.8201E-04)N^2 + + (8.1542E-08)N^3 + (-1.6743E-11)N^4 + * algorithm with 5% more deviation, easy for implementation + * T = (-50)N^0 + (0.31)N^1 + (-0.0002)N^2 + (0.0000001)N^3 + * + * Returns the thermal sensor data structure + **/ +int ngbe_get_thermal_sensor_data(struct ngbe_hw *hw) +{ + s64 tsv; + struct ngbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + /* Only support thermal sensors attached to physical port 0 */ + if (hw->bus.lan_id) + return NGBE_NOT_IMPLEMENTED; + + tsv = (s64)(rd32(hw, NGBE_TS_ST) & + NGBE_TS_ST_DATA_OUT_MASK); + /* 216 < tsv < 876 */ + + tsv = tsv < 876 ? 
tsv : 876 ; + + tsv = tsv - 216; + + tsv = tsv/4; + + tsv = tsv - 40; + + + data->sensor.temp = (s16)tsv; + + return 0; +} + +/** + * ngbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Inits the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +int ngbe_init_thermal_sensor_thresh(struct ngbe_hw *hw) +{ + int status = 0; + + struct ngbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + memset(data, 0, sizeof(struct ngbe_thermal_sensor_data)); + + /* Only support thermal sensors attached to SP physical port 0 */ + if (hw->bus.lan_id) + return NGBE_NOT_IMPLEMENTED; + + + wr32(hw, NGBE_TS_INT_EN, NGBE_TS_INT_EN_DALARM_INT_EN | NGBE_TS_INT_EN_ALARM_INT_EN); + + wr32(hw, NGBE_TS_EN, NGBE_TS_EN_ENA); + + + data->sensor.alarm_thresh = 115; + wr32(hw, NGBE_TS_ALARM_THRE, 0x344);/* magic num */ + data->sensor.dalarm_thresh = 110; + wr32(hw, NGBE_TS_DALARM_THRE, 0x330);/* magic num */ + + return status; +} + +void ngbe_disable_rx(struct ngbe_hw *hw) +{ + u32 pfdtxgswc; + u32 rxctrl; + struct ngbe_adapter *adapter = hw->back; + + rxctrl = rd32(hw, NGBE_RDB_PB_CTL); + if (rxctrl & NGBE_RDB_PB_CTL_PBEN) { + pfdtxgswc = rd32(hw, NGBE_PSR_CTL); + if (pfdtxgswc & NGBE_PSR_CTL_SW_EN) { + pfdtxgswc &= ~NGBE_PSR_CTL_SW_EN; + wr32(hw, NGBE_PSR_CTL, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } + rxctrl &= ~NGBE_RDB_PB_CTL_PBEN; + wr32(hw, NGBE_RDB_PB_CTL, rxctrl); + + /*OCP NCSI BMC need it*/ + if (!(hw->ncsi_enabled || + (hw->subsystem_device_id & WOL_SUP_MASK) == WOL_SUP || + adapter->eth_priv_flags & NGBE_ETH_PRIV_FLAG_LLDP)) + /* disable mac receiver */ + wr32m(hw, NGBE_MAC_RX_CFG, NGBE_MAC_RX_CFG_RE, 0); + + } + +} + + +void ngbe_enable_rx(struct ngbe_hw *hw) +{ + u32 pfdtxgswc; + + /* enable mac receiver */ + wr32m(hw, NGBE_MAC_RX_CFG, + NGBE_MAC_RX_CFG_RE, NGBE_MAC_RX_CFG_RE); + + 
wr32m(hw, NGBE_RSEC_CTL, + 0x2, 0); + + wr32m(hw, NGBE_RDB_PB_CTL, + NGBE_RDB_PB_CTL_PBEN, NGBE_RDB_PB_CTL_PBEN); + + if (hw->mac.set_lben) { + pfdtxgswc = rd32(hw, NGBE_PSR_CTL); + pfdtxgswc |= NGBE_PSR_CTL_SW_EN; + wr32(hw, NGBE_PSR_CTL, pfdtxgswc); + hw->mac.set_lben = false; + } +} + +/** + * ngbe_mng_present - returns true when manangbeent capability is present + * @hw: pointer to hardware structure + */ +bool ngbe_mng_present(struct ngbe_hw *hw) +{ + u32 fwsm; + + fwsm = rd32(hw, NGBE_MIS_ST); + return fwsm & NGBE_MIS_ST_MNG_INIT_DN; +} + +bool ngbe_check_mng_access(struct ngbe_hw *hw) +{ + + if (!ngbe_mng_present(hw)) + return false; + return true; +} + +int ngbe_check_flash_load(struct ngbe_hw *hw, u32 check_bit) +{ + u32 i = 0; + u32 reg = 0; + int err = 0; + /* if there's flash existing */ + if (!(rd32(hw, NGBE_SPI_STATUS) & + NGBE_SPI_STATUS_FLASH_BYPASS)) { + /* wait hw load flash done */ + for (i = 0; i < NGBE_MAX_FLASH_LOAD_POLL_TIME; i++) { + reg = rd32(hw, NGBE_SPI_ILDR_STATUS); + if (!(reg & check_bit)) { + /* done */ + break; + } + msleep(200); + } + if (i == NGBE_MAX_FLASH_LOAD_POLL_TIME) { + err = NGBE_ERR_FLASH_LOADING_FAILED; + ERROR_REPORT1(NGBE_ERROR_POLLING, + "HW Loading Flash failed: %d\n", err); + } + } + return err; +} + +/* The ngbe_ptype_lookup is used to convert from the 8-bit ptype in the + * hardware to a bit-field that can be used by SW to more easily determine the + * packet type. + * + * Macros are used to shorten the table lines and make this table human + * readable. + * + * We store the PTYPE in the top byte of the bit field - this is just so that + * we can check that the table doesn't have a row missing, as the index into + * the table should be the PTYPE. 
 *
 * Typical work flow:
 *
 * IF NOT ngbe_ptype_lookup[ptype].known
 * THEN
 *      Packet is unknown
 * ELSE IF ngbe_ptype_lookup[ptype].mac == NGBE_DEC_PTYPE_MAC_IP
 *      Use the rest of the fields to look at the tunnels, inner protocols, etc
 * ELSE
 *      Use the enum ngbe_l2_ptypes to decode the packet type
 * ENDIF
 */

/* macro to make the table lines short */
#define NGBE_PTT(ptype, mac, ip, etype, eip, proto, layer)\
	{	ptype, \
		1, \
		/* mac     */ NGBE_DEC_PTYPE_MAC_##mac, \
		/* ip      */ NGBE_DEC_PTYPE_IP_##ip, \
		/* etype   */ NGBE_DEC_PTYPE_ETYPE_##etype, \
		/* eip     */ NGBE_DEC_PTYPE_IP_##eip, \
		/* proto   */ NGBE_DEC_PTYPE_PROT_##proto, \
		/* layer   */ NGBE_DEC_PTYPE_LAYER_##layer }

/* "unknown" table entry: only the raw ptype is recorded, .known stays 0 */
#define NGBE_UKN(ptype) \
	{ ptype, 0, 0, 0, 0, 0, 0, 0 }

/* Lookup table mapping the HW PTYPE to the bit field for decoding */
/* for ((pt=0;pt<256;pt++)); do printf "macro(0x%02X),\n" $pt; done */
ngbe_dptype ngbe_ptype_lookup[256] = {
	NGBE_UKN(0x00),
	NGBE_UKN(0x01),
	NGBE_UKN(0x02),
	NGBE_UKN(0x03),
	NGBE_UKN(0x04),
	NGBE_UKN(0x05),
	NGBE_UKN(0x06),
	NGBE_UKN(0x07),
	NGBE_UKN(0x08),
	NGBE_UKN(0x09),
	NGBE_UKN(0x0A),
	NGBE_UKN(0x0B),
	NGBE_UKN(0x0C),
	NGBE_UKN(0x0D),
	NGBE_UKN(0x0E),
	NGBE_UKN(0x0F),

	/* L2: mac */
	NGBE_UKN(0x10),
	NGBE_PTT(0x11, L2, NONE, NONE, NONE, NONE, PAY2),
	NGBE_PTT(0x12, L2, NONE, NONE, NONE, TS, PAY2),
	NGBE_PTT(0x13, L2, NONE, NONE, NONE, NONE, PAY2),
	NGBE_PTT(0x14, L2, NONE, NONE, NONE, NONE, PAY2),
	NGBE_PTT(0x15, L2, NONE, NONE, NONE, NONE, NONE),
	NGBE_PTT(0x16, L2, NONE, NONE, NONE, NONE, PAY2),
	NGBE_PTT(0x17, L2, NONE, NONE, NONE, NONE, NONE),

	/* L2: ethertype filter */
	NGBE_PTT(0x18, L2, NONE, NONE, NONE, NONE, NONE),
	NGBE_PTT(0x19, L2, NONE, NONE, NONE, NONE, NONE),
	NGBE_PTT(0x1A, L2, NONE, NONE, NONE, NONE, NONE),
	NGBE_PTT(0x1B, L2, NONE, NONE, NONE, NONE, NONE),
	NGBE_PTT(0x1C, L2, NONE, NONE, NONE, NONE, NONE),
	NGBE_PTT(0x1D, L2, NONE, NONE, NONE, NONE, NONE),
	NGBE_PTT(0x1E, L2, NONE, NONE, NONE, NONE, NONE),
	NGBE_PTT(0x1F, L2, NONE, NONE, NONE, NONE, NONE),

	/* L3: ip non-tunnel */
	NGBE_UKN(0x20),
	NGBE_PTT(0x21, IP, FGV4, NONE, NONE, NONE, PAY3),
	NGBE_PTT(0x22, IP, IPV4, NONE, NONE, NONE, PAY3),
	NGBE_PTT(0x23, IP, IPV4, NONE, NONE, UDP, PAY4),
	NGBE_PTT(0x24, IP, IPV4, NONE, NONE, TCP, PAY4),
	NGBE_PTT(0x25, IP, IPV4, NONE, NONE, SCTP, PAY4),
	NGBE_UKN(0x26),
	NGBE_UKN(0x27),
	NGBE_UKN(0x28),
	NGBE_PTT(0x29, IP, FGV6, NONE, NONE, NONE, PAY3),
	NGBE_PTT(0x2A, IP, IPV6, NONE, NONE, NONE, PAY3),
	NGBE_PTT(0x2B, IP, IPV6, NONE, NONE, UDP, PAY3),
	NGBE_PTT(0x2C, IP, IPV6, NONE, NONE, TCP, PAY4),
	NGBE_PTT(0x2D, IP, IPV6, NONE, NONE, SCTP, PAY4),
	NGBE_UKN(0x2E),
	NGBE_UKN(0x2F),

	/* L2: fcoe */
	NGBE_PTT(0x30, FCOE, NONE, NONE, NONE, NONE, PAY3),
	NGBE_PTT(0x31, FCOE, NONE, NONE, NONE, NONE, PAY3),
	NGBE_PTT(0x32, FCOE, NONE, NONE, NONE, NONE, PAY3),
	NGBE_PTT(0x33, FCOE, NONE, NONE, NONE, NONE, PAY3),
	NGBE_PTT(0x34, FCOE, NONE, NONE, NONE, NONE, PAY3),
	NGBE_UKN(0x35),
	NGBE_UKN(0x36),
	NGBE_UKN(0x37),
	NGBE_PTT(0x38, FCOE, NONE, NONE, NONE, NONE, PAY3),
	NGBE_PTT(0x39, FCOE, NONE, NONE, NONE, NONE, PAY3),
	NGBE_PTT(0x3A, FCOE, NONE, NONE, NONE, NONE, PAY3),
	NGBE_PTT(0x3B, FCOE, NONE, NONE, NONE, NONE, PAY3),
	NGBE_PTT(0x3C, FCOE, NONE, NONE, NONE, NONE, PAY3),
	NGBE_UKN(0x3D),
	NGBE_UKN(0x3E),
	NGBE_UKN(0x3F),

	NGBE_UKN(0x40),
	NGBE_UKN(0x41),
	NGBE_UKN(0x42),
	NGBE_UKN(0x43),
	NGBE_UKN(0x44),
	NGBE_UKN(0x45),
	NGBE_UKN(0x46),
	NGBE_UKN(0x47),
	NGBE_UKN(0x48),
	NGBE_UKN(0x49),
	NGBE_UKN(0x4A),
	NGBE_UKN(0x4B),
	NGBE_UKN(0x4C),
	NGBE_UKN(0x4D),
	NGBE_UKN(0x4E),
	NGBE_UKN(0x4F),
	NGBE_UKN(0x50),
	NGBE_UKN(0x51),
	NGBE_UKN(0x52),
	NGBE_UKN(0x53),
	NGBE_UKN(0x54),
	NGBE_UKN(0x55),
	NGBE_UKN(0x56),
	NGBE_UKN(0x57),
	NGBE_UKN(0x58),
	NGBE_UKN(0x59),
	NGBE_UKN(0x5A),
	NGBE_UKN(0x5B),
	NGBE_UKN(0x5C),
	NGBE_UKN(0x5D),
	NGBE_UKN(0x5E),
	NGBE_UKN(0x5F),
	NGBE_UKN(0x60),
	NGBE_UKN(0x61),
	NGBE_UKN(0x62),
	NGBE_UKN(0x63),
	NGBE_UKN(0x64),
	NGBE_UKN(0x65),
	NGBE_UKN(0x66),
	NGBE_UKN(0x67),
	NGBE_UKN(0x68),
	NGBE_UKN(0x69),
	NGBE_UKN(0x6A),
	NGBE_UKN(0x6B),
	NGBE_UKN(0x6C),
	NGBE_UKN(0x6D),
	NGBE_UKN(0x6E),
	NGBE_UKN(0x6F),
	NGBE_UKN(0x70),
	NGBE_UKN(0x71),
	NGBE_UKN(0x72),
	NGBE_UKN(0x73),
	NGBE_UKN(0x74),
	NGBE_UKN(0x75),
	NGBE_UKN(0x76),
	NGBE_UKN(0x77),
	NGBE_UKN(0x78),
	NGBE_UKN(0x79),
	NGBE_UKN(0x7A),
	NGBE_UKN(0x7B),
	NGBE_UKN(0x7C),
	NGBE_UKN(0x7D),
	NGBE_UKN(0x7E),
	NGBE_UKN(0x7F),

	/* IPv4 --> IPv4/IPv6 */
	NGBE_UKN(0x80),
	NGBE_PTT(0x81, IP, IPV4, IPIP, FGV4, NONE, PAY3),
	NGBE_PTT(0x82, IP, IPV4, IPIP, IPV4, NONE, PAY3),
	NGBE_PTT(0x83, IP, IPV4, IPIP, IPV4, UDP, PAY4),
	NGBE_PTT(0x84, IP, IPV4, IPIP, IPV4, TCP, PAY4),
	NGBE_PTT(0x85, IP, IPV4, IPIP, IPV4, SCTP, PAY4),
	NGBE_UKN(0x86),
	NGBE_UKN(0x87),
	NGBE_UKN(0x88),
	NGBE_PTT(0x89, IP, IPV4, IPIP, FGV6, NONE, PAY3),
	NGBE_PTT(0x8A, IP, IPV4, IPIP, IPV6, NONE, PAY3),
	NGBE_PTT(0x8B, IP, IPV4, IPIP, IPV6, UDP, PAY4),
	NGBE_PTT(0x8C, IP, IPV4, IPIP, IPV6, TCP, PAY4),
	NGBE_PTT(0x8D, IP, IPV4, IPIP, IPV6, SCTP, PAY4),
	NGBE_UKN(0x8E),
	NGBE_UKN(0x8F),

	/* IPv4 --> GRE/NAT --> NONE/IPv4/IPv6 */
	NGBE_PTT(0x90, IP, IPV4, IG, NONE, NONE, PAY3),
	NGBE_PTT(0x91, IP, IPV4, IG, FGV4, NONE, PAY3),
	NGBE_PTT(0x92, IP, IPV4, IG, IPV4, NONE, PAY3),
	NGBE_PTT(0x93, IP, IPV4, IG, IPV4, UDP, PAY4),
	NGBE_PTT(0x94, IP, IPV4, IG, IPV4, TCP, PAY4),
	NGBE_PTT(0x95, IP, IPV4, IG, IPV4, SCTP, PAY4),
	NGBE_UKN(0x96),
	NGBE_UKN(0x97),
	NGBE_UKN(0x98),
	NGBE_PTT(0x99, IP, IPV4, IG, FGV6, NONE, PAY3),
	NGBE_PTT(0x9A, IP, IPV4, IG, IPV6, NONE, PAY3),
	NGBE_PTT(0x9B, IP, IPV4, IG, IPV6, UDP, PAY4),
	NGBE_PTT(0x9C, IP, IPV4, IG, IPV6, TCP, PAY4),
	NGBE_PTT(0x9D, IP, IPV4, IG, IPV6, SCTP, PAY4),
	NGBE_UKN(0x9E),
	NGBE_UKN(0x9F),

	/* IPv4 --> GRE/NAT --> MAC --> NONE/IPv4/IPv6 */
	NGBE_PTT(0xA0, IP, IPV4, IGM, NONE, NONE, PAY3),
	NGBE_PTT(0xA1, IP, IPV4, IGM, FGV4, NONE, PAY3),
	NGBE_PTT(0xA2, IP, IPV4, IGM, IPV4, NONE, PAY3),
	NGBE_PTT(0xA3, IP, IPV4, IGM, IPV4, UDP, PAY4),
	NGBE_PTT(0xA4, IP, IPV4, IGM, IPV4, TCP, PAY4),
	NGBE_PTT(0xA5, IP, IPV4, IGM, IPV4, SCTP, PAY4),
	NGBE_UKN(0xA6),
	NGBE_UKN(0xA7),
	NGBE_UKN(0xA8),
	NGBE_PTT(0xA9, IP, IPV4, IGM, FGV6, NONE, PAY3),
	NGBE_PTT(0xAA, IP, IPV4, IGM, IPV6, NONE, PAY3),
	NGBE_PTT(0xAB, IP, IPV4, IGM, IPV6, UDP, PAY4),
	NGBE_PTT(0xAC, IP, IPV4, IGM, IPV6, TCP, PAY4),
	NGBE_PTT(0xAD, IP, IPV4, IGM, IPV6, SCTP, PAY4),
	NGBE_UKN(0xAE),
	NGBE_UKN(0xAF),

	/* IPv4 --> GRE/NAT --> MAC+VLAN --> NONE/IPv4/IPv6 */
	NGBE_PTT(0xB0, IP, IPV4, IGMV, NONE, NONE, PAY3),
	NGBE_PTT(0xB1, IP, IPV4, IGMV, FGV4, NONE, PAY3),
	NGBE_PTT(0xB2, IP, IPV4, IGMV, IPV4, NONE, PAY3),
	NGBE_PTT(0xB3, IP, IPV4, IGMV, IPV4, UDP, PAY4),
	NGBE_PTT(0xB4, IP, IPV4, IGMV, IPV4, TCP, PAY4),
	NGBE_PTT(0xB5, IP, IPV4, IGMV, IPV4, SCTP, PAY4),
	NGBE_UKN(0xB6),
	NGBE_UKN(0xB7),
	NGBE_UKN(0xB8),
	NGBE_PTT(0xB9, IP, IPV4, IGMV, FGV6, NONE, PAY3),
	NGBE_PTT(0xBA, IP, IPV4, IGMV, IPV6, NONE, PAY3),
	NGBE_PTT(0xBB, IP, IPV4, IGMV, IPV6, UDP, PAY4),
	NGBE_PTT(0xBC, IP, IPV4, IGMV, IPV6, TCP, PAY4),
	NGBE_PTT(0xBD, IP, IPV4, IGMV, IPV6, SCTP, PAY4),
	NGBE_UKN(0xBE),
	NGBE_UKN(0xBF),

	/* IPv6 --> IPv4/IPv6 */
	NGBE_UKN(0xC0),
	NGBE_PTT(0xC1, IP, IPV6, IPIP, FGV4, NONE, PAY3),
	NGBE_PTT(0xC2, IP, IPV6, IPIP, IPV4, NONE, PAY3),
	NGBE_PTT(0xC3, IP, IPV6, IPIP, IPV4, UDP, PAY4),
	NGBE_PTT(0xC4, IP, IPV6, IPIP, IPV4, TCP, PAY4),
	NGBE_PTT(0xC5, IP, IPV6, IPIP, IPV4, SCTP, PAY4),
	NGBE_UKN(0xC6),
	NGBE_UKN(0xC7),
	NGBE_UKN(0xC8),
	NGBE_PTT(0xC9, IP, IPV6, IPIP, FGV6, NONE, PAY3),
	NGBE_PTT(0xCA, IP, IPV6, IPIP, IPV6, NONE, PAY3),
	NGBE_PTT(0xCB, IP, IPV6, IPIP, IPV6, UDP, PAY4),
	NGBE_PTT(0xCC, IP, IPV6, IPIP, IPV6, TCP, PAY4),
	NGBE_PTT(0xCD, IP, IPV6, IPIP, IPV6, SCTP, PAY4),
	NGBE_UKN(0xCE),
	NGBE_UKN(0xCF),

	/* IPv6 --> GRE/NAT -> NONE/IPv4/IPv6 */
	NGBE_PTT(0xD0, IP, IPV6, IG, NONE, NONE, PAY3),
	NGBE_PTT(0xD1, IP, IPV6, IG, FGV4, NONE, PAY3),
	NGBE_PTT(0xD2, IP, IPV6, IG, IPV4, NONE, PAY3),
	NGBE_PTT(0xD3, IP, IPV6, IG, IPV4, UDP, PAY4),
	NGBE_PTT(0xD4, IP, IPV6, IG, IPV4, TCP, PAY4),
	NGBE_PTT(0xD5, IP, IPV6, IG, IPV4, SCTP, PAY4),
	NGBE_UKN(0xD6),
	NGBE_UKN(0xD7),
	NGBE_UKN(0xD8),
	NGBE_PTT(0xD9, IP, IPV6, IG, FGV6, NONE, PAY3),
	NGBE_PTT(0xDA, IP, IPV6, IG, IPV6, NONE, PAY3),
	NGBE_PTT(0xDB, IP, IPV6, IG, IPV6, UDP, PAY4),
	NGBE_PTT(0xDC, IP, IPV6, IG, IPV6, TCP, PAY4),
	NGBE_PTT(0xDD, IP, IPV6, IG, IPV6, SCTP, PAY4),
	NGBE_UKN(0xDE),
	NGBE_UKN(0xDF),

	/* IPv6 --> GRE/NAT -> MAC -> NONE/IPv4/IPv6 */
	NGBE_PTT(0xE0, IP, IPV6, IGM, NONE, NONE, PAY3),
	NGBE_PTT(0xE1, IP, IPV6, IGM, FGV4, NONE, PAY3),
	NGBE_PTT(0xE2, IP, IPV6, IGM, IPV4, NONE, PAY3),
	NGBE_PTT(0xE3, IP, IPV6, IGM, IPV4, UDP, PAY4),
	NGBE_PTT(0xE4, IP, IPV6, IGM, IPV4, TCP, PAY4),
	NGBE_PTT(0xE5, IP, IPV6, IGM, IPV4, SCTP, PAY4),
	NGBE_UKN(0xE6),
	NGBE_UKN(0xE7),
	NGBE_UKN(0xE8),
	NGBE_PTT(0xE9, IP, IPV6, IGM, FGV6, NONE, PAY3),
	NGBE_PTT(0xEA, IP, IPV6, IGM, IPV6, NONE, PAY3),
	NGBE_PTT(0xEB, IP, IPV6, IGM, IPV6, UDP, PAY4),
	NGBE_PTT(0xEC, IP, IPV6, IGM, IPV6, TCP, PAY4),
	NGBE_PTT(0xED, IP, IPV6, IGM, IPV6, SCTP, PAY4),
	NGBE_UKN(0xEE),
	NGBE_UKN(0xEF),

	/* IPv6 --> GRE/NAT -> MAC+VLAN --> NONE/IPv4/IPv6 */
	NGBE_PTT(0xF0, IP, IPV6, IGMV, NONE, NONE, PAY3),
	NGBE_PTT(0xF1, IP, IPV6, IGMV, FGV4, NONE, PAY3),
	NGBE_PTT(0xF2, IP, IPV6, IGMV, IPV4, NONE, PAY3),
	NGBE_PTT(0xF3, IP, IPV6, IGMV, IPV4, UDP, PAY4),
	NGBE_PTT(0xF4, IP, IPV6, IGMV, IPV4, TCP, PAY4),
	NGBE_PTT(0xF5, IP, IPV6, IGMV, IPV4, SCTP, PAY4),
	NGBE_UKN(0xF6),
	NGBE_UKN(0xF7),
	NGBE_UKN(0xF8),
	NGBE_PTT(0xF9, IP, IPV6, IGMV, FGV6, NONE, PAY3),
	NGBE_PTT(0xFA, IP, IPV6, IGMV, IPV6, NONE, PAY3),
	NGBE_PTT(0xFB, IP, IPV6, IGMV, IPV6, UDP, PAY4),
	NGBE_PTT(0xFC, IP, IPV6, IGMV, IPV6, TCP, PAY4),
	NGBE_PTT(0xFD, IP, IPV6, IGMV, IPV6, SCTP, PAY4),
	NGBE_UKN(0xFE),
	NGBE_UKN(0xFF),
};

/**
 * ngbe_get_link_capabilities - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @autoneg: true when autoneg or autotry is enabled
 *
 * Determines the link capabilities by reading the AUTOC register.
 **/
int ngbe_get_link_capabilities(struct ngbe_hw *hw,
			       u32 *speed,
			       bool *autoneg)
{
	int status = 0;

	/* Every listed EM (WX1860-family) part is tri-speed copper:
	 * 10/100/1000 Mb/s full duplex, negotiation handled by the PHY.
	 */
	if (hw->device_id == NGBE_DEV_ID_EM_TEST ||
	    hw->device_id == NGBE_DEV_ID_EM_WX1860A2 ||
	    hw->device_id == NGBE_DEV_ID_EM_WX1860A2S ||
	    hw->device_id == NGBE_DEV_ID_EM_WX1860A4 ||
	    hw->device_id == NGBE_DEV_ID_EM_WX1860A4S ||
	    hw->device_id == NGBE_DEV_ID_EM_WX1860AL2 ||
	    hw->device_id == NGBE_DEV_ID_EM_WX1860AL2S ||
	    hw->device_id == NGBE_DEV_ID_EM_WX1860AL4 ||
	    hw->device_id == NGBE_DEV_ID_EM_WX1860AL4S ||
	    hw->device_id == NGBE_DEV_ID_EM_WX1860AL_W ||
	    hw->device_id == NGBE_DEV_ID_EM_WX1860A1 ||
	    hw->device_id == NGBE_DEV_ID_EM_WX1860A1L ||
	    /* NOTE(review): bare device id 0x10c — presumably a variant
	     * lacking an NGBE_DEV_ID_* define; confirm and name it.
	     */
	    hw->device_id == 0x10c ||
	    hw->device_id == NGBE_DEV_ID_EM_WX1860NCSI) {
		*speed = NGBE_LINK_SPEED_1GB_FULL |
			 NGBE_LINK_SPEED_100_FULL |
			 NGBE_LINK_SPEED_10_FULL;
		*autoneg = false;
		hw->phy.link_mode = NGBE_PHYSICAL_LAYER_1000BASE_T |
				    NGBE_PHYSICAL_LAYER_100BASE_TX;
	}

	/* RGMII FPGA boards are fixed at 1 Gb/s with no autoneg */
	if ((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA) {
		*speed = NGBE_LINK_SPEED_1GB_FULL;
		hw->phy.link_mode = NGBE_PHYSICAL_LAYER_1000BASE_T;
		*autoneg = false;
	}

	return status;
}

/**
 * ngbe_get_copper_link_capabilities - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @autoneg: boolean auto-negotiation value
 *
 * Determines the supported link capabilities by reading the PHY auto
 * negotiation register.
+**/ +static int ngbe_get_copper_link_capabilities(struct ngbe_hw *hw, + u32 *speed, + bool *autoneg) +{ + int status = 0; + u16 value = 0; + unsigned long flags; + + *speed = 0; + + if (hw->mac.autoneg) + *autoneg = true; + else + *autoneg = false; + + if (status == 0) { + *speed = NGBE_LINK_SPEED_10_FULL | + NGBE_LINK_SPEED_100_FULL | + NGBE_LINK_SPEED_1GB_FULL; + } + + if ((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA) { + *speed = NGBE_LINK_SPEED_1GB_FULL; + hw->phy.link_mode = NGBE_PHYSICAL_LAYER_1000BASE_T; + *autoneg = false; + } + + if (hw->phy.type == ngbe_phy_m88e1512_sfi) { + *speed = NGBE_LINK_SPEED_1GB_FULL; + hw->phy.link_mode = NGBE_PHYSICAL_LAYER_1000BASE_T; + } + + if (hw->phy.type == ngbe_phy_yt8521s_sfi) { + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_ext_yt8521s(hw, 0xA001, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + if ((value & 7) == 1) { + *speed = NGBE_LINK_SPEED_1GB_FULL | + NGBE_LINK_SPEED_100_FULL; + hw->phy.link_mode = NGBE_PHYSICAL_LAYER_1000BASE_T; + } + } + + return status; +} + +/** + * ngbe_get_media_type - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +enum ngbe_media_type ngbe_get_media_type(struct ngbe_hw *hw) +{ + enum ngbe_media_type media_type; + + ERROR_REPORT1(NGBE_ERROR_ARGUMENT, + "ngbe_get_media_type: hw->device_id = %u/n", hw->device_id); + + media_type = ngbe_media_type_copper; + + return media_type; +} + +/** + * ngbe_stop_mac_link_on_d3 - Disables link on D3 + * @hw: pointer to hardware structure + * + * Disables link during D3 power down sequence. 
 *
 **/
void ngbe_stop_mac_link_on_d3(struct ngbe_hw *hw)
{
	/* no MAC-side action needed on this part during D3 entry */
	UNREFERENCED_PARAMETER(hw);
	return;
}

/**
 * ngbe_setup_mac_link - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 **/
int ngbe_setup_mac_link(struct ngbe_hw *hw,
			u32 speed,
			bool autoneg_wait_to_complete)
{
	bool autoneg = false;
	int status = 0;
	u32 link_capabilities = NGBE_LINK_SPEED_UNKNOWN;
	u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
	u32 lan_speed = 0;
	bool link_up = false;

	/* WoL-capable and NCSI-enabled parts skip the capability mask so
	 * management traffic keeps the link configurable.
	 */
	if (!((hw->subsystem_device_id & WOL_SUP_MASK) == WOL_SUP ||
	      hw->ncsi_enabled)) {
		/* Check to see if speed passed in is supported. */
		status = hw->mac.ops.get_link_capabilities(hw,
				&link_capabilities, &autoneg);
		if (status)
			goto out;

		speed &= link_capabilities;

		if (speed == NGBE_LINK_SPEED_UNKNOWN) {
			status = NGBE_ERR_LINK_SETUP;
			goto out;
		}
	}

	status = hw->mac.ops.check_link(hw,
			&link_speed, &link_up, false);
	if (status != 0)
		goto out;
	/* if link already matches, mirror the negotiated speed into the
	 * CFG_LAN_SPEED field (0 = 10M, 1 = 100M, 2 = 1G)
	 */
	if ((link_speed == speed) && link_up) {
		switch (link_speed) {
		case NGBE_LINK_SPEED_100_FULL:
			lan_speed = 1;
			break;
		case NGBE_LINK_SPEED_1GB_FULL:
			lan_speed = 2;
			break;
		case NGBE_LINK_SPEED_10_FULL:
			lan_speed = 0;
			break;
		default:
			break;
		}
		wr32m(hw, NGBE_CFG_LAN_SPEED,
		      0x3, lan_speed);
	}

out:
	return status;
}


/**
 * ngbe_setup_copper_link - Set the PHY autoneg advertised field
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: true if waiting is needed to complete
 *
 * Restarts link on PHY and MAC based on settings passed in.
 **/
static int ngbe_setup_copper_link(struct ngbe_hw *hw,
				  u32 speed,
				  bool need_restart_AN)
{
	int status = 0;
	struct ngbe_adapter *adapter = hw->back;

	/* Setup the PHY according to input speed; RGMII FPGA boards have
	 * no PHY to program.
	 */
	if (!((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA)) {
		status = hw->phy.ops.setup_link(hw, speed,
						need_restart_AN);
	}
	/* have the service task verify autoneg completion later */
	adapter->flags |= NGBE_FLAG_NEED_ANC_CHECK;

	return status;
}

/* ngbe_reset_misc - post-reset register init common to all reset paths */
static int ngbe_reset_misc(struct ngbe_hw *hw)
{
	int i;

	/* receive packets that size > 2048 */
	wr32m(hw, NGBE_MAC_RX_CFG,
	      NGBE_MAC_RX_CFG_JE, NGBE_MAC_RX_CFG_JE);

	/* clear counters on read */
	wr32m(hw, NGBE_MMC_CONTROL,
	      NGBE_MMC_CONTROL_RSTONRD, NGBE_MMC_CONTROL_RSTONRD);

	/* enable receive flow-control frames */
	wr32m(hw, NGBE_MAC_RX_FLOW_CTRL,
	      NGBE_MAC_RX_FLOW_CTRL_RFE, NGBE_MAC_RX_FLOW_CTRL_RFE);

	/* promiscuous packet filter while initializing */
	wr32(hw, NGBE_MAC_PKT_FLT,
	     NGBE_MAC_PKT_FLT_PR);

	/* NOTE(review): 0xA00 written into the RST_INIT field — presumably
	 * a reset-wait value; confirm against the register reference.
	 */
	wr32m(hw, NGBE_MIS_RST_ST,
	      NGBE_MIS_RST_ST_RST_INIT, 0xA00);

	/* errata 4: initialize mng flex tbl and wakeup flex tbl */
	wr32(hw, NGBE_PSR_MNG_FLEX_SEL, 0);
	for (i = 0; i < 16; i++) {
		wr32(hw, NGBE_PSR_MNG_FLEX_DW_L(i), 0);
		wr32(hw, NGBE_PSR_MNG_FLEX_DW_H(i), 0);
		wr32(hw, NGBE_PSR_MNG_FLEX_MSK(i), 0);
	}
	wr32(hw, NGBE_PSR_LAN_FLEX_SEL, 0);
	for (i = 0; i < 16; i++) {
		wr32(hw, NGBE_PSR_LAN_FLEX_DW_L(i), 0);
		wr32(hw, NGBE_PSR_LAN_FLEX_DW_H(i), 0);
		wr32(hw, NGBE_PSR_LAN_FLEX_MSK(i), 0);
	}

	/* set pause frame dst mac addr (01:80:C2:00:00:01) */
	wr32(hw, NGBE_RDB_PFCMACDAL, 0xC2000001);
	wr32(hw, NGBE_RDB_PFCMACDAH, 0x0180);

	wr32(hw, NGBE_MDIO_CLAUSE_SELECT, 0xF);

	if (hw->gpio_ctl == 1) {
		/* gpio0 is used to power on/off control */
		wr32(hw, NGBE_GPIO_DDR, 0x1);
		wr32(hw, NGBE_GPIO_DR, NGBE_GPIO_DR_0);
	}

	ngbe_init_thermal_sensor_thresh(hw);

	return 0;
}

/* ngbe_do_lanrst - issue a per-function LAN reset for this lan_id */
void ngbe_do_lanrst(struct ngbe_hw *hw)
{
	struct ngbe_adapter *adapter = hw->back;

	wr32(hw, NGBE_MIS_RST,
	     BIT(hw->bus.lan_id + 1) |
	     rd32(hw, NGBE_MIS_RST));
	msleep(20);
	/*when veto set, lan reset not reset phy, so won't setup
	phy, we use a flag record it*/
	if (ngbe_check_reset_blocked(hw))
		set_bit(__NGBE_NO_PHY_SET, &adapter->state);
	else
		clear_bit(__NGBE_NO_PHY_SET, &adapter->state);
}

/**
 * ngbe_reset_hw - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
 * reset.
 **/
int ngbe_reset_hw(struct ngbe_hw *hw)
{
	struct ngbe_mac_info *mac = &hw->mac;
	struct ngbe_adapter *adapter = hw->back;
	u32 rst_delay, reset_status;
	int status, i;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != 0)
		return status;

	/* copper parts use the PHY-based link ops */
	if (ngbe_get_media_type(hw) == ngbe_media_type_copper) {
		mac->ops.setup_link = ngbe_setup_copper_link;
		mac->ops.get_link_capabilities =
			ngbe_get_copper_link_capabilities;
	}

	/*
	 * Issue global reset to the MAC. Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it. If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	if (hw->force_full_reset) {
		/* firmware-programmed reset delay, in 100 ms units */
		rst_delay = (rd32(hw, NGBE_MIS_RST_ST) &
			     NGBE_MIS_RST_ST_RST_INIT) >>
			     NGBE_MIS_RST_ST_RST_INI_SHIFT;
		if (hw->reset_type == NGBE_SW_RESET) {
			/* poll until the device-reset state clears */
			for (i = 0; i < rst_delay + 20; i++) {
				reset_status =
					rd32(hw, NGBE_MIS_RST_ST);
				if (!(reset_status &
				      NGBE_MIS_RST_ST_DEV_RST_ST_MASK))
					break;
				msleep(100);
			}

			if (reset_status & NGBE_MIS_RST_ST_DEV_RST_ST_MASK) {
				status = NGBE_ERR_RESET_FAILED;
				DEBUGOUT("software reset polling failed to "
					 "complete.\n");
				return status;
			}
			status = ngbe_check_flash_load(hw,
					NGBE_SPI_ILDR_STATUS_SW_RESET);
			if (status != 0)
				return status;
		} else if (hw->reset_type == NGBE_GLOBAL_RESET) {
#ifndef _WIN32
			/* global reset wipes PCI config space: wait it out,
			 * then restore and re-save config state
			 */
			adapter = (struct ngbe_adapter *)hw->back;
			msleep(100 * rst_delay + 2000);
			pci_restore_state(adapter->pdev);
			pci_save_state(adapter->pdev);
			pci_wake_from_d3(adapter->pdev, false);

#endif /*_WIN32*/
		}
	} else {
		/* light-weight per-LAN reset, then park the PHY */
		ngbe_do_lanrst(hw);
		if (!((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA)) {
			wr32(hw, NGBE_MDIO_CLAUSE_SELECT, 0xF);
			hw->phy.ops.phy_suspend(hw);
		}
	}

	status = ngbe_reset_misc(hw);
	if (status != 0)
		return status;

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table. Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = NGBE_SP_RAR_ENTRIES;
	hw->mac.ops.init_rx_addrs(hw);

	/* reset may have cleared bus mastering; re-enable it */
	pci_set_master(((struct ngbe_adapter *)hw->back)->pdev);

	return status;
}

/*
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out NGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15
 */
#define NGBE_ATR_COMMON_HASH_KEY \
	(NGBE_ATR_BUCKET_HASH_KEY & NGBE_ATR_SIGNATURE_HASH_KEY)
#define NGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (NGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (NGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (NGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (NGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (NGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (NGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)


#define NGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (NGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (NGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)

/*
 * These two macros are meant to address the fact that we have registers
 * that are either all or in part big-endian.  As a result on big-endian
 * systems we will end up byte swapping the value to little-endian before
 * it is byte swapped again and written to the hardware in the original
 * big-endian format.
 */
#define NGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

#define NGBE_WRITE_REG_BE32(a, reg, value) \
	wr32((a), (reg), NGBE_STORE_AS_BE32(NGBE_NTOHL(value)))

#define NGBE_STORE_AS_BE16(_value) \
	NGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))

/**
 * ngbe_start_hw - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware using the generic start_hw function
 * and the generation start_hw function.
 * Then performs revision-specific operations, if any.
 **/
int ngbe_start_hw(struct ngbe_hw *hw)
{
	int ret_val = 0;

	/* Set the media type */
	hw->phy.media_type = hw->mac.ops.get_media_type(hw);

	/* PHY ops initialization must be done in reset_hw() */

	/* Clear the VLAN filter table */
	hw->mac.ops.clear_vfta(hw);

	/* Clear statistics registers */
	hw->mac.ops.clear_hw_cntrs(hw);

	NGBE_WRITE_FLUSH(hw);

	/* Setup flow control */
	ret_val = hw->mac.ops.setup_fc(hw);

	/* Clear adapter stopped flag */
	hw->adapter_stopped = false;

	/* We need to run link autotry after the driver loads */
	hw->mac.autotry_restart = true;

	return ret_val;
}

/**
 * ngbe_enable_rx_dma - Enable the Rx DMA unit on emerald
 * @hw: pointer to hardware structure
 * @regval: register value to write to RXCTRL
 *
 * Enables the Rx DMA unit for emerald
 **/
int ngbe_enable_rx_dma(struct ngbe_hw *hw, u32 regval)
{

	/*
	 * Workaround for emerald silicon errata when enabling the Rx datapath.
	 * If traffic is incoming before we enable the Rx unit, it could hang
	 * the Rx DMA unit. Therefore, make sure the security engine is
	 * completely disabled prior to enabling the Rx unit.
	 */

	hw->mac.ops.disable_sec_rx_path(hw);

	if (regval & NGBE_RDB_PB_CTL_PBEN)
		hw->mac.ops.enable_rx(hw);
	else
		hw->mac.ops.disable_rx(hw);

	hw->mac.ops.enable_sec_rx_path(hw);

	return 0;
}

/**
 * ngbe_init_flash_params - Initialize flash params
 * @hw: pointer to hardware structure
 *
 * Initializes the EEPROM parameters ngbe_eeprom_info within the
 * ngbe_hw struct in order to set up EEPROM access.
 **/
int ngbe_init_flash_params(struct ngbe_hw *hw)
{
	struct ngbe_flash_info *flash = &hw->flash;
	u32 eec;

	/* fixed 16 MB flash assumed (0x1000000 bytes), not probed */
	eec = 0x1000000;
	flash->semaphore_delay = 10;
	flash->dword_size = (eec >> 2);
	flash->address_bits = 24;
	DEBUGOUT3("FLASH params: size = %d, address bits: %d\n",
		  flash->dword_size,
		  flash->address_bits);

	return 0;
}

/**
 * ngbe_read_flash_buffer - Read FLASH dword(s) using
 * fastest available method
 *
 * @hw: pointer to hardware structure
 * @offset: offset of dword in EEPROM to read
 * @dwords: number of dwords
 * @data: dword(s) read from the EEPROM
 *
 * Retrieves 32 bit dword(s) read from EEPROM
 **/
int ngbe_read_flash_buffer(struct ngbe_hw *hw, u32 offset,
			   u32 dwords, u32 *data)
{
	int status = 0;
	u32 i;

	hw->eeprom.ops.init_params(hw);

	/* NOTE(review): '>=' also rejects a read that ends exactly at the
	 * last flash dword; presumably '>' was intended — confirm.
	 */
	if (!dwords || offset + dwords >= hw->flash.dword_size) {
		status = NGBE_ERR_INVALID_ARGUMENT;
		ERROR_REPORT1(NGBE_ERROR_ARGUMENT, "Invalid FLASH arguments");
		return status;
	}

	for (i = 0; i < dwords; i++) {
		/* NOTE(review): this *read* routine writes data[i] into
		 * SPI_DATA and never reads anything back, while the write
		 * routine below reads SPI_DATA into data[i] — the data
		 * direction looks swapped between the two; verify against
		 * the SPI controller documentation.
		 */
		wr32(hw, NGBE_SPI_DATA, data[i]);
		wr32(hw, NGBE_SPI_CMD,
		     NGBE_SPI_CMD_ADDR(offset + i) |
		     NGBE_SPI_CMD_CMD(0x0));

		/* wait for the SPI operation to complete */
		status = po32m(hw, NGBE_SPI_STATUS,
			       NGBE_SPI_STATUS_OPDONE, NGBE_SPI_STATUS_OPDONE,
			       NGBE_SPI_TIMEOUT, 0);
		if (status) {
			DEBUGOUT("FLASH read timed out\n");
			break;
		}
	}

	return status;
}

/**
 * ngbe_write_flash_buffer - Write FLASH dword(s) using
 * fastest available method
 *
 * @hw: pointer to hardware structure
 * @offset: offset of dword in EEPROM to write
 * @dwords: number of
dwords
 * @data: dword(s) write from to EEPROM
 *
 **/
int ngbe_write_flash_buffer(struct ngbe_hw *hw, u32 offset,
			    u32 dwords, u32 *data)
{
	int status = 0;
	u32 i;

	hw->eeprom.ops.init_params(hw);

	/* NOTE(review): '>=' also rejects a write ending exactly at the
	 * last flash dword; presumably '>' was intended — confirm.
	 */
	if (!dwords || offset + dwords >= hw->flash.dword_size) {
		status = NGBE_ERR_INVALID_ARGUMENT;
		ERROR_REPORT1(NGBE_ERROR_ARGUMENT, "Invalid FLASH arguments");
		return status;
	}

	for (i = 0; i < dwords; i++) {
		/* NOTE(review): this *write* routine issues the command but
		 * then reads SPI_DATA back into data[i], while the read
		 * routine above writes data[i] out — the data direction
		 * looks swapped between the two; verify.
		 */
		wr32(hw, NGBE_SPI_CMD,
		     NGBE_SPI_CMD_ADDR(offset + i) |
		     NGBE_SPI_CMD_CMD(0x1));

		/* wait for the SPI operation to complete */
		status = po32m(hw, NGBE_SPI_STATUS,
			       NGBE_SPI_STATUS_OPDONE, NGBE_SPI_STATUS_OPDONE,
			       NGBE_SPI_TIMEOUT, 0);
		if (status != 0) {
			DEBUGOUT("FLASH write timed out\n");
			break;
		}
		data[i] = rd32(hw, NGBE_SPI_DATA);
	}

	return status;
}

/**
 * ngbe_init_eeprom_params - Initialize EEPROM params
 * @hw: pointer to hardware structure
 *
 * Initializes the EEPROM parameters ngbe_eeprom_info within the
 * ngbe_hw struct in order to set up EEPROM access.
 **/
int ngbe_init_eeprom_params(struct ngbe_hw *hw)
{
	struct ngbe_eeprom_info *eeprom = &hw->eeprom;
	u16 eeprom_size;
	int status = 0;

	if (eeprom->type == ngbe_eeprom_uninitialized) {
		eeprom->semaphore_delay = 10;
		eeprom->type = ngbe_eeprom_none;

		/* flash-bypass strap clear => EEPROM content lives in
		 * the SPI flash
		 */
		if (!(rd32(hw, NGBE_SPI_STATUS) &
		      NGBE_SPI_STATUS_FLASH_BYPASS)) {
			eeprom->type = ngbe_flash;
			eeprom_size = 4096;
			eeprom->word_size = eeprom_size >> 1;

			DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
				  eeprom->type, eeprom->word_size);
		}
	}

	/* software-owned region begins at word 0x80 */
	eeprom->sw_region_offset = 0x80;

	return status;
}

/**
 * ngbe_read_ee_hostif_data - Read EEPROM word using a host interface cmd
 * assuming that the semaphore is already obtained.
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 *
 * Reads a 16 bit word from the EEPROM using the hostif.
 **/
static int ngbe_read_ee_hostif_data(struct ngbe_hw *hw, u16 offset,
				    u16 *data)
{
	int status;
	struct ngbe_hic_read_shadow_ram buffer;

	buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
	buffer.hdr.req.buf_lenh = 0;
	buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;

	/* convert offset from words to bytes */
	buffer.address = NGBE_CPU_TO_BE32(offset * 2);
	/* one word */
	buffer.length = NGBE_CPU_TO_BE16(sizeof(u16));

	status = ngbe_host_interface_command(hw, (u32 *)&buffer,
					     sizeof(buffer),
					     NGBE_HI_COMMAND_TIMEOUT, false);

	if (status)
		return status;
	/* result is returned through the management mailbox */
	if (ngbe_check_mng_access(hw))
		*data = (u16)rd32a(hw, NGBE_MNG_MBOX,
				   FW_NVM_DATA_OFFSET);
	else {
		status = NGBE_ERR_MNG_ACCESS_FAILED;
		return status;
	}


	return 0;
}

/*
 * ngbe_eepromcheck_cap - ask firmware for the EEPROM checksum status.
 * @offset and @data are currently unused by this implementation.
 * Returns 0 when the firmware reports the checksum passed.
 */
int ngbe_eepromcheck_cap(struct ngbe_hw *hw, u16 offset,
			 u32 *data)
{
	int tmp;
	int status;
	struct ngbe_hic_read_shadow_ram buffer;

	buffer.hdr.req.cmd = FW_EEPROM_CHECK_STATUS;
	buffer.hdr.req.buf_lenh = 0;
	buffer.hdr.req.buf_lenl = 0;
	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;

	/* convert offset from words to bytes */
	buffer.address = 0;
	/* one word */
	buffer.length = 0;


	status = ngbe_host_interface_command(hw, (u32 *)&buffer,
					     sizeof(buffer),
					     NGBE_HI_COMMAND_TIMEOUT, false);

	if (status)
		return status;
	if (ngbe_check_mng_access(hw)) {
		tmp = (u32)rd32a(hw, NGBE_MNG_MBOX, 1);
		if (tmp == NGBE_CHECKSUM_CAP_ST_PASS) {
			status = 0;
		} else
			status = NGBE_ERR_EEPROM_CHECKSUM;
	} else {
		status = NGBE_ERR_MNG_ACCESS_FAILED;
		return status;

	}

	return status;
}

/* ngbe_phy_led_oem_chk - query firmware for an OEM PHY LED configuration */
static int ngbe_phy_led_oem_chk(struct ngbe_hw *hw, u32 *data)
{
	int tmp;
	int status;
	struct ngbe_hic_read_shadow_ram buffer;

	buffer.hdr.req.cmd = FW_PHY_LED_CONF;
	buffer.hdr.req.buf_lenh = 0;
	buffer.hdr.req.buf_lenl = 0;
	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;

	/* convert offset from words to bytes */
	buffer.address
= 0;
	/* one word */
	buffer.length = 0;

	status = ngbe_host_interface_command(hw, (u32 *)&buffer,
					     sizeof(buffer),
					     NGBE_HI_COMMAND_TIMEOUT, false);

	if (status)
		return status;

	if (ngbe_check_mng_access(hw)) {
		/* mailbox word 1 carries the status, word 2 the LED config */
		tmp = (u32)rd32a(hw, NGBE_MNG_MBOX, 1);
		if (tmp == NGBE_CHECKSUM_CAP_ST_PASS) {
			tmp = (u32)rd32a(hw, NGBE_MNG_MBOX, 2);
			*data = tmp;
			status = 0;
		} else if (tmp == NGBE_CHECKSUM_CAP_ST_FAIL) {
			/* NOTE(review): bare -1 rather than an NGBE_ERR_*
			 * code — callers must treat any non-zero as failure.
			 */
			*data = tmp;
			status = -1;
		} else {
			status = NGBE_ERR_EEPROM_CHECKSUM;
		}
	} else {
		status = NGBE_ERR_MNG_ACCESS_FAILED;
		return status;
	}

	return status;
}

/* ngbe_phy_signal_set - ask firmware to (re)apply the PHY signal settings */
int ngbe_phy_signal_set(struct ngbe_hw *hw)
{
	int status;
	struct ngbe_hic_read_shadow_ram buffer;

	buffer.hdr.req.cmd = FW_PHY_SIGNAL;
	buffer.hdr.req.buf_lenh = 0;
	buffer.hdr.req.buf_lenl = 0;
	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;

	/* convert offset from words to bytes */
	buffer.address = 0;
	/* one word */
	buffer.length = 0;

	status = ngbe_host_interface_command(hw, (u32 *)&buffer,
					     sizeof(buffer),
					     NGBE_HI_COMMAND_TIMEOUT, false);

	return status;
}


/**
 * ngbe_read_ee_hostif - Read EEPROM word using a host interface cmd
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 *
 * Reads a 16 bit word from the EEPROM using the hostif.
 **/
int ngbe_read_ee_hostif(struct ngbe_hw *hw, u16 offset,
			u16 *data)
{
	int status = 0;

	/* hold the SW/FW flash semaphore across the whole operation */
	if (hw->mac.ops.acquire_swfw_sync(hw,
					  NGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) {
		status = ngbe_read_ee_hostif_data(hw, offset, data);
		hw->mac.ops.release_swfw_sync(hw,
					      NGBE_MNG_SWFW_SYNC_SW_FLASH);
	} else {
		status = NGBE_ERR_SWFW_SYNC;
	}

	return status;
}

/**
 * ngbe_read_ee_hostif_buffer- Read EEPROM word(s) using hostif
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @words: number of words
 * @data: word(s) read from the EEPROM
 *
 * Reads a 16 bit word(s) from the EEPROM using the hostif.
 **/
int ngbe_read_ee_hostif_buffer(struct ngbe_hw *hw,
			       u16 offset, u16 words, u16 *data)
{
	struct ngbe_hic_read_shadow_ram buffer;
	u32 current_word = 0;
	u16 words_to_read;
	int status;
	u32 i;
	u32 value = 0;

	/* Take semaphore for the entire operation. */
	status = hw->mac.ops.acquire_swfw_sync(hw,
					       NGBE_MNG_SWFW_SYNC_SW_FLASH);
	if (status) {
		DEBUGOUT("EEPROM read buffer - semaphore failed\n");
		return status;
	}
	while (words) {
		/* read in firmware-buffer-sized chunks */
		if (words > FW_MAX_READ_BUFFER_SIZE / 2)
			words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
		else
			words_to_read = words;

		buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
		buffer.hdr.req.buf_lenh = 0;
		buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
		buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;

		/* convert offset from words to bytes */
		buffer.address = NGBE_CPU_TO_BE32((offset + current_word) * 2);
		buffer.length = NGBE_CPU_TO_BE16(words_to_read * 2);

		status = ngbe_host_interface_command(hw, (u32 *)&buffer,
						     sizeof(buffer),
						     NGBE_HI_COMMAND_TIMEOUT,
						     false);

		if (status) {
			DEBUGOUT("Host interface command failed\n");
			goto out;
		}

		/* unpack two 16-bit words from each 32-bit mailbox read;
		 * note i is advanced both by the loop and in the body
		 */
		for (i = 0; i < words_to_read; i++) {
			u32 reg = NGBE_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) +
				  2 * i;
			if (ngbe_check_mng_access(hw))
				value = rd32(hw, reg);
			else {
				status = NGBE_ERR_MNG_ACCESS_FAILED;
				goto out;
			}
			data[current_word] = (u16)(value & 0xffff);
			current_word++;
			i++;
			if (i < words_to_read) {
				value >>= 16;
				data[current_word] = (u16)(value & 0xffff);
				current_word++;
			}
		}
		words -= words_to_read;
	}

out:
	hw->mac.ops.release_swfw_sync(hw,
				      NGBE_MNG_SWFW_SYNC_SW_FLASH);
	return status;
}


/**
 * ngbe_read_ee_hostif_data32 - Read EEPROM dword using a host interface cmd
 * assuming that the semaphore is already obtained.
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: dword read from the EEPROM
 *
 * Reads a 32 bit word from the EEPROM using the hostif.
 **/
static int ngbe_read_ee_hostif_data32(struct ngbe_hw *hw, u16 offset,
				      u32 *data)
{
	int status;
	struct ngbe_hic_read_shadow_ram buffer;

	buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
	buffer.hdr.req.buf_lenh = 0;
	buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;

	/* convert offset from words to bytes */
	buffer.address = NGBE_CPU_TO_BE32(offset * 2);
	/* one dword */
	buffer.length = NGBE_CPU_TO_BE16(sizeof(u32));

	status = ngbe_host_interface_command(hw, (u32 *)&buffer,
					     sizeof(buffer),
					     NGBE_HI_COMMAND_TIMEOUT, false);

	if (status)
		return status;
	if (ngbe_check_mng_access(hw))
		*data = (u32)rd32a(hw, NGBE_MNG_MBOX, FW_NVM_DATA_OFFSET);
	else {
		status = NGBE_ERR_MNG_ACCESS_FAILED;
		return status;
	}
	return 0;
}


/**
 * ngbe_read_ee_hostif32 - Read EEPROM dword using a host interface cmd
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: dword read from the EEPROM
 *
 * Reads a 32 bit word from the EEPROM using the hostif.
+ **/ +int ngbe_read_ee_hostif32(struct ngbe_hw *hw, u16 offset, + u32 *data) +{ + int status = 0; + + if (hw->mac.ops.acquire_swfw_sync(hw, NGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) { + status = ngbe_read_ee_hostif_data32(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, + NGBE_MNG_SWFW_SYNC_SW_FLASH); + } else { + status = NGBE_ERR_SWFW_SYNC; + } + + return status; +} + + +/** + * ngbe_write_ee_hostif_data - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word to write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. + **/ +static int ngbe_write_ee_hostif_data(struct ngbe_hw *hw, u16 offset, + u16 data) +{ + int status; + struct ngbe_hic_write_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* one word */ + buffer.length = NGBE_CPU_TO_BE16(sizeof(u16)); + buffer.data = data; + buffer.address = NGBE_CPU_TO_BE32(offset * 2); + + status = ngbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + NGBE_HI_COMMAND_TIMEOUT, false); + + return status; +} + +/** + * ngbe_write_ee_hostif - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word to write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. 
+ **/ +int ngbe_write_ee_hostif(struct ngbe_hw *hw, u16 offset, + u16 data) +{ + int status = 0; + + if (hw->mac.ops.acquire_swfw_sync(hw, + NGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) { + status = ngbe_write_ee_hostif_data(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, + NGBE_MNG_SWFW_SYNC_SW_FLASH); + } else { + DEBUGOUT("write ee hostif failed to get semaphore"); + status = NGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ngbe_write_ee_hostif_data32 - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word to write to the EEPROM + * + * Write a 32 bit word to the EEPROM using the hostif. + **/ +static int ngbe_write_ee_hostif_data32(struct ngbe_hw *hw, u16 offset, + u32 data) +{ + int status; + struct ngbe_hic_write_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* one word */ + buffer.length = NGBE_CPU_TO_BE16(sizeof(u32)); + buffer.data = data; + buffer.address = NGBE_CPU_TO_BE32(offset * 2); + + status = ngbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + NGBE_HI_COMMAND_TIMEOUT, false); + + return status; +} + +/** ngbe_write_ee_hostif32 - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word to write to the EEPROM + * + * Write a 32 bit word to the EEPROM using the hostif. 
+**/ +int ngbe_write_ee_hostif32(struct ngbe_hw *hw, u16 offset, + u32 data) +{ + int status = 0; + + + if (hw->mac.ops.acquire_swfw_sync(hw, + NGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) { + status = ngbe_write_ee_hostif_data32(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, + NGBE_MNG_SWFW_SYNC_SW_FLASH); + } else { + DEBUGOUT("write ee hostif failed to get semaphore"); + status = NGBE_ERR_SWFW_SYNC; + } + + return status; +} + + +/** + * ngbe_write_ee_hostif_buffer - Write EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) write to the EEPROM + * + * Write a 16 bit word(s) to the EEPROM using the hostif. + **/ +int ngbe_write_ee_hostif_buffer(struct ngbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + int status = 0; + u16 i = 0; + + /* Take semaphore for the entire operation. */ + status = hw->mac.ops.acquire_swfw_sync(hw, + NGBE_MNG_SWFW_SYNC_SW_FLASH); + if (status != 0) { + DEBUGOUT("EEPROM write buffer - semaphore failed\n"); + return status; + } + + for (i = 0; i < words; i++) { + status = ngbe_write_ee_hostif_data(hw, offset + i, + data[i]); + + if (status != 0) { + DEBUGOUT("Eeprom buffered write failed\n"); + break; + } + } + + hw->mac.ops.release_swfw_sync(hw, NGBE_MNG_SWFW_SYNC_SW_FLASH); + return status; +} + + + +/** + * ngbe_calc_eeprom_checksum - Calculates and returns the checksum + * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +int ngbe_calc_eeprom_checksum(struct ngbe_hw *hw) +{ + u16 *buffer = NULL; + u32 buffer_size = 0; + + u16 *eeprom_ptrs = NULL; + u16 *local_buffer; + int status; + u16 checksum = 0; + u16 i; + + hw->eeprom.ops.init_params(hw); + + if (!buffer) { + eeprom_ptrs = (u16 *)vmalloc(NGBE_EEPROM_LAST_WORD * + sizeof(u16)); + if (!eeprom_ptrs) + return NGBE_ERR_NO_SPACE; + /* Read pointer area */ + status = ngbe_read_ee_hostif_buffer(hw, 0, + 
NGBE_EEPROM_LAST_WORD, + eeprom_ptrs); + if (status) { + DEBUGOUT("Failed to read EEPROM image\n"); + return status; + } + local_buffer = eeprom_ptrs; + } else { + if (buffer_size < NGBE_EEPROM_LAST_WORD) + return NGBE_ERR_PARAM; + local_buffer = buffer; + } + + for (i = 0; i < NGBE_EEPROM_LAST_WORD; i++) + if (i != hw->eeprom.sw_region_offset + NGBE_EEPROM_CHECKSUM) + checksum += local_buffer[i]; + + checksum = (u16)NGBE_EEPROM_SUM - checksum; + if (eeprom_ptrs) + vfree(eeprom_ptrs); + + return (int)checksum; +} + +/** + * ngbe_update_eeprom_checksum - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. + **/ +int ngbe_update_eeprom_checksum(struct ngbe_hw *hw) +{ + int status; + u16 checksum = 0; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = ngbe_read_ee_hostif(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + status = ngbe_calc_eeprom_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ngbe_write_ee_hostif(hw, NGBE_EEPROM_CHECKSUM, + checksum); + + return status; +} + +/** + * ngbe_validate_eeprom_checksum - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +int ngbe_validate_eeprom_checksum(struct ngbe_hw *hw, + u16 *checksum_val) +{ + int status; + u16 checksum; + u16 read_checksum = 0; + + /* Read the first word from the EEPROM. 
If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ngbe_read_ee_hostif(hw, hw->eeprom.sw_region_offset + + NGBE_EEPROM_CHECKSUM, + &read_checksum); + if (status) + return status; + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + status = NGBE_ERR_EEPROM_CHECKSUM; + ERROR_REPORT1(NGBE_ERROR_INVALID_STATE, + "Invalid EEPROM checksum\n"); + } + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + + return status; +} + + +/** + * ngbe_check_mac_link - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +static int ngbe_check_mac_link(struct ngbe_hw *hw, u32 *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + struct ngbe_adapter *adapter = hw->back; + bool need_restart = false; + u16 value = 0, speed_sta; + u32 i, speed_store; + int status = 0; + + if (hw->mac.autoneg) + speed_store = hw->phy.autoneg_advertised; + else + speed_store = hw->phy.force_speed; + + if ((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA) { + *link_up = true; + *speed = NGBE_LINK_SPEED_1GB_FULL; + return status; + } + + if (link_up_wait_to_complete) { + for (i = 0; i < NGBE_LINK_UP_TIME; i++) { + status = hw->phy.ops.read_reg(hw, 0x1A, 0xA43, &value); + if (!status && (value & 0x4)) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + } + } else { + status = 
hw->phy.ops.read_reg(hw, 0x1A, 0xA43, &value); + if (!status && (value & 0x4)) { + *link_up = true; + } else { + *link_up = false; + } + } + + speed_sta = value & 0x38; + if (*link_up) { + switch (speed_sta) { + case 0x28: + *speed = NGBE_LINK_SPEED_1GB_FULL; + if (NGBE_LINK_RETRY == 1) + hw->restart_an = 0; + break; + case 0x18: + *speed = NGBE_LINK_SPEED_100_FULL; + if (NGBE_LINK_RETRY == 1) + need_restart = true; + break; + case 0x8: + *speed = NGBE_LINK_SPEED_10_FULL; + if (NGBE_LINK_RETRY == 1) + need_restart = true; + break; + default: + break; + } + } else + *speed = NGBE_LINK_SPEED_UNKNOWN; + + if (NGBE_LINK_RETRY == 1 && + hw->restart_an <= 2 && + need_restart == true && + (speed_store & NGBE_LINK_SPEED_1GB_FULL)) { + value = NGBE_MDI_PHY_RESTART_AN | NGBE_MDI_PHY_ANE; + hw->phy.ops.write_reg(hw, 0, 0, value); + hw->restart_an++; + e_info(drv, "Restart an is %d\n", hw->restart_an); + } + + if (NGBE_POLL_LINK_STATUS != 1) + return status; + + if (*speed == NGBE_LINK_SPEED_1GB_FULL) { + status = hw->phy.ops.read_reg(hw, 0xA, 0x0, &value); + if (!status && !(value & BIT(13))) + *link_up = false; + } + + return status; +} + +static int ngbe_check_mac_link_mdi(struct ngbe_hw *hw, + u32 *speed, + bool *link_up, + bool link_up_wait_to_complete) +{ + u32 i; + u16 value = 0; + int status = 0; + u16 speed_sta = 0; + + if (hw->phy.type == ngbe_phy_m88e1512) + /* select page 0 */ + status = hw->phy.ops.write_reg_mdi(hw, 22, 0, 0); + else + /* select page 1 */ + status = hw->phy.ops.write_reg_mdi(hw, 22, 0, 1); + status = hw->phy.ops.read_reg_mdi(hw, 17, 0, &value); + if (link_up_wait_to_complete) { + for (i = 0; i < NGBE_LINK_UP_TIME; i++) { + status = hw->phy.ops.read_reg_mdi(hw, 17, 0, &value); + if (value & 0x400) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + } + } else { + status = hw->phy.ops.read_reg_mdi(hw, 17, 0, &value); + if (value & 0x400) { + *link_up = true; + } else { + *link_up = false; + } + } + + speed_sta = value & 
0xC000; + if (*link_up) { + if ( speed_sta == 0x8000) { + *speed = NGBE_LINK_SPEED_1GB_FULL; + } else if ( speed_sta == 0x4000) { + *speed = NGBE_LINK_SPEED_100_FULL; + } else if ( speed_sta == 0x0000) { + *speed = NGBE_LINK_SPEED_10_FULL; + } + } else + *speed = NGBE_LINK_SPEED_UNKNOWN; + + return status; +} + +static int ngbe_check_mac_link_yt8521s(struct ngbe_hw *hw, + u32 *speed, + bool *link_up, + bool link_up_wait_to_complete) +{ + u32 i; + u16 value = 0; + int status = 0; + u16 speed_sta = 0; + unsigned long flags; + + if (link_up_wait_to_complete) { + for (i = 0; i < NGBE_LINK_UP_TIME; i++) { + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x11, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + if (value & 0x400) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + } + } else { + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x11, 0, &value); + + if (value & 0x400) + *link_up = true; + else { + *link_up = false; + + ngbe_phy_read_reg_mdi(hw, 0x11, 0, &value); + if (value & 0x400) + *link_up = true; + else + *link_up = false; + } + spin_unlock_irqrestore(&hw->phy_lock, flags); + } + + speed_sta = value & 0xC000; + if (*link_up) { + if ( speed_sta == 0x8000) { + *speed = NGBE_LINK_SPEED_1GB_FULL; + wr32m(hw, NGBE_CFG_LED_CTL, 0xE | BIT(17), BIT(1) | BIT(17)); + } else if ( speed_sta == 0x4000) { + *speed = NGBE_LINK_SPEED_100_FULL; + wr32m(hw, NGBE_CFG_LED_CTL, 0xE | BIT(17), BIT(2) | BIT(17)); + } else if ( speed_sta == 0x0000) { + *speed = NGBE_LINK_SPEED_10_FULL; + wr32m(hw, NGBE_CFG_LED_CTL, 0xE | BIT(17), BIT(3) | BIT(17)); + } + } else { + *speed = NGBE_LINK_SPEED_UNKNOWN; + wr32m(hw, NGBE_CFG_LED_CTL, 0xE | BIT(17), 0); + } + return status; +} + +/** + * ngbe_setup_eee - Enable/disable EEE support + * @hw: pointer to the HW structure + * @enable_eee: boolean flag to enable EEE + * + * Enable/disable EEE based on enable_eee 
flag. + * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C + * are modified. + * + **/ +int ngbe_setup_eee(struct ngbe_hw *hw, bool enable_eee) +{ + /* fix eee */ + + return 0; +} + +void ngbe_init_mac_link_ops(struct ngbe_hw *hw) +{ + struct ngbe_mac_info *mac = &hw->mac; + + mac->ops.setup_link = ngbe_setup_mac_link; +} + +/** + * ngbe_init_ops - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for emerald. + * Does not touch the hardware. + **/ + +void ngbe_init_ops(struct ngbe_hw *hw) +{ + struct ngbe_mac_info *mac = &hw->mac; + struct ngbe_phy_info *phy = &hw->phy; + + ngbe_init_phy_ops_common(hw); + ngbe_init_ops_common(hw); + + if (hw->phy.type == ngbe_phy_m88e1512 || + hw->phy.type == ngbe_phy_m88e1512_sfi || + hw->phy.type == ngbe_phy_m88e1512_unknown) { + phy->ops.read_reg_mdi = ngbe_phy_read_reg_mdi; + phy->ops.write_reg_mdi = ngbe_phy_write_reg_mdi; + phy->ops.setup_link = ngbe_phy_setup_link_m88e1512; + phy->ops.reset = ngbe_phy_reset_m88e1512; + phy->ops.phy_suspend = ngbe_mv_suspend; + phy->ops.check_event = ngbe_phy_check_event_m88e1512; + phy->ops.get_adv_pause = ngbe_phy_get_advertised_pause_m88e1512; + phy->ops.get_lp_adv_pause = ngbe_phy_get_lp_advertised_pause_m88e1512; + phy->ops.set_adv_pause = ngbe_phy_set_pause_advertisement_m88e1512; + + mac->ops.check_link = ngbe_check_mac_link_mdi; + } else if (hw->phy.type == ngbe_phy_yt8521s_sfi) { + phy->ops.read_reg_mdi = ngbe_phy_read_reg_mdi; + phy->ops.write_reg_mdi = ngbe_phy_write_reg_mdi; + phy->ops.setup_link = ngbe_phy_setup_link_yt8521s; + phy->ops.reset = ngbe_phy_reset_yt8521s; + phy->ops.phy_suspend = ngbe_yt_suspend; + phy->ops.check_event = ngbe_phy_check_event_yt8521s; + phy->ops.get_adv_pause = ngbe_phy_get_advertised_pause_yt8521s; + phy->ops.get_lp_adv_pause = ngbe_phy_get_lp_advertised_pause_yt8521s; + phy->ops.set_adv_pause = ngbe_phy_set_pause_advertisement_yt8521s; + + 
mac->ops.check_link = ngbe_check_mac_link_yt8521s; + } +} + +void ngbe_init_ops_common(struct ngbe_hw *hw) +{ + struct ngbe_mac_info *mac = &hw->mac; + struct ngbe_eeprom_info *eeprom = &hw->eeprom; + struct ngbe_flash_info *flash = &hw->flash; + + /* MAC */ + mac->ops.init_hw = ngbe_init_hw; + mac->ops.clear_hw_cntrs = ngbe_clear_hw_cntrs; + mac->ops.get_mac_addr = ngbe_get_mac_addr; + mac->ops.stop_adapter = ngbe_stop_adapter; + mac->ops.get_bus_info = ngbe_get_bus_info; + mac->ops.set_lan_id = ngbe_set_lan_id_multi_port_pcie; + mac->ops.acquire_swfw_sync = ngbe_acquire_swfw_sync; + mac->ops.release_swfw_sync = ngbe_release_swfw_sync; + mac->ops.reset_hw = ngbe_reset_hw; + mac->ops.get_media_type = ngbe_get_media_type; + mac->ops.disable_sec_rx_path = ngbe_disable_sec_rx_path; + mac->ops.enable_sec_rx_path = ngbe_enable_sec_rx_path; + mac->ops.enable_rx_dma = ngbe_enable_rx_dma; + mac->ops.start_hw = ngbe_start_hw; + mac->ops.get_device_caps = ngbe_get_device_caps; + mac->ops.setup_eee = ngbe_setup_eee; + + /* LEDs */ + mac->ops.led_on = ngbe_led_on; + mac->ops.led_off = ngbe_led_off; + + /* RAR, Multicast, VLAN */ + mac->ops.set_rar = ngbe_set_rar; + mac->ops.clear_rar = ngbe_clear_rar; + mac->ops.init_rx_addrs = ngbe_init_rx_addrs; + mac->ops.update_uc_addr_list = ngbe_update_uc_addr_list; + mac->ops.update_mc_addr_list = ngbe_update_mc_addr_list; + mac->ops.enable_mc = ngbe_enable_mc; + mac->ops.disable_mc = ngbe_disable_mc; + mac->ops.enable_rx = ngbe_enable_rx; + mac->ops.disable_rx = ngbe_disable_rx; + mac->ops.set_vmdq_san_mac = ngbe_set_vmdq_san_mac; + mac->ops.insert_mac_addr = ngbe_insert_mac_addr; + mac->rar_highwater = 1; + mac->ops.set_vfta = ngbe_set_vfta; + mac->ops.set_vlvf = ngbe_set_vlvf; + mac->ops.clear_vfta = ngbe_clear_vfta; + mac->ops.init_uta_tables = ngbe_init_uta_tables; + mac->ops.set_mac_anti_spoofing = ngbe_set_mac_anti_spoofing; + mac->ops.set_vlan_anti_spoofing = ngbe_set_vlan_anti_spoofing; + mac->ops.set_ethertype_anti_spoofing = 
+ ngbe_set_ethertype_anti_spoofing; + + /* Flow Control */ + mac->ops.fc_enable = ngbe_fc_enable; + mac->ops.setup_fc = ngbe_setup_fc; + + /* Link */ + mac->ops.get_link_capabilities = ngbe_get_link_capabilities; + mac->ops.check_link = ngbe_check_mac_link; + mac->ops.setup_rxpba = ngbe_set_rxpba; + + mac->mcft_size = NGBE_SP_MC_TBL_SIZE; + mac->vft_size = NGBE_SP_VFT_TBL_SIZE; + mac->num_rar_entries = NGBE_SP_RAR_ENTRIES; + mac->rx_pb_size = NGBE_SP_RX_PB_SIZE; + mac->max_rx_queues = NGBE_SP_MAX_RX_QUEUES; + mac->max_tx_queues = NGBE_SP_MAX_TX_QUEUES; + mac->max_msix_vectors = ngbe_get_pcie_msix_count(hw); + + mac->arc_subsystem_valid = (rd32(hw, NGBE_MIS_ST) & + NGBE_MIS_ST_MNG_INIT_DN) ? true : false; + + hw->mbx.ops.init_params = ngbe_init_mbx_params_pf; + + /* EEPROM */ + eeprom->ops.init_params = ngbe_init_eeprom_params; + eeprom->ops.calc_checksum = ngbe_calc_eeprom_checksum; + eeprom->ops.read = ngbe_read_ee_hostif; + eeprom->ops.read_buffer = ngbe_read_ee_hostif_buffer; + eeprom->ops.read32 = ngbe_read_ee_hostif32; + eeprom->ops.write = ngbe_write_ee_hostif; + eeprom->ops.write_buffer = ngbe_write_ee_hostif_buffer; + eeprom->ops.update_checksum = ngbe_update_eeprom_checksum; + eeprom->ops.validate_checksum = ngbe_validate_eeprom_checksum; + eeprom->ops.eeprom_chksum_cap_st = ngbe_eepromcheck_cap; + eeprom->ops.phy_led_oem_chk = ngbe_phy_led_oem_chk; + eeprom->ops.phy_signal_set = ngbe_phy_signal_set; + + /* FLASH */ + flash->ops.init_params = ngbe_init_flash_params; + flash->ops.read_buffer = ngbe_read_flash_buffer; + flash->ops.write_buffer = ngbe_write_flash_buffer; + + /* Manageability interface */ + mac->ops.set_fw_drv_ver = ngbe_set_fw_drv_ver; + + mac->ops.get_thermal_sensor_data = + ngbe_get_thermal_sensor_data; + mac->ops.init_thermal_sensor_thresh = + ngbe_init_thermal_sensor_thresh; +} + +int ngbe_hic_write_lldp(struct ngbe_hw *hw,u32 open) +{ + u32 tmp = 0, i = 0, lldp_flash_data = 0; + int status; + struct ngbe_adapter *adapter = hw->back; + 
struct pci_dev *pdev = adapter->pdev; + struct ngbe_hic_write_lldp buffer; + + buffer.hdr.cmd = 0xf3 - open; + buffer.hdr.buf_len = 0x1; + buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + buffer.hdr.checksum = FW_DEFAULT_CHECKSUM; + buffer.func = PCI_FUNC(pdev->devfn); + status = ngbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), 5000, false); + + for (; i < 0x1000 / sizeof(u32); i++) { + status = ngbe_flash_read_dword(hw, NGBE_LLDP_REG + i * 4, &tmp); + if(status) + return status; + if (tmp == U32_MAX) + break; + lldp_flash_data = tmp; + } + if (!!(lldp_flash_data & BIT(hw->bus.lan_id)) != open) + status = -EINVAL; + return status; + +} + +int ngbe_is_lldp(struct ngbe_hw *hw) +{ + u32 tmp = 0, lldp_flash_data = 0, i = 0; + struct ngbe_adapter *adapter = hw->back; + int status = 0; + + for (; i < 0x1000 / sizeof(u32); i++) { + status = ngbe_flash_read_dword(hw, NGBE_LLDP_REG + i * 4, &tmp); + if(status) + return status; + if (tmp == U32_MAX) + break; + lldp_flash_data = tmp; + } + if (lldp_flash_data & BIT(hw->bus.lan_id)) + adapter->eth_priv_flags |= NGBE_ETH_PRIV_FLAG_LLDP; + else + adapter->eth_priv_flags &= ~NGBE_ETH_PRIV_FLAG_LLDP; + + return 0; +} + + diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h new file mode 100644 index 000000000000..6c0c806d0439 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h @@ -0,0 +1,280 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + */ + +#ifndef _NGBE_HW_H_ +#define _NGBE_HW_H_ + +#define NGBE_EMC_INTERNAL_DATA 0x00 +#define NGBE_EMC_INTERNAL_THERM_LIMIT 0x20 +#define NGBE_EMC_DIODE1_DATA 0x01 +#define NGBE_EMC_DIODE1_THERM_LIMIT 0x19 +#define NGBE_EMC_DIODE2_DATA 0x23 +#define NGBE_EMC_DIODE2_THERM_LIMIT 0x1A +#define NGBE_EMC_DIODE3_DATA 0x2A +#define NGBE_EMC_DIODE3_THERM_LIMIT 0x30 + +#define SPI_CLK_DIV 3 + +#define SPI_CMD_ERASE_CHIP 4 // SPI erase chip command +#define SPI_CMD_ERASE_SECTOR 3 // SPI erase sector command +#define SPI_CMD_WRITE_DWORD 0 // SPI write a dword command +#define SPI_CMD_READ_DWORD 1 // SPI read a dword command +#define SPI_CMD_USER_CMD 5 // SPI user command + +#define SPI_CLK_CMD_OFFSET 28 // SPI command field offset in Command register +#define SPI_CLK_DIV_OFFSET 25 // SPI clock divide field offset in Command register + +#define SPI_TIME_OUT_VALUE 10000 +#define SPI_SECTOR_SIZE (4 * 1024) // FLASH sector size is 64KB +#define SPI_H_CMD_REG_ADDR 0x10104 // SPI Command register address +#define SPI_H_DAT_REG_ADDR 0x10108 // SPI Data register address +#define SPI_H_STA_REG_ADDR 0x1010c // SPI Status register address +#define SPI_H_USR_CMD_REG_ADDR 0x10110 // SPI User Command register address +#define SPI_CMD_CFG1_ADDR 0x10118 // Flash command configuration register 1 +#define MISC_RST_REG_ADDR 0x1000c // Misc reset register address +#define MGR_FLASH_RELOAD_REG_ADDR 0x101a0 // MGR reload flash read + +#define MAC_ADDR0_WORD0_OFFSET_1G 0x006000c // MAC Address for LAN0, stored in external FLASH +#define MAC_ADDR0_WORD1_OFFSET_1G 0x0060014 +#define MAC_ADDR1_WORD0_OFFSET_1G 0x006800c // MAC Address for LAN1, stored in external FLASH +#define MAC_ADDR1_WORD1_OFFSET_1G 0x0068014 +#define MAC_ADDR2_WORD0_OFFSET_1G 0x007000c // MAC Address for LAN2, stored in external FLASH +#define 
MAC_ADDR2_WORD1_OFFSET_1G 0x0070014 +#define MAC_ADDR3_WORD0_OFFSET_1G 0x007800c // MAC Address for LAN3, stored in external FLASH +#define MAC_ADDR3_WORD1_OFFSET_1G 0x0078014 +#define PRODUCT_SERIAL_NUM_OFFSET_1G 0x00f0000 // Product Serial Number, stored in external FLASH last sector + +struct ngbe_hic_read_cab { + union ngbe_hic_hdr2 hdr; + union { + u8 d8[252]; + u16 d16[126]; + u32 d32[63]; + } dbuf; +}; + + +/** + * Packet Type decoding + **/ +/* ngbe_dec_ptype.mac: outer mac */ +enum ngbe_dec_ptype_mac { + NGBE_DEC_PTYPE_MAC_IP = 0, + NGBE_DEC_PTYPE_MAC_L2 = 2, + NGBE_DEC_PTYPE_MAC_FCOE = 3, +}; + +/* ngbe_dec_ptype.[e]ip: outer&encaped ip */ +#define NGBE_DEC_PTYPE_IP_FRAG (0x4) +enum ngbe_dec_ptype_ip { + NGBE_DEC_PTYPE_IP_NONE = 0, + NGBE_DEC_PTYPE_IP_IPV4 = 1, + NGBE_DEC_PTYPE_IP_IPV6 = 2, + NGBE_DEC_PTYPE_IP_FGV4 = + (NGBE_DEC_PTYPE_IP_FRAG | NGBE_DEC_PTYPE_IP_IPV4), + NGBE_DEC_PTYPE_IP_FGV6 = + (NGBE_DEC_PTYPE_IP_FRAG | NGBE_DEC_PTYPE_IP_IPV6), +}; + +/* ngbe_dec_ptype.etype: encaped type */ +enum ngbe_dec_ptype_etype { + NGBE_DEC_PTYPE_ETYPE_NONE = 0, + NGBE_DEC_PTYPE_ETYPE_IPIP = 1, /* IP+IP */ + NGBE_DEC_PTYPE_ETYPE_IG = 2, /* IP+GRE */ + NGBE_DEC_PTYPE_ETYPE_IGM = 3, /* IP+GRE+MAC */ + NGBE_DEC_PTYPE_ETYPE_IGMV = 4, /* IP+GRE+MAC+VLAN */ +}; + +/* ngbe_dec_ptype.proto: payload proto */ +enum ngbe_dec_ptype_prot { + NGBE_DEC_PTYPE_PROT_NONE = 0, + NGBE_DEC_PTYPE_PROT_UDP = 1, + NGBE_DEC_PTYPE_PROT_TCP = 2, + NGBE_DEC_PTYPE_PROT_SCTP = 3, + NGBE_DEC_PTYPE_PROT_ICMP = 4, + NGBE_DEC_PTYPE_PROT_TS = 5, /* time sync */ +}; + +/* ngbe_dec_ptype.layer: payload layer */ +enum ngbe_dec_ptype_layer { + NGBE_DEC_PTYPE_LAYER_NONE = 0, + NGBE_DEC_PTYPE_LAYER_PAY2 = 1, + NGBE_DEC_PTYPE_LAYER_PAY3 = 2, + NGBE_DEC_PTYPE_LAYER_PAY4 = 3, +}; + +struct ngbe_dec_ptype { + u32 ptype:8; + u32 known:1; + u32 mac:2; /* outer mac */ + u32 ip:3; /* outer ip*/ + u32 etype:3; /* encaped type */ + u32 eip:3; /* encaped ip */ + u32 prot:4; /* payload proto */ + u32 layer:3; /* 
payload layer */ +}; +typedef struct ngbe_dec_ptype ngbe_dptype; + + +u16 ngbe_get_pcie_msix_count(struct ngbe_hw *hw); +int ngbe_init_hw(struct ngbe_hw *hw); +int ngbe_start_hw(struct ngbe_hw *hw); +int ngbe_clear_hw_cntrs(struct ngbe_hw *hw); +int ngbe_read_pba_string(struct ngbe_hw *hw, u8 *pba_num, + u32 pba_num_size); +int ngbe_get_mac_addr(struct ngbe_hw *hw, u8 *mac_addr); +int ngbe_get_bus_info(struct ngbe_hw *hw); +void ngbe_set_pci_config_data(struct ngbe_hw *hw, u16 link_status); +void ngbe_set_lan_id_multi_port_pcie(struct ngbe_hw *hw); +int ngbe_stop_adapter(struct ngbe_hw *hw); + +int ngbe_led_on(struct ngbe_hw *hw, u32 index); +int ngbe_led_off(struct ngbe_hw *hw, u32 index); + +int ngbe_set_rar(struct ngbe_hw *hw, u32 index, u8 *addr, u64 pools, + u32 enable_addr); +int ngbe_clear_rar(struct ngbe_hw *hw, u32 index); +int ngbe_init_rx_addrs(struct ngbe_hw *hw); +int ngbe_update_mc_addr_list(struct ngbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, + ngbe_mc_addr_itr func, bool clear); +int ngbe_update_uc_addr_list(struct ngbe_hw *hw, u8 *addr_list, + u32 addr_count, ngbe_mc_addr_itr func); +int ngbe_enable_mc(struct ngbe_hw *hw); +int ngbe_disable_mc(struct ngbe_hw *hw); +int ngbe_disable_sec_rx_path(struct ngbe_hw *hw); +int ngbe_enable_sec_rx_path(struct ngbe_hw *hw); + +int ngbe_fc_enable(struct ngbe_hw *hw); +void ngbe_fc_autoneg(struct ngbe_hw *hw); +int ngbe_setup_fc(struct ngbe_hw *hw); + +int ngbe_validate_mac_addr(u8 *mac_addr); +int ngbe_acquire_swfw_sync(struct ngbe_hw *hw, u32 mask); +void ngbe_release_swfw_sync(struct ngbe_hw *hw, u32 mask); +int ngbe_disable_pcie_master(struct ngbe_hw *hw); + +int ngbe_set_vmdq(struct ngbe_hw *hw, u32 rar, u32 vmdq); +int ngbe_set_vmdq_san_mac(struct ngbe_hw *hw, u32 vmdq); +int ngbe_clear_vmdq(struct ngbe_hw *hw, u32 rar, u32 vmdq); +int ngbe_insert_mac_addr(struct ngbe_hw *hw, u8 *addr, u32 vmdq); +int ngbe_init_uta_tables(struct ngbe_hw *hw); +int ngbe_set_vfta(struct ngbe_hw *hw, u32 vlan, + u32 
vind, bool vlan_on); +int ngbe_set_vlvf(struct ngbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool *vfta_changed); +int ngbe_clear_vfta(struct ngbe_hw *hw); +int ngbe_find_vlvf_slot(struct ngbe_hw *hw, u32 vlan); + +void ngbe_set_mac_anti_spoofing(struct ngbe_hw *hw, bool enable, int pf); +void ngbe_set_vlan_anti_spoofing(struct ngbe_hw *hw, bool enable, int vf); +void ngbe_set_ethertype_anti_spoofing(struct ngbe_hw *hw, + bool enable, int vf); +int ngbe_get_device_caps(struct ngbe_hw *hw, u16 *device_caps); +void ngbe_set_rxpba(struct ngbe_hw *hw, int num_pb, u32 headroom, + int strategy); +int ngbe_set_fw_drv_ver(struct ngbe_hw *hw, u8 maj, u8 min, + u8 build, u8 ver); +int ngbe_reset_hostif(struct ngbe_hw *hw); +u8 ngbe_calculate_checksum(u8 *buffer, u32 length); +int ngbe_host_interface_command(struct ngbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data); + +void ngbe_clear_tx_pending(struct ngbe_hw *hw); +void ngbe_stop_mac_link_on_d3(struct ngbe_hw *hw); +bool ngbe_mng_present(struct ngbe_hw *hw); +bool ngbe_check_mng_access(struct ngbe_hw *hw); + +int ngbe_get_thermal_sensor_data(struct ngbe_hw *hw); +int ngbe_init_thermal_sensor_thresh(struct ngbe_hw *hw); +void ngbe_enable_rx(struct ngbe_hw *hw); +void ngbe_disable_rx(struct ngbe_hw *hw); +int ngbe_setup_mac_link_multispeed_fiber(struct ngbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete); +int ngbe_check_flash_load(struct ngbe_hw *hw, u32 check_bit); + +/* @ngbe_api.h */ +void ngbe_atr_compute_perfect_hash(union ngbe_atr_input *input, + union ngbe_atr_input *mask); +u32 ngbe_atr_compute_sig_hash(union ngbe_atr_hash_dword input, + union ngbe_atr_hash_dword common); + +int ngbe_get_link_capabilities(struct ngbe_hw *hw, + u32 *speed, bool *autoneg); +enum ngbe_media_type ngbe_get_media_type(struct ngbe_hw *hw); +void ngbe_disable_tx_laser_multispeed_fiber(struct ngbe_hw *hw); +void ngbe_enable_tx_laser_multispeed_fiber(struct ngbe_hw *hw); +void 
ngbe_flap_tx_laser_multispeed_fiber(struct ngbe_hw *hw); +void ngbe_set_hard_rate_select_speed(struct ngbe_hw *hw, + u32 speed); +int ngbe_setup_mac_link(struct ngbe_hw *hw, u32 speed, + bool autoneg_wait_to_complete); +void ngbe_init_mac_link_ops(struct ngbe_hw *hw); +int ngbe_reset_hw(struct ngbe_hw *hw); +int ngbe_identify_phy(struct ngbe_hw *hw); +void ngbe_init_ops_common(struct ngbe_hw *hw); +int ngbe_enable_rx_dma(struct ngbe_hw *hw, u32 regval); +void ngbe_init_ops(struct ngbe_hw *hw); +int ngbe_setup_eee(struct ngbe_hw *hw, bool enable_eee); + +int ngbe_init_flash_params(struct ngbe_hw *hw); +int ngbe_read_flash_buffer(struct ngbe_hw *hw, u32 offset, + u32 dwords, u32 *data); +int ngbe_write_flash_buffer(struct ngbe_hw *hw, u32 offset, + u32 dwords, u32 *data); + +int ngbe_read_eeprom(struct ngbe_hw *hw, + u16 offset, u16 *data); +int ngbe_read_eeprom_buffer(struct ngbe_hw *hw, u16 offset, + u16 words, u16 *data); +int ngbe_init_eeprom_params(struct ngbe_hw *hw); +int ngbe_update_eeprom_checksum(struct ngbe_hw *hw); +int ngbe_calc_eeprom_checksum(struct ngbe_hw *hw); +int ngbe_validate_eeprom_checksum(struct ngbe_hw *hw, + u16 *checksum_val); +int ngbe_upgrade_flash(struct ngbe_hw *hw, u32 region, + const u8 *data, u32 size); +int ngbe_write_ee_hostif_buffer(struct ngbe_hw *hw, + u16 offset, u16 words, u16 *data); +int ngbe_write_ee_hostif(struct ngbe_hw *hw, u16 offset, + u16 data); +int ngbe_write_ee_hostif32(struct ngbe_hw *hw, u16 offset, + u32 data); + +int ngbe_read_ee_hostif_buffer(struct ngbe_hw *hw, + u16 offset, u16 words, u16 *data); +int ngbe_read_ee_hostif(struct ngbe_hw *hw, u16 offset, u16 *data); + +int ngbe_read_ee_hostif32(struct ngbe_hw *hw, u16 offset, u32 *data); + +u32 ngbe_rd32_epcs(struct ngbe_hw *hw, u32 addr); +void ngbe_wr32_epcs(struct ngbe_hw *hw, u32 addr, u32 data); +void ngbe_wr32_ephy(struct ngbe_hw *hw, u32 addr, u32 data); +int ngbe_upgrade_flash_hostif(struct ngbe_hw *hw, u32 region, + const u8 *data, u32 size); + +int 
ngbe_eepromcheck_cap(struct ngbe_hw *hw, u16 offset, + u32 *data); +int ngbe_phy_signal_set(struct ngbe_hw *hw); + +int ngbe_flash_read_dword(struct ngbe_hw *hw, u32 addr, u32 *data); + +int ngbe_is_lldp(struct ngbe_hw *hw); +int ngbe_hic_write_lldp(struct ngbe_hw *hw, u32 open); + +#endif /* _NGBE_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_kcompat.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_kcompat.c new file mode 100644 index 000000000000..823873afc53d --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_kcompat.c @@ -0,0 +1,3024 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
/* pad/format option flags shared by number() and _kc_vsnprintf() */
#define _kc_ZEROPAD	1	/* pad with zero */
#define _kc_SIGN	2	/* unsigned/signed long */
#define _kc_PLUS	4	/* show plus */
#define _kc_SPACE	8	/* space if plus */
#define _kc_LEFT	16	/* left justified */
#define _kc_SPECIAL	32	/* 0x */
#define _kc_LARGE	64	/* use 'ABCDEF' instead of 'abcdef' */

/* parse a decimal run at *s, advancing the caller's cursor past it */
static int skip_atoi(const char **s)
{
	int i = 0;

	while (isdigit((unsigned char)**s))
		i = i * 10 + *((*s)++) - '0';
	return i;
}

/* store one character if it fits before @end (exclusive); the cursor always
 * advances so the caller can compute the would-be length even on truncation */
static char *emit_char(char *str, char *end, char c)
{
	if (str < end)
		*str = c;
	return str + 1;
}

/* "%pM" helper: emit a MAC address as six lowercase hex octets separated by
 * ':'.  NOTE(review): replaces the out-of-line get_mac() helper; assumed to
 * produce identical formatting -- confirm against ngbe_kcompat.h. */
static char *emit_mac(char *str, char *end, const unsigned char *mac)
{
	static const char hex[] = "0123456789abcdef";
	int i;

	for (i = 0; i < 6; i++) {
		if (i)
			str = emit_char(str, end, ':');
		str = emit_char(str, end, hex[mac[i] >> 4]);
		str = emit_char(str, end, hex[mac[i] & 0x0f]);
	}
	return str;
}

/*
 * Render one integer conversion into [str, end).  @end is one past the last
 * writable byte (exclusive), unlike the original inclusive-end convention
 * which formed the invalid pointer buf-1 when size == 0.
 *
 * Fixes vs. original:
 *  - on an unsupported base, return the cursor unchanged instead of the
 *    integer constant 0 cast to char * (callers never checked for NULL);
 *  - digit extraction uses unsigned %% and / instead of the kernel-only
 *    do_div() macro, which was being applied to a *signed* long long.
 */
static char *number(char *str, char *end, long long num, int base, int size,
		    int precision, int type)
{
	static const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
	static const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
	const char *digits = (type & _kc_LARGE) ? large_digits : small_digits;
	unsigned long long unum;
	char tmp[66];
	char pad, sign;
	int i;

	if (type & _kc_LEFT)
		type &= ~_kc_ZEROPAD;
	if (base < 2 || base > 36)
		return str;	/* unsupported base: emit nothing */

	pad = (type & _kc_ZEROPAD) ? '0' : ' ';
	sign = 0;
	if (type & _kc_SIGN) {
		if (num < 0) {
			sign = '-';
			num = -num;
			size--;
		} else if (type & _kc_PLUS) {
			sign = '+';
			size--;
		} else if (type & _kc_SPACE) {
			sign = ' ';
			size--;
		}
	}
	if (type & _kc_SPECIAL) {
		if (base == 16)
			size -= 2;	/* room for "0x" */
		else if (base == 8)
			size--;		/* room for "0" */
	}

	/* convert digits, least significant first */
	i = 0;
	unum = (unsigned long long)num;
	if (unum == 0)
		tmp[i++] = '0';
	while (unum != 0) {
		tmp[i++] = digits[unum % (unsigned)base];
		unum /= (unsigned)base;
	}

	if (i > precision)
		precision = i;
	size -= precision;

	if (!(type & (_kc_ZEROPAD | _kc_LEFT)))
		while (size-- > 0)
			str = emit_char(str, end, ' ');
	if (sign)
		str = emit_char(str, end, sign);
	if (type & _kc_SPECIAL) {
		if (base == 8) {
			str = emit_char(str, end, '0');
		} else if (base == 16) {
			str = emit_char(str, end, '0');
			/* digits[33] is 'x' or 'X' matching the case table */
			str = emit_char(str, end, digits[33]);
		}
	}
	if (!(type & _kc_LEFT))
		while (size-- > 0)
			str = emit_char(str, end, pad);
	while (i < precision--)
		str = emit_char(str, end, '0');
	while (i-- > 0)
		str = emit_char(str, end, tmp[i]);
	while (size-- > 0)
		str = emit_char(str, end, ' ');
	return str;
}

/**
 * _kc_vsnprintf - bounded vsprintf backport for kernels < 2.4.8
 * @buf: output buffer
 * @size: capacity of @buf including the trailing NUL
 * @fmt: printf-style format (subset: c s p pM n %% o x X d i u; h/l/L/Z)
 * @args: argument list
 *
 * Returns the number of characters the full expansion requires (excluding
 * the NUL), i.e. C99 semantics; a return >= @size means truncation.
 *
 * Fix vs. original: the end pointer is buf + size (exclusive) instead of
 * buf + size - 1, which invoked undefined behavior for size == 0 and
 * relied on an intentional pointer-overflow hack (end = (void *)-1).
 */
int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
	char *str = buf;
	char *end = buf + size;

	for (; *fmt; ++fmt) {
		unsigned long long num;
		const char *s;
		int flags, field_width, precision, qualifier, base;
		int len, i;

		if (*fmt != '%') {
			str = emit_char(str, end, *fmt);
			continue;
		}

		/* process flags */
		flags = 0;
repeat:
		++fmt;		/* this also skips the first '%' */
		switch (*fmt) {
		case '-': flags |= _kc_LEFT; goto repeat;
		case '+': flags |= _kc_PLUS; goto repeat;
		case ' ': flags |= _kc_SPACE; goto repeat;
		case '#': flags |= _kc_SPECIAL; goto repeat;
		case '0': flags |= _kc_ZEROPAD; goto repeat;
		}

		/* field width: literal digits or '*' argument */
		field_width = -1;
		if (isdigit((unsigned char)*fmt)) {
			field_width = skip_atoi(&fmt);
		} else if (*fmt == '*') {
			++fmt;
			field_width = va_arg(args, int);
			if (field_width < 0) {
				field_width = -field_width;
				flags |= _kc_LEFT;
			}
		}

		/* precision: ".digits" or ".*" */
		precision = -1;
		if (*fmt == '.') {
			++fmt;
			if (isdigit((unsigned char)*fmt))
				precision = skip_atoi(&fmt);
			else if (*fmt == '*') {
				++fmt;
				precision = va_arg(args, int);
			}
			if (precision < 0)
				precision = 0;
		}

		/* length qualifier */
		qualifier = -1;
		if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt == 'Z') {
			qualifier = *fmt;
			++fmt;
		}

		base = 10;

		switch (*fmt) {
		case 'c':
			if (!(flags & _kc_LEFT))
				while (--field_width > 0)
					str = emit_char(str, end, ' ');
			str = emit_char(str, end,
					(char)(unsigned char)va_arg(args, int));
			while (--field_width > 0)
				str = emit_char(str, end, ' ');
			continue;

		case 's':
			s = va_arg(args, char *);
			if (!s)
				s = "";
			/* bounded strlen: precision < 0 means unlimited */
			for (len = 0;
			     s[len] && (precision < 0 || len < precision);
			     len++)
				;
			if (!(flags & _kc_LEFT))
				while (len < field_width--)
					str = emit_char(str, end, ' ');
			for (i = 0; i < len; ++i)
				str = emit_char(str, end, s[i]);
			while (len < field_width--)
				str = emit_char(str, end, ' ');
			continue;

		case 'p':
			if (fmt[1] == 'M') {
				str = emit_mac(str, end,
					       va_arg(args, unsigned char *));
				fmt++;
			} else {
				if (field_width == -1) {
					field_width = 2 * sizeof(void *);
					flags |= _kc_ZEROPAD;
				}
				str = number(str, end,
					     (unsigned long)va_arg(args, void *),
					     16, field_width, precision, flags);
			}
			continue;

		case 'n':
			/* store the running output length */
			if (qualifier == 'l') {
				long *ip = va_arg(args, long *);
				*ip = (str - buf);
			} else if (qualifier == 'Z') {
				size_t *ip = va_arg(args, size_t *);
				*ip = (str - buf);
			} else {
				int *ip = va_arg(args, int *);
				*ip = (str - buf);
			}
			continue;

		case '%':
			str = emit_char(str, end, '%');
			continue;

		/* integer conversions: set base/flags then fall out */
		case 'o':
			base = 8;
			break;

		case 'X':
			flags |= _kc_LARGE;
			/* fall through */
		case 'x':
			base = 16;
			break;

		case 'd':
		case 'i':
			flags |= _kc_SIGN;
			/* fall through */
		case 'u':
			break;

		default:
			/* unknown conversion: echo it literally */
			str = emit_char(str, end, '%');
			if (*fmt)
				str = emit_char(str, end, *fmt);
			else
				--fmt;	/* let the loop terminate on NUL */
			continue;
		}

		/* pull the integer argument at the declared width */
		if (qualifier == 'L') {
			num = va_arg(args, long long);
		} else if (qualifier == 'l') {
			num = va_arg(args, unsigned long);
			if (flags & _kc_SIGN)
				num = (signed long)num;
		} else if (qualifier == 'Z') {
			num = va_arg(args, size_t);
		} else if (qualifier == 'h') {
			num = (unsigned short)va_arg(args, int);
			if (flags & _kc_SIGN)
				num = (signed short)num;
		} else {
			num = va_arg(args, unsigned int);
			if (flags & _kc_SIGN)
				num = (signed int)num;
		}
		str = number(str, end, num, base,
			     field_width, precision, flags);
	}

	/* NUL-terminate; the terminator is not counted in the return */
	if (str < end)
		*str = '\0';
	else if (size > 0)
		buf[size - 1] = '\0';
	return str - buf;
}

/* snprintf() backport built on _kc_vsnprintf() */
int _kc_snprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int n;

	va_start(args, fmt);
	n = _kc_vsnprintf(buf, size, fmt, args);
	va_end(args);
	return n;
}
pci_release_regions(dev); + return -EBUSY; + } + } + } + return 0; +} + +void +_kc_pci_release_regions(struct pci_dev *dev) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) + release_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + + else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) + release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + } +} + +/**************************************/ +/* NETWORK DRIVER API */ + +struct net_device * +_kc_alloc_etherdev(int sizeof_priv) +{ + struct net_device *dev; + int alloc_size; + + alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31; + dev = kzalloc(alloc_size, GFP_KERNEL); + if (!dev) + return NULL; + + if (sizeof_priv) + dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31); + dev->name[0] = '\0'; + ether_setup(dev); + + return dev; +} + +int +_kc_is_valid_ether_addr(u8 *addr) +{ + const char zaddr[6] = { 0, }; + + return !(addr[0] & 1) && memcmp(addr, zaddr, 6); +} + +#endif /* 2.4.3 => 2.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6)) + +int +_kc_pci_set_power_state(struct pci_dev *dev, int state) +{ + return 0; +} + +int +_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable) +{ + return 0; +} + +#endif /* 2.4.6 => 2.4.3 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, + int off, int size) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + frag->page = page; + frag->page_offset = off; + frag->size = size; + skb_shinfo(skb)->nr_frags = i + 1; +} + +/* + * Original Copyright: + * find_next_bit.c: fallback find next bit implementation + * + * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + ffs(tmp); +} + +size_t _kc_strlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = strlen(src); + + if (size) { + size_t len = (ret >= size) ? 
size - 1 : ret; + memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; +} + +#ifndef do_div +#if BITS_PER_LONG == 32 +uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base) +{ + uint64_t rem = *n; + uint64_t b = base; + uint64_t res, d = 1; + uint32_t high = rem >> 32; + + /* Reduce the thing a bit first */ + res = 0; + if (high >= base) { + high /= base; + res = (uint64_t) high << 32; + rem -= (uint64_t) (high*base) << 32; + } + + while ((int64_t)b > 0 && b < rem) { + b = b+b; + d = d+d; + } + + do { + if (rem >= b) { + rem -= b; + res += d; + } + b >>= 1; + d >>= 1; + } while (d); + + *n = res; + return rem; +} +#endif /* BITS_PER_LONG == 32 */ +#endif /* do_div */ +#endif /* 2.6.0 => 2.4.6 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 4)) +int _kc_scnprintf(char *buf, size_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i = vsnprintf(buf, size, fmt, args); + va_end(args); + return (i >= size) ? 
(size - 1) : i; +} +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10)) +DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1}; +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 13)) +char *_kc_kstrdup(const char *s, unsigned int gfp) +{ + size_t len; + char *buf; + + if (!s) + return NULL; + + len = strlen(s) + 1; + buf = kmalloc(len, gfp); + if (buf) + memcpy(buf, s, len); + return buf; +} +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)) +void *_kc_kzalloc(size_t size, int flags) +{ + void *ret = kmalloc(size, flags); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif /* <= 2.6.13 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)) +int _kc_skb_pad(struct sk_buff *skb, int pad) +{ + int ntail; + + /* If the skbuff is non linear tailroom is always zero.. 
*/ + if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { + memset(skb->data+skb->len, 0, pad); + return 0; + } + + ntail = skb->data_len + pad - (skb->end - skb->tail); + if (likely(skb_cloned(skb) || ntail > 0)) { + if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC)) + goto free_skb; + } + +#ifdef MAX_SKB_FRAGS + if (skb_is_nonlinear(skb) && + !__pskb_pull_tail(skb, skb->data_len)) + goto free_skb; + +#endif + memset(skb->data + skb->len, 0, pad); + return 0; + +free_skb: + kfree_skb(skb); + return -ENOMEM; +} + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 4))) +int _kc_pci_save_state(struct pci_dev *pdev) +{ + struct adapter_struct *adapter = pci_get_drvdata(pdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset, pcie_link_status; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + /* no ->dev for 2.4 kernels */ + WARN_ON(pdev->dev.driver_data == NULL); +#endif + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset) { + if (!pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = PCIE_CONFIG_SPACE_LEN; + } + pci_config_space_ich8lan(); +#ifdef HAVE_PCI_ERS + if (adapter->config_space == NULL) +#else + WARN_ON(adapter->config_space != NULL); +#endif + adapter->config_space = kmalloc(size, GFP_KERNEL); + if (!adapter->config_space) { + printk(KERN_ERR "Out of memory in pci_save_state\n"); + return -ENOMEM; + } + for (i = 0; i < (size / 4); i++) + pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]); + return 0; +} + +void _kc_pci_restore_state(struct pci_dev *pdev) +{ + struct adapter_struct *adapter = pci_get_drvdata(pdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset; + u16 pcie_link_status; + + if (adapter->config_space != NULL) { + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset && + !pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = 
PCIE_CONFIG_SPACE_LEN; + + pci_config_space_ich8lan(); + for (i = 0; i < (size / 4); i++) + pci_write_config_dword(pdev, i * 4, adapter->config_space[i]); +#ifndef HAVE_PCI_ERS + kfree(adapter->config_space); + adapter->config_space = NULL; +#endif + } +} +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +void _kc_free_netdev(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + + kfree(adapter->config_space); +#ifdef CONFIG_SYSFS + if (netdev->reg_state == NETREG_UNINITIALIZED) { + kfree((char *)netdev - netdev->padded); + } else { + BUG_ON(netdev->reg_state != NETREG_UNREGISTERED); + netdev->reg_state = NETREG_RELEASED; + class_device_put(&netdev->class_dev); + } +#else + kfree((char *)netdev - netdev->padded); +#endif +} +#endif + +void *_kc_kmemdup(const void *src, size_t len, unsigned gfp) +{ + void *p; + + p = kzalloc(len, gfp); + if (p) + memcpy(p, src, len); + return p; +} +#endif /* <= 2.6.19 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21)) +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev) +{ + return ((struct adapter_struct *)netdev_priv(netdev))->pdev; +} +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22)) +/* hexdump code taken from lib/hexdump.c */ +static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize, + int groupsize, unsigned char *linebuf, + size_t linebuflen, bool ascii) +{ + const u8 *ptr = buf; + u8 ch; + int j, lx = 0; + int ascii_column; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + if (!len) + goto nil; + if (len > rowsize) /* limit to one line at a time */ + len = rowsize; + if ((len % groupsize) != 0) /* no mixed size output */ + groupsize = 1; + + switch (groupsize) { + case 8: { + const u64 *ptr8 = buf; + int ngroups = len / groupsize; + + for 
(j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%16.16llx", j ? " " : "", + (unsigned long long)*(ptr8 + j)); + ascii_column = 17 * ngroups + 2; + break; + } + + case 4: { + const u32 *ptr4 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%8.8x", j ? " " : "", *(ptr4 + j)); + ascii_column = 9 * ngroups + 2; + break; + } + + case 2: { + const u16 *ptr2 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%4.4x", j ? " " : "", *(ptr2 + j)); + ascii_column = 5 * ngroups + 2; + break; + } + + default: + for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { + ch = ptr[j]; + linebuf[lx++] = hex_asc(ch >> 4); + linebuf[lx++] = hex_asc(ch & 0x0f); + linebuf[lx++] = ' '; + } + if (j) + lx--; + + ascii_column = 3 * rowsize + 2; + break; + } + if (!ascii) + goto nil; + + while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) + linebuf[lx++] = ' '; + for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) + linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? 
ptr[j] + : '.'; +nil: + linebuf[lx++] = '\0'; +} + +void _kc_print_hex_dump(const char *level, + const char *prefix_str, int prefix_type, + int rowsize, int groupsize, + const void *buf, size_t len, bool ascii) +{ + const u8 *ptr = buf; + int i, linelen, remaining = len; + unsigned char linebuf[200]; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + for (i = 0; i < len; i += rowsize) { + linelen = min(remaining, rowsize); + remaining -= rowsize; + _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, + linebuf, sizeof(linebuf), ascii); + + switch (prefix_type) { + case DUMP_PREFIX_ADDRESS: + printk("%s%s%*p: %s\n", level, prefix_str, + (int)(2 * sizeof(void *)), ptr + i, linebuf); + break; + case DUMP_PREFIX_OFFSET: + printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); + break; + default: + printk("%s%s%s\n", level, prefix_str, linebuf); + break; + } + } +} + +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)) +int txgbe_dcb_netlink_register(void) +{ + return 0; +} + +int txgbe_dcb_netlink_unregister(void) +{ + return 0; +} + +int txgbe_copy_dcb_cfg(struct txgbe_adapter __always_unused *adapter, int __always_unused tc_max) +{ + return 0; +} +#endif /* < 2.6.23 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) +#ifdef NAPI +struct net_device *napi_to_poll_dev(const struct napi_struct *napi) +{ + struct adapter_q_vector *q_vector = container_of(napi, + struct adapter_q_vector, + napi); + return &q_vector->poll_dev; +} + +int __kc_adapter_clean(struct net_device *netdev, int *budget) +{ + int work_done; + int work_to_do = min(*budget, netdev->quota); + /* ngbe_kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */ + struct napi_struct *napi = netdev->priv; + work_done = napi->poll(napi, work_to_do); + *budget -= work_done; + netdev->quota -= 
work_done; + return (work_done >= work_to_do) ? 1 : 0; +} +#endif /* NAPI */ +#endif /* <= 2.6.24 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)) +void _kc_pci_disable_link_state(struct pci_dev *pdev, int state) +{ + struct pci_dev *parent = pdev->bus->self; + u16 link_state; + int pos; + + if (!parent) + return; + + pos = pci_find_capability(parent, PCI_CAP_ID_EXP); + if (pos) { + pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state); + link_state &= ~state; + pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state); + } +} +#endif /* < 2.6.26 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)) +#ifdef HAVE_TX_MQ +void _kc_netif_tx_stop_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_stop_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_stop_subqueue(netdev, i); +} +void _kc_netif_tx_wake_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_wake_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_wake_subqueue(netdev, i); +} +void _kc_netif_tx_start_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_start_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_start_subqueue(netdev, i); +} +#endif /* HAVE_TX_MQ */ + +void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...) 
+{ + va_list args; + + printk(KERN_WARNING "------------[ cut here ]------------\n"); + printk(KERN_WARNING "WARNING: at %s:%d \n", file, line); + va_start(args, fmt); + vprintk(fmt, args); + va_end(args); + + dump_stack(); +} +#endif /* __VMKLNX__ */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)) + +int +_kc_pci_prepare_to_sleep(struct pci_dev *dev) +{ + pci_power_t target_state; + int error; + + target_state = pci_choose_state(dev, PMSG_SUSPEND); + + pci_enable_wake(dev, target_state, true); + + error = pci_set_power_state(dev, target_state); + + if (error) + pci_enable_wake(dev, target_state, false); + + return error; +} + +int +_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable) +{ + int err; + + err = pci_enable_wake(dev, PCI_D3cold, enable); + if (err) + goto out; + + err = pci_enable_wake(dev, PCI_D3hot, enable); + +out: + return err; +} +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)) +static void __kc_pci_set_master(struct pci_dev *pdev, bool enable) +{ + u16 old_cmd, cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &old_cmd); + if (enable) + cmd = old_cmd | PCI_COMMAND_MASTER; + else + cmd = old_cmd & ~PCI_COMMAND_MASTER; + if (cmd != old_cmd) { + dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n", + enable ? 
"enabling" : "disabling"); + pci_write_config_word(pdev, PCI_COMMAND, cmd); + } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 7)) + pdev->is_busmaster = enable; +#endif +} + +void _kc_pci_clear_master(struct pci_dev *dev) +{ + __kc_pci_set_master(dev, false); +} +#endif /* < 2.6.29 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34)) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 0)) +int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev) +{ + int num_vf = 0; +#ifdef CONFIG_PCI_IOV + struct pci_dev *vfdev; + + /* loop through all ethernet devices starting at PF dev */ + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL); + while (vfdev) { + if (vfdev->is_virtfn && vfdev->physfn == dev) + num_vf++; + + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev); + } + +#endif + return num_vf; +} +#endif /* RHEL_RELEASE_CODE */ +#endif /* < 2.6.34 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)) +#ifdef HAVE_TX_MQ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 0))) +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +int _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) +{ + unsigned int real_num = dev->real_num_tx_queues; + struct Qdisc *qdisc; + int i; + + if (txq < 1 || txq > dev->num_tx_queues) + return -EINVAL; + + else if (txq > real_num) + dev->real_num_tx_queues = txq; + else if (txq < real_num) { + dev->real_num_tx_queues = txq; + for (i = txq; i < dev->num_tx_queues; i++) { + qdisc = netdev_get_tx_queue(dev, i)->qdisc; + if (qdisc) { + spin_lock_bh(qdisc_lock(qdisc)); + qdisc_reset(qdisc); + spin_unlock_bh(qdisc_lock(qdisc)); + } + } + } + + return 0; +} +#endif /* CONFIG_NETDEVICES_MULTIQUEUE */ +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ +#endif /* HAVE_TX_MQ */ + +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count) +{ + loff_t pos = *ppos; + size_t res; + + if (pos < 0) + return -EINVAL; + if (pos >= 
available || !count) + return 0; + if (count > available - pos) + count = available - pos; + res = copy_from_user(to + pos, from, count); + if (res == count) + return -EFAULT; + count -= res; + *ppos = pos + count; + return count; +} + +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) +static const u32 _kc_flags_dup_features = + (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH); + +u32 _kc_ethtool_op_get_flags(struct net_device *dev) +{ + return dev->features & _kc_flags_dup_features; +} + +int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) +{ + if (data & ~supported) + return -EINVAL; + + dev->features = ((dev->features & ~_kc_flags_dup_features) | + (data & _kc_flags_dup_features)); + return 0; +} +#endif /* < 2.6.36 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 0))) +#ifdef HAVE_NETDEV_SELECT_QUEUE +#include +#include + +u16 ___kc_skb_tx_hash(struct net_device *dev, const struct sk_buff *skb, + u16 num_tx_queues) +{ + u32 hash; + u16 qoffset = 0; + u16 qcount = num_tx_queues; + + if (skb_rx_queue_recorded(skb)) { + hash = skb_get_rx_queue(skb); + while (unlikely(hash >= num_tx_queues)) + hash -= num_tx_queues; + return hash; + } + + if (netdev_get_num_tc(dev)) { + struct adapter_struct *kc_adapter = netdev_priv(dev); + + if (skb->priority == TC_PRIO_CONTROL) { + qoffset = kc_adapter->dcb_tc - 1; + } else { + qoffset = skb->vlan_tci; + qoffset &= NGBE_TX_FLAGS_VLAN_PRIO_MASK; + qoffset >>= 13; + } + + qcount = kc_adapter->ring_feature[RING_F_RSS].indices; + qoffset *= qcount; + } + + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else +#ifdef NETIF_F_RXHASH + hash = (__force u16) skb->protocol ^ skb->rxhash; +#else + hash = skb->protocol; +#endif + + 
hash = jhash_1word(hash, _kc_hashrnd); + + return (u16) (((u64) hash * qcount) >> 32) + qoffset; +} +#endif /* HAVE_NETDEV_SELECT_QUEUE */ + +u8 _kc_netdev_get_num_tc(struct net_device *dev) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + if (kc_adapter->flags & NGBE_FLAG_DCB_ENABLED) + return kc_adapter->dcb_tc; + else + return 0; +} + +int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + + if (num_tc > NGBE_DCB_MAX_TRAFFIC_CLASS) + return -EINVAL; + + kc_adapter->dcb_tc = num_tc; + + return 0; +} + +u8 _kc_netdev_get_prio_tc_map(struct net_device __maybe_unused *dev, u8 __maybe_unused up) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + + return txgbe_dcb_get_tc_from_up(&kc_adapter->dcb_cfg, 0, up); +} + +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ +#endif /* < 2.6.39 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) +void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, + int off, int size, unsigned int truesize) +{ + skb_fill_page_desc(skb, i, page, off, size); + skb->len += size; + skb->data_len += size; + skb->truesize += truesize; +} + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0)) +int _kc_simple_open(struct inode *inode, struct file *file) +{ + if (inode->i_private) + file->private_data = inode->i_private; + + return 0; +} +#endif /* SLE_VERSION < 11,3,0 */ + +#endif /* < 3.4.0 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) +static inline int __kc_pcie_cap_version(struct pci_dev *dev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return 0; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); + return reg16 & PCI_EXP_FLAGS_VERS; +} + +static inline bool 
__kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev) +{ + return true; +} + +static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_ENDPOINT || + type == PCI_EXP_TYPE_LEG_END; +} + +static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + int pos; + u16 pcie_flags_reg; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return false; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + (type == PCI_EXP_TYPE_DOWNSTREAM && + pcie_flags_reg & PCI_EXP_FLAGS_SLOT); +} + +static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_RC_EC; +} + +static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos) +{ + if (!pci_is_pcie(dev)) + return false; + + switch (pos) { + case PCI_EXP_FLAGS_TYPE: + return true; + case PCI_EXP_DEVCAP: + case PCI_EXP_DEVCTL: + case PCI_EXP_DEVSTA: + return __kc_pcie_cap_has_devctl(dev); + case PCI_EXP_LNKCAP: + case PCI_EXP_LNKCTL: + case PCI_EXP_LNKSTA: + return __kc_pcie_cap_has_lnkctl(dev); + case PCI_EXP_SLTCAP: + case PCI_EXP_SLTCTL: + case PCI_EXP_SLTSTA: + return __kc_pcie_cap_has_sltctl(dev); + case PCI_EXP_RTCTL: + case PCI_EXP_RTCAP: + case PCI_EXP_RTSTA: + return __kc_pcie_cap_has_rtctl(dev); + case PCI_EXP_DEVCAP2: + case PCI_EXP_DEVCTL2: + case PCI_EXP_LNKCAP2: + case PCI_EXP_LNKCTL2: + case PCI_EXP_LNKSTA2: + return __kc_pcie_cap_version(dev) > 1; + default: + return false; + } +} + +/* + * Note that these accessor functions are only for the "PCI Express + * Capability" (see PCIe spec r3.0, sec 7.8). 
They do not apply to the + * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) + */ +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) +{ + int ret; + + *val = 0; + if (pos & 1) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_word() fails, it may + * have been written as 0xFFFF if hardware error happens + * during pci_read_config_word(). + */ + if (ret) + *val = 0; + return ret; + } + + /* + * For Functions that do not implement the Slot Capabilities, + * Slot Status, and Slot Control registers, these spaces must + * be hardwired to 0b, with the exception of the Presence Detect + * State bit in the Slot Status register of Downstream Ports, + * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) + */ + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + +int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) +{ + int ret; + + *val = 0; + if (pos & 3) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_dword() fails, it may + * have been written as 0xFFFFFFFF if hardware error happens + * during pci_read_config_dword(). 
+ */ + if (ret) + *val = 0; + return ret; + } + + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + + +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) +{ + if (pos & 1) + return -EINVAL; + + if (!__kc_pcie_capability_reg_implemented(dev, pos)) + return 0; + + return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); +} + +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set) +{ + int ret; + u16 val; + + ret = __kc_pcie_capability_read_word(dev, pos, &val); + if (!ret) { + val &= ~clear; + val |= set; + ret = __kc_pcie_capability_write_word(dev, pos, val); + } + + return ret; +} + +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear) +{ + return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0); +} +#endif /* < 3.7.0 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) +#ifdef CONFIG_XPS +#if NR_CPUS < 64 +#define _KC_MAX_XPS_CPUS NR_CPUS +#else +#define _KC_MAX_XPS_CPUS 64 +#endif + +/* + * netdev_queue sysfs structures and functions. 
+ */ +struct _kc_netdev_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_queue *queue, + struct _kc_netdev_queue_attribute *attr, char *buf); + ssize_t (*store)(struct netdev_queue *queue, + struct _kc_netdev_queue_attribute *attr, const char *buf, size_t len); +}; + +#define to_kc_netdev_queue_attr(_attr) container_of(_attr, \ + struct _kc_netdev_queue_attribute, attr) + +int __kc_netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, + u16 index) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, index); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38)) + /* Redhat requires some odd extended netdev structures */ + struct netdev_tx_queue_extended *txq_ext = + netdev_extended(dev)->_tx_ext + index; + struct kobj_type *ktype = txq_ext->kobj.ktype; +#else + struct kobj_type *ktype = txq->kobj.ktype; +#endif + struct _kc_netdev_queue_attribute *xps_attr; + struct attribute *attr = NULL; + int i, len, err; +#define _KC_XPS_BUFLEN (DIV_ROUND_UP(_KC_MAX_XPS_CPUS, 32) * 9) + char buf[_KC_XPS_BUFLEN]; + + if (!ktype) + return -ENOMEM; + + /* attempt to locate the XPS attribute in the Tx queue */ + for (i = 0; (attr = ktype->default_attrs[i]); i++) { + if (!strcmp("xps_cpus", attr->name)) + break; + } + + /* if we did not find it return an error */ + if (!attr) + return -EINVAL; + + /* copy the mask into a string */ + len = bitmap_scnprintf(buf, _KC_XPS_BUFLEN, + cpumask_bits(mask), _KC_MAX_XPS_CPUS); + if (!len) + return -ENOMEM; + + xps_attr = to_kc_netdev_queue_attr(attr); + + /* Store the XPS value using the SYSFS store call */ + err = xps_attr->store(txq, xps_attr, buf, len); + + /* we only had an error on err < 0 */ + return (err < 0) ? 
err : 0; +} +#endif /* CONFIG_XPS */ +#ifdef HAVE_NETDEV_SELECT_QUEUE +static inline int kc_get_xps_queue(struct net_device *dev, struct sk_buff *skb) +{ +#ifdef CONFIG_XPS + struct xps_dev_maps *dev_maps; + struct xps_map *map; + int queue_index = -1; + + rcu_read_lock(); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38)) + /* Redhat requires some odd extended netdev structures */ + dev_maps = rcu_dereference(netdev_extended(dev)->xps_maps); +#else + dev_maps = rcu_dereference(dev->xps_maps); +#endif + if (dev_maps) { + map = rcu_dereference( + dev_maps->cpu_map[raw_smp_processor_id()]); + if (map) { + if (map->len == 1) + queue_index = map->queues[0]; + else { + u32 hash; + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else + hash = (__force u16) skb->protocol ^ + skb->rxhash; + hash = jhash_1word(hash, _kc_hashrnd); + queue_index = map->queues[ + ((u64)hash * map->len) >> 32]; + } + if (unlikely(queue_index >= dev->real_num_tx_queues)) + queue_index = -1; + } + } + rcu_read_unlock(); + + return queue_index; +#else + struct adapter_struct *kc_adapter = netdev_priv(dev); + int queue_index = -1; + + if (kc_adapter->flags & NGBE_FLAG_FDIR_HASH_CAPABLE) { + queue_index = skb_rx_queue_recorded(skb) ? 
+ skb_get_rx_queue(skb) : + smp_processor_id(); + while (unlikely(queue_index >= dev->real_num_tx_queues)) + queue_index -= dev->real_num_tx_queues; + return queue_index; + } + + return -1; +#endif +} + +u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + int queue_index = sk_tx_queue_get(sk); + int new_index; + + if (queue_index >= 0 && queue_index < dev->real_num_tx_queues) { +#ifdef CONFIG_XPS + if (!skb->ooo_okay) +#endif + return queue_index; + } + + new_index = kc_get_xps_queue(dev, skb); + if (new_index < 0) + new_index = skb_tx_hash(dev, skb); + + if (queue_index != new_index && sk) { + struct dst_entry *dst = + rcu_dereference(sk->sk_dst_cache); + + if (dst && skb_dst(skb) == dst) + sk_tx_queue_set(sk, new_index); + + } + + return new_index; +} + +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#endif /* 3.9.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) +#ifdef HAVE_FDB_OPS +#ifdef USE_CONST_DEV_UC_CHAR +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, + u16 flags) +#else +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 flags) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. 
+ */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_add_excl(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(dev, addr); + + /* Only return duplicate errors if NLM_F_EXCL is set */ + if (err == -EEXIST && !(flags & NLM_F_EXCL)) + err = 0; + + return err; +} + +#ifdef USE_CONST_DEV_UC_CHAR +#ifdef HAVE_FDB_DEL_NLATTR +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr) +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr) +#endif +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. + */ + if (!(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_del(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_del(dev, addr); + + return err; +} + +#endif /* HAVE_FDB_OPS */ +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev) +{ + unsigned int vfs_assigned = 0; +#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED + int pos; + struct pci_dev *vfdev; + unsigned short dev_id; + + /* only search if we are a PF */ + if (!dev->is_physfn) + return 0; + + /* find SR-IOV capability */ + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return 0; + + /* + * determine the device ID for the VFs, the vendor ID will be the + * same as the PF so there is no need to check for that one + */ + pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id); + + /* loop through all the VFs to 
see if we own any that are assigned */ + vfdev = pci_get_device(dev->vendor, dev_id, NULL); + while (vfdev) { + /* + * It is considered assigned if it is a virtual function with + * our dev as the physical function and the assigned bit is set + */ + if (vfdev->is_virtfn && (vfdev->physfn == dev) && + (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) + vfs_assigned++; + + vfdev = pci_get_device(dev->vendor, dev_id, vfdev); + } + +#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */ + return vfs_assigned; +} + +#endif /* CONFIG_PCI_IOV */ +#endif /* 3.10.0 */ + +static const unsigned char __maybe_unused pcie_link_speed[] = { + PCI_SPEED_UNKNOWN, /* 0 */ + PCIE_SPEED_2_5GT, /* 1 */ + PCIE_SPEED_5_0GT, /* 2 */ + PCIE_SPEED_8_0GT, /* 3 */ + PCIE_SPEED_16_0GT, /* 4 */ + PCI_SPEED_UNKNOWN, /* 5 */ + PCI_SPEED_UNKNOWN, /* 6 */ + PCI_SPEED_UNKNOWN, /* 7 */ + PCI_SPEED_UNKNOWN, /* 8 */ + PCI_SPEED_UNKNOWN, /* 9 */ + PCI_SPEED_UNKNOWN, /* A */ + PCI_SPEED_UNKNOWN, /* B */ + PCI_SPEED_UNKNOWN, /* C */ + PCI_SPEED_UNKNOWN, /* D */ + PCI_SPEED_UNKNOWN, /* E */ + PCI_SPEED_UNKNOWN /* F */ +}; + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) +int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + int ret; + + *speed = PCI_SPEED_UNKNOWN; + *width = PCIE_LNK_WIDTH_UNKNOWN; + + while (dev) { + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + + ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + if (ret) + return ret; + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + if (next_speed < *speed) + *speed = next_speed; + + if (next_width < *width) + *width = next_width; + + dev = dev->bus->self; + } + + return 0; +} + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7)) +int _kc_pci_wait_for_pending_transaction(struct pci_dev 
*dev) +{ + int i; + u16 status; + + /* Wait for Transaction Pending bit clean */ + for (i = 0; i < 4; i++) { + if (i) + msleep((1 << (i - 1)) * 100); + + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); + if (!(status & PCI_EXP_DEVSTA_TRPND)) + return 1; + } + + return 0; +} +#endif /* crs_timeout) { + printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not " + "responding\n", pci_domain_nr(bus), + bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn)); + return false; + } + } + + return true; +} + +bool _kc_pci_device_is_present(struct pci_dev *pdev) +{ + u32 v; + + return _kc_pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); +} +#endif /* nexthdr; + unsigned int len; + bool found; + +#define __KC_IP6_FH_F_FRAG BIT(0) +#define __KC_IP6_FH_F_AUTH BIT(1) +#define __KC_IP6_FH_F_SKIP_RH BIT(2) + + if (fragoff) + *fragoff = 0; + + if (*offset) { + struct ipv6hdr _ip6, *ip6; + + ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6); + if (!ip6 || (ip6->version != 6)) { + printk(KERN_ERR "IPv6 header not found\n"); + return -EBADMSG; + } + start = *offset + sizeof(struct ipv6hdr); + nexthdr = ip6->nexthdr; + } + len = skb->len - start; + + do { + struct ipv6_opt_hdr _hdr, *hp; + unsigned int hdrlen; + found = (nexthdr == target); + + if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { + if (target < 0 || found) + break; + return -ENOENT; + } + + hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); + if (!hp) + return -EBADMSG; + + if (nexthdr == NEXTHDR_ROUTING) { + struct ipv6_rt_hdr _rh, *rh; + + rh = skb_header_pointer(skb, start, sizeof(_rh), + &_rh); + if (!rh) + return -EBADMSG; + + if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) && + rh->segments_left == 0) + found = false; + } + + if (nexthdr == NEXTHDR_FRAGMENT) { + unsigned short _frag_off; + __be16 *fp; + + if (flags) /* Indicate that this is a fragment */ + *flags |= __KC_IP6_FH_F_FRAG; + fp = skb_header_pointer(skb, + start+offsetof(struct frag_hdr, + frag_off), + sizeof(_frag_off), + 
&_frag_off); + if (!fp) + return -EBADMSG; + + _frag_off = ntohs(*fp) & ~0x7; + if (_frag_off) { + if (target < 0 && + ((!ipv6_ext_hdr(hp->nexthdr)) || + hp->nexthdr == NEXTHDR_NONE)) { + if (fragoff) + *fragoff = _frag_off; + return hp->nexthdr; + } + return -ENOENT; + } + hdrlen = 8; + } else if (nexthdr == NEXTHDR_AUTH) { + if (flags && (*flags & __KC_IP6_FH_F_AUTH) && (target < 0)) + break; + hdrlen = (hp->hdrlen + 2) << 2; + } else + hdrlen = ipv6_optlen(hp); + + if (!found) { + nexthdr = hp->nexthdr; + len -= hdrlen; + start += hdrlen; + } + } while (!found); + + *offset = start; + return nexthdr; +} + +int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msix(dev, entries, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +#endif /* 3.14.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)) +char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) +{ + size_t size; + char *buf; + + if (!s) + return NULL; + + size = strlen(s) + 1; + buf = devm_kzalloc(dev, size, gfp); + if (buf) + memcpy(buf, s, size); + return buf; +} + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + /* Set of random keys generated using kernel random number generator */ + static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62, + 0x95, 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F, + 0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95, + 0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC, + 0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41, + 0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A, + 0xD7, 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20}; + + BUG_ON(len > NETDEV_RSS_KEY_LEN); + memcpy(buffer, seed, len); +} +#endif /* 3.15.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)) +#ifdef HAVE_SET_RX_MODE 
+#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + int err; + + /* first go through and flush out any stale entries */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) + if (!ha->synced || ha->refcount != 1) +#else + if (!ha->sync_cnt || ha->refcount != 1) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } + + /* go through and sync new entries to the list */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) + if (ha->synced) +#else + if (ha->sync_cnt) +#endif + continue; + + err = sync(dev, ha->addr); + if (err) + return err; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) + ha->synced = true; +#else + ha->sync_cnt++; +#endif + ha->refcount++; + } + + return 0; +} + +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) + if (!ha->synced) +#else + if (!ha->sync_cnt) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) + ha->synced = false; +#else + ha->sync_cnt--; +#endif + if (--ha->refcount) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } +} + +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned 
char *), + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da, **next = list; + int err; + + /* first go through and flush out any stale entries */ + while ((da = *next) != NULL) { + if (da->da_synced && da->da_users == 1) { + if (!unsync || !unsync(dev, da->da_addr)) { + *next = da->next; + kfree(da); + (*count)--; + continue; + } + } + next = &da->next; + } + + /* go through and sync new entries to the list */ + for (da = *list; da != NULL; da = da->next) { + if (da->da_synced) + continue; + + err = sync(dev, da->da_addr); + if (err) + return err; + + da->da_synced++; + da->da_users++; + } + + return 0; +} + +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da; + + while ((da = *list) != NULL) { + if (da->da_synced) { + if (!unsync || !unsync(dev, da->da_addr)) { + da->da_synced--; + if (--da->da_users == 0) { + *list = da->next; + kfree(da); + (*count)--; + continue; + } + } + } + list = &da->next; + } +} +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp) +{ + void *p; + + p = devm_kzalloc(dev, len, gfp); + if (p) + memcpy(p, src, len); + + return p; +} +#endif /* 3.16.0 */ + +/******************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +#endif /* <3.17.0 && RHEL_RELEASE_CODE < RHEL7.5 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) +#ifndef NO_PTP_SUPPORT +static void __kc_sock_efree(struct sk_buff *skb) +{ + sock_put(skb->sk); +} + +struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + struct sk_buff *clone; + + if (!sk || 
!atomic_inc_not_zero(&sk->sk_refcnt)) + return NULL; + + clone = skb_clone(skb, GFP_ATOMIC); + if (!clone) { + sock_put(sk); + return NULL; + } + + clone->sk = sk; + clone->destructor = __kc_sock_efree; + + return clone; +} + +void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps) +{ + struct sock_exterr_skb *serr; + struct sock *sk = skb->sk; + int err; + + sock_hold(sk); + + *skb_hwtstamps(skb) = *hwtstamps; + + serr = SKB_EXT_ERR(skb); + memset(serr, 0, sizeof(*serr)); + serr->ee.ee_errno = ENOMSG; + serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; + + err = sock_queue_err_skb(sk, skb); + if (err) + kfree_skb(skb); + + sock_put(sk); +} +#endif + +/* include headers needed for get_headlen function */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#include +#endif +#ifdef HAVE_SCTP +#include +#endif + +u32 __kc_eth_get_headlen(const struct net_device __always_unused *dev, + unsigned char *data, unsigned int max_len) +{ + union { + unsigned char *network; + /* l2 headers */ + struct ethhdr *eth; + struct vlan_hdr *vlan; + /* l3 headers */ + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + __be16 proto; + u8 nexthdr = 0; /* default to not TCP */ + u8 hlen; + + /* this should never happen, but better safe than sorry */ + if (max_len < ETH_HLEN) + return max_len; + + /* initialize network frame pointer */ + hdr.network = data; + + /* set first protocol and move network header forward */ + proto = hdr.eth->h_proto; + hdr.network += ETH_HLEN; + +again: + switch (proto) { + /* handle any vlan tag if present */ + case __constant_htons(ETH_P_8021AD): + case __constant_htons(ETH_P_8021Q): + if ((hdr.network - data) > (max_len - VLAN_HLEN)) + return max_len; + + proto = hdr.vlan->h_vlan_encapsulated_proto; + hdr.network += VLAN_HLEN; + goto again; + /* handle L3 protocols */ + case __constant_htons(ETH_P_IP): + if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) + return max_len; + + /* access ihl as a u8 
to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct iphdr)) + return hdr.network - data; + + /* record next protocol if header is present */ + if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) + nexthdr = hdr.ipv4->protocol; + + hdr.network += hlen; + break; +#ifdef NETIF_F_TSO6 + case __constant_htons(ETH_P_IPV6): + if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) + return max_len; + + /* record next protocol */ + nexthdr = hdr.ipv6->nexthdr; + hdr.network += sizeof(struct ipv6hdr); + break; +#endif /* NETIF_F_TSO6 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) + case __constant_htons(ETH_P_FCOE): + hdr.network += FCOE_HEADER_LEN; + break; +#endif + default: + return hdr.network - data; + } + + /* finally sort out L4 */ + switch (nexthdr) { + case IPPROTO_TCP: + if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) + return max_len; + + /* access doff as a u8 to avoid unaligned access on ia64 */ + hdr.network += max_t(u8, sizeof(struct tcphdr), + (hdr.network[12] & 0xF0) >> 2); + + break; + case IPPROTO_UDP: + case IPPROTO_UDPLITE: + hdr.network += sizeof(struct udphdr); + break; +#ifdef HAVE_SCTP + case IPPROTO_SCTP: + hdr.network += sizeof(struct sctphdr); + break; +#endif + } + + /* + * If everything has gone correctly hdr.network should be the + * data section of the packet and will be the end of the header. + * If not then it probably represents the end of the last recognized + * header. 
+ */ + return min_t(unsigned int, hdr.network - data, max_len); +} + +#endif /* < 3.18.0 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) +#ifdef HAVE_NET_GET_RANDOM_ONCE +static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN]; + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + BUG_ON(len > sizeof(__kc_netdev_rss_key)); + net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key)); + memcpy(buffer, __kc_netdev_rss_key, len); +} +#endif + +int _kc_bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, + int nmaskbits) +{ + ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf - 2; + int n = 0; + + if (len > 1) { + n = list ? bitmap_scnlistprintf(buf, len, maskp, nmaskbits) : + bitmap_scnprintf(buf, len, maskp, nmaskbits); + buf[n++] = '\n'; + buf[n] = '\0'; + } + return n; +} +#endif + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) +#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + (SLE_VERSION_CODE > SLE_VERSION(12,1,0))) +unsigned int _kc_cpumask_local_spread(unsigned int i, int node) +{ + int cpu; + + /* Wrap: we always want a cpu. */ + i %= num_online_cpus(); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)) + /* Kernels prior to 2.6.28 do not have for_each_cpu or + * cpumask_of_node, so just use for_each_online_cpu() + */ + for_each_online_cpu(cpu) + if (i-- == 0) + return cpu; + + return 0; +#else + if (node == -1) { + for_each_cpu(cpu, cpu_online_mask) + if (i-- == 0) + return cpu; + } else { + /* NUMA first. */ + for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask) + if (i-- == 0) + return cpu; + + for_each_cpu(cpu, cpu_online_mask) { + /* Skip NUMA nodes, done above. 
*/ + if (cpumask_test_cpu(cpu, cpumask_of_node(node))) + continue; + + if (i-- == 0) + return cpu; + } + } +#endif /* KERNEL_VERSION >= 2.6.28 */ + BUG(); +} +#endif +#endif + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +/** + * _kc_skb_flow_dissect_flow_keys - parse SKB to fill _kc_flow_keys + * @skb: SKB used to fille _kc_flow_keys + * @flow: _kc_flow_keys to set with SKB fields + * @flags: currently unused flags + * + * The purpose of using kcompat for this function is so the caller doesn't have + * to care about which kernel version they are on, which prevents a larger than + * normal #ifdef mess created by using a HAVE_* flag for this case. This is also + * done for 4.2 kernels to simplify calling skb_flow_dissect_flow_keys() + * because in 4.2 kernels skb_flow_dissect_flow_keys() exists, but only has 2 + * arguments. Recent kernels have skb_flow_dissect_flow_keys() that has 3 + * arguments. + * + * The caller needs to understand that this function was only implemented as a + * bare-minimum replacement for recent versions of skb_flow_dissect_flow_keys() + * and this function is in no way similar to skb_flow_dissect_flow_keys(). An + * example use can be found in the ice driver, specifically ice_arfs.c. + * + * This function is treated as a whitelist of supported fields the SKB can + * parse. If new functionality is added make sure to keep this format (i.e. only + * check for fields that are explicity wanted). + * + * Current whitelist: + * + * TCPv4, TCPv6, UDPv4, UDPv6 + * + * If any unexpected protocol or other field is found this function memsets the + * flow passed in back to 0 and returns false. Otherwise the flow is populated + * and returns true. 
+ */ +bool +_kc_skb_flow_dissect_flow_keys(const struct sk_buff *skb, + struct _kc_flow_keys *flow, + unsigned int __always_unused flags) +{ + memset(flow, 0, sizeof(*flow)); + + flow->basic.n_proto = skb->protocol; + switch (flow->basic.n_proto) { + case htons(ETH_P_IP): + flow->basic.ip_proto = ip_hdr(skb)->protocol; + flow->addrs.v4addrs.src = ip_hdr(skb)->saddr; + flow->addrs.v4addrs.dst = ip_hdr(skb)->daddr; + break; + case htons(ETH_P_IPV6): + flow->basic.ip_proto = ipv6_hdr(skb)->nexthdr; + memcpy(&flow->addrs.v6addrs.src, &ipv6_hdr(skb)->saddr, + sizeof(struct in6_addr)); + memcpy(&flow->addrs.v6addrs.dst, &ipv6_hdr(skb)->daddr, + sizeof(struct in6_addr)); + break; + default: + netdev_dbg(skb->dev, "%s: Unsupported/unimplemented layer 3 protocol %04x\n", __func__, htons(flow->basic.n_proto)); + goto unsupported; + } + + switch (flow->basic.ip_proto) { + case IPPROTO_TCP: + { + struct tcphdr *tcph; + + tcph = tcp_hdr(skb); + flow->ports.src = tcph->source; + flow->ports.dst = tcph->dest; + break; + } + case IPPROTO_UDP: + { + struct udphdr *udph; + + udph = udp_hdr(skb); + flow->ports.src = udph->source; + flow->ports.dst = udph->dest; + break; + } + default: + netdev_dbg(skb->dev, "%s: Unsupported/unimplemented layer 4 protocol %02x\n", __func__, flow->basic.ip_proto); + return false; + } + + return true; + +unsupported: + memset(flow, 0, sizeof(*flow)); + return false; +} +#endif /* ! >= RHEL7.4 && ! 
>= SLES12.2 */ +#endif /* 4.3.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) ) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) +#ifdef CONFIG_SPARC +#include +#include +#endif +int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, + u8 *mac_addr __maybe_unused) +{ +#if (((LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)) && defined(CONFIG_OF) && \ + !defined(HAVE_STRUCT_DEVICE_OF_NODE) || !defined(CONFIG_OF)) && \ + !defined(CONFIG_SPARC)) + return -ENODEV; +#else + const unsigned char *addr; + struct device_node *dp; + + if (dev_is_pci(dev)) + dp = pci_device_to_OF_node(to_pci_dev(dev)); + else +#if defined(HAVE_STRUCT_DEVICE_OF_NODE) && defined(CONFIG_OF) + dp = dev->of_node; +#else + dp = NULL; +#endif + + addr = NULL; + if (dp) + addr = of_get_mac_address(dp); +#ifdef CONFIG_SPARC + /* Kernel hasn't implemented arch_get_platform_mac_address, but we + * should handle the SPARC case here since it was supported + * originally. This is replaced by arch_get_platform_mac_address() + * upstream. 
+ */ + if (!addr) + addr = idprom->id_ethaddr; +#endif + if (!addr) + return -ENODEV; + + ether_addr_copy(mac_addr, addr); + return 0; +#endif +} +#endif /* !(RHEL_RELEASE >= 7.3) */ +#endif /* < 4.5.0 */ + +/*****************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)))) +const char *_kc_phy_speed_to_str(int speed) +{ + switch (speed) { + case SPEED_10: + return "10Mbps"; + case SPEED_100: + return "100Mbps"; + case SPEED_1000: + return "1Gbps"; + case SPEED_2500: + return "2.5Gbps"; + case SPEED_5000: + return "5Gbps"; + case SPEED_10000: + return "10Gbps"; + case SPEED_14000: + return "14Gbps"; + case SPEED_20000: + return "20Gbps"; + case SPEED_25000: + return "25Gbps"; + case SPEED_40000: + return "40Gbps"; + case SPEED_50000: + return "50Gbps"; + case SPEED_56000: + return "56Gbps"; +#ifdef SPEED_100000 + case SPEED_100000: + return "100Gbps"; +#endif + case SPEED_UNKNOWN: + return "Unknown"; + default: + return "Unsupported (update phy-core.c)"; + } +} +#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0) ) +void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, + struct ethtool_link_ksettings *src) +{ + unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); + unsigned int idx = 0; + + for (; idx < size; idx++) { + dst->link_modes.supported[idx] &= + src->link_modes.supported[idx]; + dst->link_modes.advertising[idx] &= + src->link_modes.advertising[idx]; + } +} +#endif /* 4.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0)) +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) 
&& \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,5,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0) || \ + SLE_VERSION_CODE >= SLE_VERSION(15,1,0)) +#if BITS_PER_LONG == 64 +/** + * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap + * @bitmap: array of unsigned longs, the destination bitmap + * @buf: array of u32 (in host byte order), the source bitmap + * @nbits: number of bits in @bitmap + */ +void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits) +{ + unsigned int i, halfwords; + + halfwords = DIV_ROUND_UP(nbits, 32); + for (i = 0; i < halfwords; i++) { + bitmap[i/2] = (unsigned long) buf[i]; + if (++i < halfwords) + bitmap[i/2] |= ((unsigned long) buf[i]) << 32; + } + + /* Clear tail bits in last word beyond nbits. */ + if (nbits % BITS_PER_LONG) + bitmap[(halfwords - 1) / 2] &= BITMAP_LAST_WORD_MASK(nbits); +} +#endif /* BITS_PER_LONG == 64 */ +#endif /* !(RHEL >= 8.0) && !(SLES >= 12.5 && SLES < 15.0 || SLES >= 15.1) */ +#endif /* 4.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0)) +/* PCIe link information */ +#define PCIE_SPEED2STR(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? "16 GT/s" : \ + (speed) == PCIE_SPEED_8_0GT ? "8 GT/s" : \ + (speed) == PCIE_SPEED_5_0GT ? "5 GT/s" : \ + (speed) == PCIE_SPEED_2_5GT ? "2.5 GT/s" : \ + "Unknown speed") + +/* PCIe speed to Mb/s reduced by encoding overhead */ +#define PCIE_SPEED2MBS_ENC(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \ + (speed) == PCIE_SPEED_8_0GT ? 8000*128/130 : \ + (speed) == PCIE_SPEED_5_0GT ? 5000*8/10 : \ + (speed) == PCIE_SPEED_2_5GT ? 
2500*8/10 : \ + 0) + +static u32 +_kc_pcie_bandwidth_available(struct pci_dev *dev, + struct pci_dev **limiting_dev, + enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + u32 bw, next_bw; + + if (speed) + *speed = PCI_SPEED_UNKNOWN; + if (width) + *width = PCIE_LNK_WIDTH_UNKNOWN; + + bw = 0; + + while (dev) { + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed); + + /* Check if current device limits the total bandwidth */ + if (!bw || next_bw <= bw) { + bw = next_bw; + + if (limiting_dev) + *limiting_dev = dev; + if (speed) + *speed = next_speed; + if (width) + *width = next_width; + } + + dev = pci_upstream_bridge(dev); + } + + return bw; +} + +static enum pci_bus_speed _kc_pcie_get_speed_cap(struct pci_dev *dev) +{ + u32 lnkcap2, lnkcap; + + /* + * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link + * Speeds Vector in Link Capabilities 2 when supported, falling + * back to Max Link Speed in Link Capabilities otherwise. 
+ */ + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); + if (lnkcap2) { /* PCIe r3.0-compliant */ + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + return PCI_SPEED_UNKNOWN; + } + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) { + if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + } + + return PCI_SPEED_UNKNOWN; +} + +static enum pcie_link_width _kc_pcie_get_width_cap(struct pci_dev *dev) +{ + u32 lnkcap; + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) + return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4; + + return PCIE_LNK_WIDTH_UNKNOWN; +} + +static u32 +_kc_pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + *speed = _kc_pcie_get_speed_cap(dev); + *width = _kc_pcie_get_width_cap(dev); + + if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) + return 0; + + return *width * PCIE_SPEED2MBS_ENC(*speed); +} + +void _kc_pcie_print_link_status(struct pci_dev *dev) { + enum pcie_link_width width, width_cap; + enum pci_bus_speed speed, speed_cap; + struct pci_dev *limiting_dev = NULL; + u32 bw_avail, bw_cap; + + bw_cap = _kc_pcie_bandwidth_capable(dev, &speed_cap, &width_cap); + bw_avail = _kc_pcie_bandwidth_available(dev, &limiting_dev, &speed, + &width); + + if (bw_avail >= bw_cap) + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); + else + pci_info(dev, "%u.%03u Gb/s 
available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", + bw_avail / 1000, bw_avail % 1000, + PCIE_SPEED2STR(speed), width, + limiting_dev ? pci_name(limiting_dev) : "", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); +} +#endif /* 4.17.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_NDO_FDB_ADD_EXTACK +#else /* !RHEL || RHEL < 8.1 */ +#ifdef HAVE_TC_SETUP_CLSFLOWER +#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \ + const struct flow_match *__m = &(__rule)->match; \ + struct flow_dissector *__d = (__m)->dissector; \ + \ + (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \ + (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask); \ + +void ngbe_flow_rule_match_basic(const struct flow_rule *rule, + struct flow_match_basic *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out); +} + +void ngbe_flow_rule_match_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out); +} + +void ngbe_flow_rule_match_eth_addrs(const struct flow_rule *rule, + struct flow_match_eth_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out); +} + +#ifdef HAVE_TC_FLOWER_ENC +void ngbe_flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out); +} + +void ngbe_flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out); +} + +void ngbe_flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out); +} + +void 
ngbe_flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out); +} + +void ngbe_flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out); +} +#endif + +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +void ngbe_flow_rule_match_vlan(const struct flow_rule *rule, + struct flow_match_vlan *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out); +} +#endif + +void ngbe_flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out); +} + +void ngbe_flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out); +} + +void ngbe_flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out); +} +#endif /* HAVE_TC_SETUP_CLSFLOWER */ +#endif /* !RHEL || RHEL < 8.1 */ +#endif /* 5.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#if 0 +int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head __always_unused *driver_list, + tc_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + bool ingress_only) +{ + if (ingress_only && + f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + /* Note: Upstream has driver_block_list, but older kernels do not */ + switch (f->command) { + case TC_BLOCK_BIND: +#ifdef HAVE_TCF_BLOCK_CB_REGISTER_EXTACK + return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv, + f->extack); +#else + 
return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv); +#endif + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, cb, cb_ident); + return 0; + default: + return -EOPNOTSUPP; + } +} +#endif +#endif +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#endif /* 5.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)) +u64 _kc_pci_get_dsn(struct pci_dev *dev) +{ + u32 dword; + u64 dsn; + int pos; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN); + if (!pos) + return 0; + + /* + * The Device Serial Number is two dwords offset 4 bytes from the + * capability position. The specification says that the first dword is + * the lower half, and the second dword is the upper half. + */ + pos += 4; + pci_read_config_dword(dev, pos, &dword); + dsn = (u64)dword; + pci_read_config_dword(dev, pos + 4, &dword); + dsn |= ((u64)dword) << 32; + + return dsn; +} +#endif /* 5.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,17,0)) +#ifndef ETH_HW_ADDR_SET +void _kc_eth_hw_addr_set(struct net_device *dev, const void *addr) +{ + ether_addr_copy(dev->dev_addr, addr); +} +#endif /* ETH_HW_ADDR_SET */ +#endif /* 5.17.0 */ \ No newline at end of file diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_kcompat.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_kcompat.h new file mode 100644 index 000000000000..0063df1f305a --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_kcompat.h @@ -0,0 +1,7730 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + */ + + +#ifndef _KCOMPAT_H_ +#define _KCOMPAT_H_ + +#ifndef LINUX_VERSION_CODE +#include +#else +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef GCC_VERSION +#define GCC_VERSION (__GNUC__ * 10000 \ + + __GNUC_MINOR__ * 100 \ + + __GNUC_PATCHLEVEL__) +#endif /* GCC_VERSION */ + +/* Backport macros for controlling GCC diagnostics */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0) ) + +/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */ +#if GCC_VERSION >= 40600 +#define __diag_str1(s) #s +#define __diag_str(s) __diag_str1(s) +#define __diag(s) _Pragma(__diag_str(GCC diagnostic s)) +#else +#define __diag(s) +#endif /* GCC_VERSION >= 4.6 */ +#define __diag_push() __diag(push) +#define __diag_pop() __diag(pop) +#endif /* LINUX_VERSION < 4.18.0 */ + +#ifndef NSEC_PER_MSEC +#define NSEC_PER_MSEC 1000000L +#endif +#include +/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */ +#ifndef UTS_RELEASE +/* utsrelease.h changed locations in 2.6.33 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) +#include +#else +#include +#endif +#endif + +/*888888888888888888888888888*/ + +/*#define NGBE_SUPPORT_KYLIN*/ /*kylin to open*/ +/*#define CONFIG_EULER_KERNEL */ /*EULER to open*/ +/*#define CONFIG_UOS_KERNEL */ /*UOS to open*/ + +/**88888888888888888888888888*/ +#ifndef NGBE_STATIC_ITR +#define 
NGBE_STATIC_ITR 1 /* static itr configure */ +#endif + +#ifndef NGBE_LINK_RETRY +#define NGBE_LINK_RETRY 1 /* link retry configure */ +#endif + +#ifndef NGBE_POLL_LINK_STATUS +#define NGBE_POLL_LINK_STATUS 0 +#endif + +#ifndef NGBE_PCIE_RECOVER +#define NGBE_PCIE_RECOVER 1 +#endif + +#ifndef NGBE_RECOVER_CHECK +#define NGBE_RECOVER_CHECK 1 +#endif + +#ifndef NGBE_DIS_COMP_TIMEOUT +#define NGBE_DIS_COMP_TIMEOUT 1 /* disable completion timeout configure */ +#endif + +/**88888888888888888888888888*/ + + +#if defined(NGBE_SUPPORT_KYLIN) +#ifdef UTS_UBUNTU_RELEASE_ABI +#undef UTS_UBUNTU_RELEASE_ABI +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0) +#define UTS_UBUNTU_RELEASE_ABI 21 +#else +#define UTS_UBUNTU_RELEASE_ABI 21 +#endif +#endif + +/* For Kylin: + * support Kylin-4.0.2-SP2-17122218.j1-arm64 */ +#ifdef UTS_KYLINOS_RELEASE_ABI +#ifndef UTS_UBUNTU_RELEASE_ABI +#if UTS_KYLINOS_RELEASE_ABI <= 20171215 +#define UTS_UBUNTU_RELEASE_ABI 21 +#else +#define UTS_UBUNTU_RELEASE_ABI 21 +#endif +#endif /* !UTS_UBUNTU_RELEASE_ABI */ +#endif /* UTS_KYLINOS_RELEASE_ABI */ + +/* NAPI enable/disable flags here */ +#define NAPI + +#define adapter_struct ngbe_adapter +#define adapter_q_vector ngbe_q_vector + +/* and finally set defines so that the code sees the changes */ +#ifdef NAPI +#else +#endif /* NAPI */ + +/* Dynamic LTR and deeper C-State support disable/enable */ + +/* packet split disable/enable */ +#ifdef DISABLE_PACKET_SPLIT +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT +#define CONFIG_NGBE_DISABLE_PACKET_SPLIT +#endif +#endif /* DISABLE_PACKET_SPLIT */ + +/* MSI compatibility code for all kernels and drivers */ +#ifdef DISABLE_PCI_MSI +#undef CONFIG_PCI_MSI +#endif +#ifndef CONFIG_PCI_MSI +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 8)) +struct msix_entry { + u16 vector; /* kernel uses to write allocated vector */ + u16 entry; /* driver uses to specify entry, OS writes */ +}; +#endif +#undef pci_enable_msi +#define pci_enable_msi(a) -ENOTSUPP +#undef pci_disable_msi +#define 
pci_disable_msi(a) do {} while (0) +#undef pci_enable_msix +#define pci_enable_msix(a, b, c) -ENOTSUPP +#undef pci_disable_msix +#define pci_disable_msix(a) do {} while (0) +#define msi_remove_pci_irq_vectors(a) do {} while (0) +#endif /* CONFIG_PCI_MSI */ +#ifdef DISABLE_PM +#undef CONFIG_PM +#endif + +#ifdef DISABLE_NET_POLL_CONTROLLER +#undef CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef PMSG_SUSPEND +#define PMSG_SUSPEND 3 +#endif + +/* generic boolean compatibility */ +#undef TRUE +#undef FALSE +#define TRUE true +#define FALSE false +#ifdef GCC_VERSION +#if ( GCC_VERSION < 3000 ) +#define _Bool char +#endif +#else +#define _Bool char +#endif + +#ifndef ipv6_authlen +#define ipv6_authlen(p) (((p)->hdrlen+2) << 2) +#endif + +#ifndef BIT +#define BIT(nr) (1UL << (nr)) +#endif + +#undef __always_unused +#define __always_unused __attribute__((__unused__)) + +#undef __maybe_unused +#define __maybe_unused __attribute__((__unused__)) + +/* kernels less than 2.4.14 don't have this */ +#ifndef ETH_P_8021Q +#define ETH_P_8021Q 0x8100 +#endif + +#ifndef module_param +#define module_param(v,t,p) MODULE_PARM(v, "i"); +#endif + +#ifndef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffffffffffffULL +#endif + +#ifndef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0x00000000ffffffffULL +#endif + +#ifndef PCI_CAP_ID_EXP +#define PCI_CAP_ID_EXP 0x10 +#endif + +#ifndef uninitialized_var +#define uninitialized_var(x) x = x +#endif + +#ifndef PCIE_LINK_STATE_L0S +#define PCIE_LINK_STATE_L0S 1 +#endif +#ifndef PCIE_LINK_STATE_L1 +#define PCIE_LINK_STATE_L1 2 +#endif + +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(net, pdev) +#endif + +#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#define free_netdev(x) kfree(x) +#endif + +#ifdef HAVE_POLL_CONTROLLER +#define CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef SKB_DATAREF_SHIFT +/* if we do not have the infrastructure to detect if skb_header is cloned + just return false in all cases */ +#define 
skb_header_cloned(x) 0 +#endif + +#ifndef NETIF_F_GSO +#define gso_size tso_size +#define gso_segs tso_segs +#endif + +#ifndef NETIF_F_GRO +#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \ + vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan) +#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb) +#endif + +#ifndef NETIF_F_SCTP_CSUM +#define NETIF_F_SCTP_CSUM 0 +#endif + +#ifndef NETIF_F_LRO +#define NETIF_F_LRO BIT(15) +#endif + +#ifndef NETIF_F_NTUPLE +#define NETIF_F_NTUPLE BIT(27) +#endif + +#ifndef NETIF_F_ALL_FCOE +#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ + NETIF_F_FSO) +#endif + +#ifndef IPPROTO_SCTP +#define IPPROTO_SCTP 132 +#endif + +#ifndef IPPROTO_UDPLITE +#define IPPROTO_UDPLITE 136 +#endif + +#ifndef CHECKSUM_PARTIAL +#define CHECKSUM_PARTIAL CHECKSUM_HW +#define CHECKSUM_COMPLETE CHECKSUM_HW +#endif + +#ifndef __read_mostly +#define __read_mostly +#endif + +#ifndef MII_RESV1 +#define MII_RESV1 0x17 /* Reserved... */ +#endif + +#ifndef unlikely +#define unlikely(_x) _x +#define likely(_x) _x +#endif + +#ifndef WARN_ON +#define WARN_ON(x) +#endif + +#ifndef PCI_DEVICE +#define PCI_DEVICE(vend,dev) \ + .vendor = (vend), .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID +#endif + +#ifndef node_online +#define node_online(node) ((node) == 0) +#endif + +#ifndef _LINUX_RANDOM_H +#include +#endif + +#ifndef BITS_PER_TYPE +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) +#endif + +#ifndef BITS_TO_LONGS +#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) +#endif + +#ifndef DECLARE_BITMAP +#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)] +#endif + +#ifndef VLAN_HLEN +#define VLAN_HLEN 4 +#endif + +#ifndef VLAN_ETH_HLEN +#define VLAN_ETH_HLEN 18 +#endif + +#ifndef VLAN_ETH_FRAME_LEN +#define VLAN_ETH_FRAME_LEN 1518 +#endif + +#ifndef DCA_GET_TAG_TWO_ARGS +#define dca3_get_tag(a,b) dca_get_tag(b) +#endif + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#if 
defined(__i386__) || defined(__x86_64__) +#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#endif +#endif + +/* taken from 2.6.24 definition in linux/kernel.h */ +#ifndef IS_ALIGNED +#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0) +#endif + +#ifdef IS_ENABLED +#undef IS_ENABLED +#undef __ARG_PLACEHOLDER_1 +#undef config_enabled +#undef _config_enabled +#undef __config_enabled +#undef ___config_enabled +#endif + +#define __ARG_PLACEHOLDER_1 0, +#define config_enabled(cfg) _config_enabled(cfg) +#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) +#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) +#define ___config_enabled(__ignored, val, ...) val + +#define IS_ENABLED(option) \ + (config_enabled(option) || config_enabled(option##_MODULE)) + +#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX) +struct _kc_vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_ethhdr _kc_vlan_ethhdr +struct _kc_vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_hdr _kc_vlan_hdr +#define vlan_tx_tag_present(_skb) 0 +#define vlan_tx_tag_get(_skb) 0 +#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */ + +#ifndef VLAN_PRIO_SHIFT +#define VLAN_PRIO_SHIFT 13 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_2_5GB +#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_5_0GB +#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_8_0GB +#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X1 +#define PCI_EXP_LNKSTA_NLW_X1 0x0010 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X2 +#define PCI_EXP_LNKSTA_NLW_X2 0x0020 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X4 +#define PCI_EXP_LNKSTA_NLW_X4 0x0040 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X8 +#define PCI_EXP_LNKSTA_NLW_X8 0x0080 +#endif + +#ifndef __GFP_COLD 
+#define __GFP_COLD 0 +#endif + +#ifndef __GFP_COMP +#define __GFP_COMP 0 +#endif + +#ifndef IP_OFFSET +#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */ +#endif + +/*****************************************************************************/ +/* Installations with ethtool version without eeprom, adapter id, or statistics + * support */ + +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif + +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x1d +#undef ethtool_drvinfo +#define ethtool_drvinfo k_ethtool_drvinfo +struct k_ethtool_drvinfo { + u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char reserved1[32]; + char reserved2[16]; + u32 n_stats; + u32 testinfo_len; + u32 eedump_len; + u32 regdump_len; +}; + +struct ethtool_stats { + u32 cmd; + u32 n_stats; + u64 data[0]; +}; +#endif /* ETHTOOL_GSTATS */ + +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x1c +#endif /* ETHTOOL_PHYS_ID */ + +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x1b +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS, +}; +struct ethtool_gstrings { + u32 cmd; /* ETHTOOL_GSTRINGS */ + u32 string_set; /* string set id e.c. 
ETH_SS_TEST, etc*/ + u32 len; /* number of strings in the string set */ + u8 data[0]; +}; +#endif /* ETHTOOL_GSTRINGS */ + +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x1a +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = BIT(0), + ETH_TEST_FL_FAILED = BIT(1), +}; +struct ethtool_test { + u32 cmd; + u32 flags; + u32 reserved; + u32 len; + u64 data[0]; +}; +#endif /* ETHTOOL_TEST */ + +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0xb +#undef ETHTOOL_GREGS +struct ethtool_eeprom { + u32 cmd; + u32 magic; + u32 offset; + u32 len; + u8 data[0]; +}; + +struct ethtool_value { + u32 cmd; + u32 data; +}; +#endif /* ETHTOOL_GEEPROM */ + +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0xa +#endif /* ETHTOOL_GLINK */ + +#ifndef ETHTOOL_GWOL +#define ETHTOOL_GWOL 0x5 +#define ETHTOOL_SWOL 0x6 +#define SOPASS_MAX 6 +struct ethtool_wolinfo { + u32 cmd; + u32 supported; + u32 wolopts; + u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ +}; +#endif /* ETHTOOL_GWOL */ + +#ifndef ETHTOOL_GREGS +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ +#define ethtool_regs _kc_ethtool_regs +/* for passing big chunks of data */ +struct _kc_ethtool_regs { + u32 cmd; + u32 version; /* driver-specific, indicates different chips/revs */ + u32 len; /* bytes */ + u8 data[0]; +}; +#endif /* ETHTOOL_GREGS */ + +#ifndef ETHTOOL_GMSGLVL +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#endif +#ifndef ETHTOOL_SMSGLVL +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. 
*/ +#endif +#ifndef ETHTOOL_NWAY_RST +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ +#endif +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0x0000000a /* Get link status */ +#endif +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#endif +#ifndef ETHTOOL_SEEPROM +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ +#endif +#ifndef ETHTOOL_GCOALESCE +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ +/* for configuring coalescing parameters of chip */ +#define ethtool_coalesce _kc_ethtool_coalesce +struct _kc_ethtool_coalesce { + u32 cmd; /* ETHTOOL_{G,S}COALESCE */ + + /* How many usecs to delay an RX interrupt after + * a packet arrives. If 0, only rx_max_coalesced_frames + * is used. + */ + u32 rx_coalesce_usecs; + + /* How many packets to delay an RX interrupt after + * a packet arrives. If 0, only rx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause RX interrupts to never be + * generated. + */ + u32 rx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. + */ + u32 rx_coalesce_usecs_irq; + u32 rx_max_coalesced_frames_irq; + + /* How many usecs to delay a TX interrupt after + * a packet is sent. If 0, only tx_max_coalesced_frames + * is used. + */ + u32 tx_coalesce_usecs; + + /* How many packets to delay a TX interrupt after + * a packet is sent. If 0, only tx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause TX interrupts to never be + * generated. + */ + u32 tx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. 
+ */ + u32 tx_coalesce_usecs_irq; + u32 tx_max_coalesced_frames_irq; + + /* How many usecs to delay in-memory statistics + * block updates. Some drivers do not have an in-memory + * statistic block, and in such cases this value is ignored. + * This value must not be zero. + */ + u32 stats_block_coalesce_usecs; + + /* Adaptive RX/TX coalescing is an algorithm implemented by + * some drivers to improve latency under low packet rates and + * improve throughput under high packet rates. Some drivers + * only implement one of RX or TX adaptive coalescing. Anything + * not implemented by the driver causes these values to be + * silently ignored. + */ + u32 use_adaptive_rx_coalesce; + u32 use_adaptive_tx_coalesce; + + /* When the packet rate (measured in packets per second) + * is below pkt_rate_low, the {rx,tx}_*_low parameters are + * used. + */ + u32 pkt_rate_low; + u32 rx_coalesce_usecs_low; + u32 rx_max_coalesced_frames_low; + u32 tx_coalesce_usecs_low; + u32 tx_max_coalesced_frames_low; + + /* When the packet rate is below pkt_rate_high but above + * pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used. + */ + + /* When the packet rate is (measured in packets per second) + * is above pkt_rate_high, the {rx,tx}_*_high parameters are + * used. + */ + u32 pkt_rate_high; + u32 rx_coalesce_usecs_high; + u32 rx_max_coalesced_frames_high; + u32 tx_coalesce_usecs_high; + u32 tx_max_coalesced_frames_high; + + /* How often to do adaptive coalescing packet rate sampling, + * measured in seconds. Must not be zero. + */ + u32 rate_sample_interval; +}; +#endif /* ETHTOOL_GCOALESCE */ + +#ifndef ETHTOOL_SCOALESCE +#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. 
*/ +#endif +#ifndef ETHTOOL_GRINGPARAM +#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ +/* for configuring RX/TX ring parameters */ +#define ethtool_ringparam _kc_ethtool_ringparam +struct _kc_ethtool_ringparam { + u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ + + /* Read only attributes. These indicate the maximum number + * of pending RX/TX ring entries the driver will allow the + * user to set. + */ + u32 rx_max_pending; + u32 rx_mini_max_pending; + u32 rx_jumbo_max_pending; + u32 tx_max_pending; + + /* Values changeable by the user. The valid values are + * in the range 1 to the "*_max_pending" counterpart above. + */ + u32 rx_pending; + u32 rx_mini_pending; + u32 rx_jumbo_pending; + u32 tx_pending; +}; +#endif /* ETHTOOL_GRINGPARAM */ + +#ifndef ETHTOOL_SRINGPARAM +#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */ +#endif +#ifndef ETHTOOL_GPAUSEPARAM +#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ +/* for configuring link flow control parameters */ +#define ethtool_pauseparam _kc_ethtool_pauseparam +struct _kc_ethtool_pauseparam { + u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ + + /* If the link is being auto-negotiated (via ethtool_cmd.autoneg + * being true) the user may set 'autoneg' here non-zero to have the + * pause parameters be auto-negotiated too. In such a case, the + * {rx,tx}_pause values below determine what capabilities are + * advertised. + * + * If 'autoneg' is zero or the link is not being auto-negotiated, + * then {rx,tx}_pause force the driver to use/not-use pause + * flow control. + */ + u32 autoneg; + u32 rx_pause; + u32 tx_pause; +}; +#endif /* ETHTOOL_GPAUSEPARAM */ + +#ifndef ETHTOOL_SPAUSEPARAM +#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. 
*/ +#endif +#ifndef ETHTOOL_GRXCSUM +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_SRXCSUM +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GTXCSUM +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STXCSUM +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GSG +#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable + * (ethtool_value) */ +#endif +#ifndef ETHTOOL_SSG +#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable + * (ethtool_value). */ +#endif +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */ +#endif +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ +#endif +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ +#endif +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ +#endif +#ifndef ETHTOOL_GTSO +#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STSO +#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ +#endif + +#ifndef ETHTOOL_BUSINFO_LEN +#define ETHTOOL_BUSINFO_LEN 32 +#endif + +#ifndef WAKE_FILTER +#define WAKE_FILTER BIT(7) +#endif + +#ifndef SPEED_2500 +#define SPEED_2500 2500 +#endif +#ifndef SPEED_5000 +#define SPEED_5000 5000 +#endif +#ifndef SPEED_14000 +#define SPEED_14000 14000 +#endif +#ifndef SPEED_25000 +#define SPEED_25000 25000 +#endif +#ifndef SPEED_50000 +#define SPEED_50000 50000 +#endif +#ifndef SPEED_56000 +#define SPEED_56000 56000 +#endif +#ifndef SPEED_100000 +#define SPEED_100000 100000 +#endif + +#ifndef RHEL_RELEASE_VERSION +#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif +#ifndef AX_RELEASE_VERSION +#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif + +#ifndef 
AX_RELEASE_CODE
#define AX_RELEASE_CODE 0
#endif

/* Map Asianux (AX) release codes to the RHEL release codes they track,
 * so the rest of this header only ever needs to test RHEL_RELEASE_CODE.
 */
#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0))
#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0)
#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1))
#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1)
#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2))
#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3)
#endif

#ifndef RHEL_RELEASE_CODE
/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */
#define RHEL_RELEASE_CODE 0
#endif

/* RHEL 7 didn't backport the parameter change in
 * create_singlethread_workqueue.
 * If/when RH corrects this we will want to tighten up the version check.
 */
#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0))
#undef create_singlethread_workqueue
#define create_singlethread_workqueue(name) \
	alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
#endif

/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find
 * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new
 * enough versions of Ubuntu. Otherwise you can simply see it in the output of
 * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in
 * the linux-source package, but in the linux-headers package. It begins to
 * appear in later releases of 14.04 and 14.10.
 *
 * Ex:
 *
 * $uname -r
 * 3.13.0-45-generic
 * ABI is 45
 *
 *
 * $uname -r
 * 3.16.0-23-generic
 * ABI is 23
 */
#ifndef UTS_UBUNTU_RELEASE_ABI
#define UTS_UBUNTU_RELEASE_ABI 0
#define UBUNTU_VERSION_CODE 0
#else
/* Ubuntu does not provide actual release version macro, so we use the kernel
 * version plus the ABI to generate a unique version code specific to Ubuntu.
 * In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in order to
 * ignore differences in sublevel which are not important since we have the
 * ABI value. Otherwise, it becomes impossible to correlate ABI to version for
 * ordering checks.
 */
#define UBUNTU_VERSION_CODE (((~0xFF & LINUX_VERSION_CODE) << 8) + \
			     UTS_UBUNTU_RELEASE_ABI)

/* The ABI must fit in the 8 bits freed by the mask above. */
#if UTS_UBUNTU_RELEASE_ABI > 255
#error UTS_UBUNTU_RELEASE_ABI is too large...
#endif /* UTS_UBUNTU_RELEASE_ABI > 255 */

#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,0) )
/* Our version code scheme does not make sense for non 3.x or newer kernels,
 * and we have no support in kcompat for this scenario. Thus, treat this as a
 * non-Ubuntu kernel. Possibly might be better to error here.
 */
#define UTS_UBUNTU_RELEASE_ABI 0
#define UBUNTU_VERSION_CODE 0
#endif

#endif

/* Note that the 3rd digit is always zero, and will be ignored. This is
 * because Ubuntu kernels are based on x.y.0-ABI values, and while their linux
 * version codes are 3 digit, this 3rd digit is superseded by the ABI value.
 */
#define UBUNTU_VERSION(a,b,c,d) ((KERNEL_VERSION(a,b,0) << 8) + (d))

/* SuSE version macros are the same as Linux kernel version macro */
#ifndef SLE_VERSION
#define SLE_VERSION(a,b,c)	KERNEL_VERSION(a,b,c)
#endif
#define SLE_LOCALVERSION(a,b,c)	KERNEL_VERSION(a,b,c)

#ifdef CONFIG_SUSE_KERNEL
/* Starting since at least SLE 12sp4 and SLE 15, the SUSE kernels have
 * provided CONFIG_SUSE_VERSION, CONFIG_SUSE_PATCHLEVEL and
 * CONFIG_SUSE_AUXRELEASE. Use these to generate SLE_VERSION if available.
 * Only fall back to the manual table otherwise. We expect all future versions
 * of SLE kernels to include these values, so the table will remain only for
 * the older releases.
 */
#ifdef CONFIG_SUSE_VERSION
#ifndef CONFIG_SUSE_PATCHLEVEL
#error "CONFIG_SUSE_VERSION exists but CONFIG_SUSE_PATCHLEVEL is missing"
#endif
#ifndef CONFIG_SUSE_AUXRELEASE
#error "CONFIG_SUSE_VERSION exists but CONFIG_SUSE_AUXRELEASE is missing"
#endif
#define SLE_VERSION_CODE SLE_VERSION(CONFIG_SUSE_VERSION, CONFIG_SUSE_PATCHLEVEL, CONFIG_SUSE_AUXRELEASE)
#else
/* If we do not have the CONFIG_SUSE_VERSION configuration values, fall back
 * to the following table for older releases.
 */
#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) )
/* SLES11 GA is 2.6.27 based */
#define SLE_VERSION_CODE SLE_VERSION(11,0,0)
#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) )
/* SLES11 SP1 is 2.6.32 based */
#define SLE_VERSION_CODE SLE_VERSION(11,1,0)
#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(3,0,13) )
/* SLES11 SP2 GA is 3.0.13-0.27 */
#define SLE_VERSION_CODE SLE_VERSION(11,2,0)
#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,76)))
/* SLES11 SP3 GA is 3.0.76-0.11 */
#define SLE_VERSION_CODE SLE_VERSION(11,3,0)
#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101))
  #if (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(0,8,0))
  /* some SLES11sp2 update kernels up to 3.0.101-0.7.x */
  #define SLE_VERSION_CODE SLE_VERSION(11,2,0)
  #elif (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(63,0,0))
  /* most SLES11sp3 update kernels */
  #define SLE_VERSION_CODE SLE_VERSION(11,3,0)
  #else
  /* SLES11 SP4 GA (3.0.101-63) and update kernels 3.0.101-63+ */
  #define SLE_VERSION_CODE SLE_VERSION(11,4,0)
  #endif
#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28))
/* SLES12 GA is 3.12.28-4
 * kernel updates 3.12.xx-<33 through 52>[.yy] */
#define SLE_VERSION_CODE SLE_VERSION(12,0,0)
#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,49))
/* SLES12 SP1 GA is 3.12.49-11
 * updates 3.12.xx-60.yy where xx={51..} */
#define SLE_VERSION_CODE SLE_VERSION(12,1,0)
#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,21) && \
       (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,59))) || \
       (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,74) && \
	LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \
	SLE_LOCALVERSION_CODE >= KERNEL_VERSION(92,0,0) && \
	SLE_LOCALVERSION_CODE < KERNEL_VERSION(93,0,0)))
/* SLES12 SP2 GA is 4.4.21-69.
 * SLES12 SP2 updates before SLES12 SP3 are: 4.4.{21,38,49,59}
 * SLES12 SP2 updates after SLES12 SP3 are: 4.4.{74,90,103,114,120}
 * but they all use a SLE_LOCALVERSION_CODE matching 92.nn.y */
#define SLE_VERSION_CODE SLE_VERSION(12,2,0)
#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(4,4,73) || \
	LINUX_VERSION_CODE == KERNEL_VERSION(4,4,82) || \
	LINUX_VERSION_CODE == KERNEL_VERSION(4,4,92)) || \
       (LINUX_VERSION_CODE == KERNEL_VERSION(4,4,103) && \
       (SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,33,0) || \
	SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,38,0))) || \
       (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,114) && \
	LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \
	SLE_LOCALVERSION_CODE >= KERNEL_VERSION(94,0,0) && \
	SLE_LOCALVERSION_CODE < KERNEL_VERSION(95,0,0)) )
/* SLES12 SP3 GM is 4.4.73-5 and update kernels are 4.4.82-6.3.
 * SLES12 SP3 updates not conflicting with SP2 are: 4.4.{82,92}
 * SLES12 SP3 updates conflicting with SP2 are:
 * - 4.4.103-6.33.1, 4.4.103-6.38.1
 * - 4.4.{114,120}-94.nn.y */
#define SLE_VERSION_CODE SLE_VERSION(12,3,0)
#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \
       (SLE_LOCALVERSION_CODE == KERNEL_VERSION(94,41,0) || \
       (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(95,0,0) && \
	SLE_LOCALVERSION_CODE < KERNEL_VERSION(96,0,0))))
/* SLES12 SP4 GM is 4.12.14-94.41 and update kernel is 4.12.14-95.x.
 */
#define SLE_VERSION_CODE SLE_VERSION(12,4,0)
#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \
       (SLE_LOCALVERSION_CODE == KERNEL_VERSION(23,0,0) || \
	SLE_LOCALVERSION_CODE == KERNEL_VERSION(2,0,0) || \
	SLE_LOCALVERSION_CODE == KERNEL_VERSION(136,0,0) || \
       (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(25,0,0) && \
	SLE_LOCALVERSION_CODE < KERNEL_VERSION(26,0,0)) || \
       (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(150,0,0) && \
	SLE_LOCALVERSION_CODE < KERNEL_VERSION(151,0,0))))
/* SLES15 Beta1 is 4.12.14-2
 * SLES15 GM is 4.12.14-23 and update kernel is 4.12.14-{25,136},
 * and 4.12.14-150.14.
 */
#define SLE_VERSION_CODE SLE_VERSION(15,0,0)
#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \
       SLE_LOCALVERSION_CODE >= KERNEL_VERSION(25,23,0))
/* SLES15 SP1 Beta1 is 4.12.14-25.23 */
#define SLE_VERSION_CODE SLE_VERSION(15,1,0)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,3,13))
/* SLES15 SP2 Beta1 is 5.3.13 */
#define SLE_VERSION_CODE SLE_VERSION(15,2,0)

/* new SLES kernels must be added here with >= based on kernel
 * the idea is to order from newest to oldest and just catch all
 * of them using the >=
 */
#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */
#endif /* !CONFIG_SUSE_VERSION */
#endif /* CONFIG_SUSE_KERNEL */
#ifndef SLE_VERSION_CODE
#define SLE_VERSION_CODE 0
#endif /* SLE_VERSION_CODE */
#ifndef SLE_LOCALVERSION_CODE
#define SLE_LOCALVERSION_CODE 0
#endif /* SLE_LOCALVERSION_CODE */

/*
 * ADQ depends on __TC_MQPRIO_MODE_MAX and related kernel code
 * added around 4.15. Some distributions (e.g. Oracle Linux 7.7)
 * have done a partial back-port of that to their kernels based
 * on older mainline kernels that did not include all the necessary
 * kernel enablement to support ADQ.
 * Undefine __TC_MQPRIO_MODE_MAX for all OSV distributions with
 * kernels based on mainline kernels older than 4.15 except for
 * RHEL, SLES and Ubuntu which are known to have good back-ports.
 */
#if (!RHEL_RELEASE_CODE && !SLE_VERSION_CODE && !UBUNTU_VERSION_CODE)
  #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
  #undef __TC_MQPRIO_MODE_MAX
  #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(4,15,0) */
#endif /* if (NOT RHEL && NOT SLES && NOT UBUNTU) */

#ifdef __KLOCWORK__
/* The following are not compiled into the binary driver; they are here
 * only to tune Klocwork scans to workaround false-positive issues.
 */
#ifdef ARRAY_SIZE
#undef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#define memcpy(dest, src, len)	memcpy_s(dest, len, src, len)
#define memset(dest, ch, len)	memset_s(dest, len, ch, len)

/* Open-coded test_and_clear_bit() so the scanner sees the whole
 * read-modify-write sequence instead of an arch-specific helper.
 */
static inline int _kc_test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags = 0;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old & ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
#define test_and_clear_bit(nr, addr) _kc_test_and_clear_bit(nr, addr)

/* Same idea as above for test_and_set_bit(). */
static inline int _kc_test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags = 0;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old | mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
#define test_and_set_bit(nr, addr) _kc_test_and_set_bit(nr, addr)

#ifdef CONFIG_DYNAMIC_DEBUG
#undef dev_dbg
#define dev_dbg(dev, format, arg...) dev_printk(KERN_DEBUG, dev, format, ##arg)
#undef pr_debug
#define pr_debug(format, arg...) printk(KERN_DEBUG format, ##arg)
#endif /* CONFIG_DYNAMIC_DEBUG */

#undef hlist_for_each_entry_safe
#define hlist_for_each_entry_safe(pos, n, head, member)			     \
	for (n = NULL, pos = hlist_entry_safe((head)->first, typeof(*(pos)), \
					      member);			     \
	     pos;							     \
	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

#ifdef uninitialized_var
#undef uninitialized_var
#define uninitialized_var(x) x = *(&(x))
#endif

#ifdef WRITE_ONCE
#undef WRITE_ONCE
#define WRITE_ONCE(x, val) ((x) = (val))
#endif /* WRITE_ONCE */
#endif /* __KLOCWORK__ */

/* Older versions of GCC will trigger -Wformat-nonliteral warnings for const
 * char * strings. Unfortunately, the implementation of do_trace_printk does
 * this, in order to add a storage attribute to the memory. This was fixed in
 * GCC 5.1, but we still use older distributions built with GCC 4.x.
 *
 * The string pointer is only passed as a const char * to the __trace_bprintk
 * function. Since that function has the __printf attribute, it will trigger
 * the warnings. We can't remove the attribute, so instead we'll use the
 * __diag macro to disable -Wformat-nonliteral around the call to
 * __trace_bprintk.
 */
#if GCC_VERSION < 50100
#define __trace_bprintk(ip, fmt, args...) ({		\
	int err;					\
	__diag_push();					\
	__diag(ignored "-Wformat-nonliteral");		\
	err = __trace_bprintk(ip, fmt, ##args);		\
	__diag_pop();					\
	err;						\
})
#endif /* GCC_VERSION < 5.1.0 */

/* Newer kernels removed <linux/pci-aspm.h> */
#if ((LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)) && \
     (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3)) && \
      !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,3,0)))))
#define HAVE_PCI_ASPM_H
#endif

/*****************************************************************************/
/* 2.4.3 => 2.4.0 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )

/**************************************/
/* PCI DRIVER API */

#ifndef pci_set_dma_mask
#define pci_set_dma_mask _kc_pci_set_dma_mask
int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
#endif

#ifndef pci_request_regions
#define pci_request_regions _kc_pci_request_regions
int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
#endif

#ifndef pci_release_regions
#define pci_release_regions _kc_pci_release_regions
void _kc_pci_release_regions(struct pci_dev *pdev);
#endif

/**************************************/
/* NETWORK DRIVER API */

#ifndef alloc_etherdev
#define alloc_etherdev _kc_alloc_etherdev
struct net_device * _kc_alloc_etherdev(int sizeof_priv);
#endif

#ifndef is_valid_ether_addr
#define is_valid_ether_addr _kc_is_valid_ether_addr
int _kc_is_valid_ether_addr(u8 *addr);
#endif

/**************************************/
/* MISCELLANEOUS */

#ifndef INIT_TQUEUE
#define INIT_TQUEUE(_tq, _routine, _data)		\
	do {						\
		INIT_LIST_HEAD(&(_tq)->list);		\
		(_tq)->sync = 0;			\
		(_tq)->routine = _routine;		\
		(_tq)->data = _data;			\
	} while (0)
#endif

#endif /* 2.4.3 => 2.4.0 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
/* Generic MII registers.
 */
#define MII_BMCR	0x00	/* Basic mode control register */
#define MII_BMSR	0x01	/* Basic mode status register */
#define MII_PHYSID1	0x02	/* PHYS ID 1 */
#define MII_PHYSID2	0x03	/* PHYS ID 2 */
#define MII_ADVERTISE	0x04	/* Advertisement control reg */
#define MII_LPA		0x05	/* Link partner ability reg */
#define MII_EXPANSION	0x06	/* Expansion register */
/* Basic mode control register. */
#define BMCR_FULLDPLX	0x0100	/* Full duplex */
#define BMCR_ANENABLE	0x1000	/* Enable auto negotiation */
/* Basic mode status register. */
#define BMSR_ERCAP	0x0001	/* Ext-reg capability */
#define BMSR_ANEGCAPABLE	0x0008	/* Able to do auto-negotiation */
#define BMSR_10HALF	0x0800	/* Can do 10mbps, half-duplex */
#define BMSR_10FULL	0x1000	/* Can do 10mbps, full-duplex */
#define BMSR_100HALF	0x2000	/* Can do 100mbps, half-duplex */
#define BMSR_100FULL	0x4000	/* Can do 100mbps, full-duplex */
/* Advertisement control register. */
#define ADVERTISE_CSMA	0x0001	/* Only selector supported */
#define ADVERTISE_10HALF	0x0020	/* Try for 10mbps half-duplex */
#define ADVERTISE_10FULL	0x0040	/* Try for 10mbps full-duplex */
#define ADVERTISE_100HALF	0x0080	/* Try for 100mbps half-duplex */
#define ADVERTISE_100FULL	0x0100	/* Try for 100mbps full-duplex */
#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
		       ADVERTISE_100HALF | ADVERTISE_100FULL)
/* Expansion register for auto-negotiation. */
#define EXPANSION_ENABLENPAGE	0x0004	/* This enables npage words */
#endif

/*****************************************************************************/
/* 2.4.6 => 2.4.3 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))

#ifndef pci_set_power_state
#define pci_set_power_state _kc_pci_set_power_state
int _kc_pci_set_power_state(struct pci_dev *dev, int state);
#endif

#ifndef pci_enable_wake
#define pci_enable_wake _kc_pci_enable_wake
int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);
#endif

#ifndef pci_disable_device
#define pci_disable_device _kc_pci_disable_device
void _kc_pci_disable_device(struct pci_dev *pdev);
#endif

/* PCI PM entry point syntax changed, so don't support suspend/resume */
#undef CONFIG_PM

#endif /* 2.4.6 => 2.4.3 */

/* NOTE(review): these expansions reference a local 'adapter' variable at the
 * expansion site and carry a trailing ';' inside the macro body -- kept
 * verbatim, as callers depend on that shape.
 */
#ifndef HAVE_PCI_SET_MWI
#define pci_set_mwi(X) pci_write_config_word(X, \
			       PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
			       PCI_COMMAND_INVALIDATE);
#define pci_clear_mwi(X) pci_write_config_word(X, \
			       PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
			       ~PCI_COMMAND_INVALIDATE);
#endif

/*****************************************************************************/
/* 2.4.10 => 2.4.9 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 10))

/**************************************/
/* MODULE API */

#ifndef MODULE_LICENSE
	#define MODULE_LICENSE(X)
#endif

/**************************************/
/* OTHER */

#undef min
#define min(x, y) ({ \
	const typeof(x) _x = (x);	\
	const typeof(y) _y = (y);	\
	(void) (&_x == &_y);		\
	_x < _y ? _x : _y; })

#undef max
#define max(x, y) ({ \
	const typeof(x) _x = (x);	\
	const typeof(y) _y = (y);	\
	(void) (&_x == &_y);		\
	_x > _y ? _x : _y; })

#define min_t(type, x, y) ({ \
	type _x = (x);		\
	type _y = (y);		\
	_x < _y ? _x : _y; })

#define max_t(type, x, y) ({ \
	type _x = (x);		\
	type _y = (y);		\
	_x > _y ? _x : _y; })

#ifndef list_for_each_safe
#define list_for_each_safe(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
	     pos = n, n = pos->next)
#endif

#ifndef ____cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define ____cacheline_aligned_in_smp ____cacheline_aligned
#else
#define ____cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif

#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
int _kc_snprintf(char * buf, size_t size, const char *fmt, ...);
#define snprintf(buf, size, fmt, args...) _kc_snprintf(buf, size, fmt, ##args)
int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args)
#else /* 2.4.8 => 2.4.9 */
int snprintf(char * buf, size_t size, const char *fmt, ...);
int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
#endif
#endif /* 2.4.10 -> 2.4.6 */


/*****************************************************************************/
/* 2.4.12 => 2.4.10 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 12))
#ifndef HAVE_NETIF_MSG
#define HAVE_NETIF_MSG 1
enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#endif /* !HAVE_NETIF_MSG */
#endif /* 2.4.12 => 2.4.10 */

/*****************************************************************************/
/* 2.4.13 => 2.4.12 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 13))

/**************************************/
/* PCI DMA MAPPING */

#ifndef virt_to_page
	#define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))
#endif

#ifndef pci_map_page
#define pci_map_page _kc_pci_map_page
u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction);
#endif

#ifndef pci_unmap_page
#define pci_unmap_page _kc_pci_unmap_page
void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction);
#endif

/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */

#undef DMA_32BIT_MASK
#define DMA_32BIT_MASK	0xffffffff
#undef DMA_64BIT_MASK
#define DMA_64BIT_MASK	0xffffffff

/**************************************/
/* OTHER */

#ifndef cpu_relax
#define cpu_relax()	rep_nop()
#endif

struct vlan_ethhdr {
	unsigned char h_dest[ETH_ALEN];
	unsigned char h_source[ETH_ALEN];
	unsigned short h_vlan_proto;
	unsigned short h_vlan_TCI;
	unsigned short h_vlan_encapsulated_proto;
};
#endif /* 2.4.13 => 2.4.12 */

/*****************************************************************************/
/* 2.4.17 => 2.4.12 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 17))

#ifndef __devexit_p
	#define __devexit_p(x) &(x)
#endif

#endif /* 2.4.17 => 2.4.13 */

/*****************************************************************************/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 18))
#define NETIF_MSG_HW	0x2000
#define NETIF_MSG_WOL	0x4000

#ifndef netif_msg_hw
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#endif
#ifndef netif_msg_wol
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)
#endif
#endif /* 2.4.18 */

/*****************************************************************************/

/*****************************************************************************/
/* 2.4.20 => 2.4.19 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 20))

/* we won't support NAPI on less than 2.4.20 */
#ifdef NAPI
#undef NAPI
#endif

#endif /* 2.4.20 => 2.4.19 */

/*****************************************************************************/
/* 2.4.22 => 2.4.17 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
#define pci_name(x)	((x)->slot_name)
#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map)

#ifndef SUPPORTED_10000baseT_Full
#define SUPPORTED_10000baseT_Full	BIT(12)
#endif
#ifndef ADVERTISED_10000baseT_Full
#define ADVERTISED_10000baseT_Full	BIT(12)
#endif
#endif

/*****************************************************************************/
/* 2.4.22 => 2.4.17 */

/* NOTE(review): empty duplicate of the 2.4.22 guard above -- kept verbatim */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 22))
#endif

/*****************************************************************************/
/*****************************************************************************/
/* 2.4.23 => 2.4.22 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 23))
/*****************************************************************************/
#ifdef NAPI
#ifndef netif_poll_disable
#define netif_poll_disable(x) _kc_netif_poll_disable(x)
/* Spin (politely) until we own the RX_SCHED bit, i.e. polling is disabled. */
static inline void _kc_netif_poll_disable(struct net_device *netdev)
{
	while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
		/* No hurry */
		current->state = TASK_INTERRUPTIBLE;
		schedule_timeout(1);
	}
}
#endif
#ifndef netif_poll_enable
#define netif_poll_enable(x) _kc_netif_poll_enable(x)
static inline void _kc_netif_poll_enable(struct net_device *netdev)
{
	clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
}
#endif
#endif /* NAPI */
#ifndef netif_tx_disable
#define netif_tx_disable(x) _kc_netif_tx_disable(x)
/* Stop the TX queue under the xmit lock so in-flight transmits finish. */
static inline void _kc_netif_tx_disable(struct net_device *dev)
{
	spin_lock_bh(&dev->xmit_lock);
	netif_stop_queue(dev);
	spin_unlock_bh(&dev->xmit_lock);
}
#endif
#else /* 2.4.23 => 2.4.22 */
#define HAVE_SCTP
#endif /* 2.4.23 => 2.4.22 */

/*****************************************************************************/
/* 2.6.4 => 2.6.0 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 25) || \
    (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) && \
     LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 4)))
#define ETHTOOL_OPS_COMPAT
#endif /* 2.6.4 => 2.6.0 */

/*****************************************************************************/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 27))
#define __user
#endif /* < 2.4.27 */

/*****************************************************************************/
/* 2.5.71 => 2.4.x */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 71))
#define sk_protocol protocol
#define pci_get_device pci_find_device
#endif /* 2.5.70 => 2.4.x */

/*****************************************************************************/
/* < 2.4.27 or 2.6.0 <= 2.6.5 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 27) || \
    (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) && \
     LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 5)))

#ifndef netif_msg_init
#define netif_msg_init _kc_netif_msg_init
/* Translate a module 'debug' parameter into a msg_enable bitmap. */
static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0) /* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
#endif

#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
/*****************************************************************************/
#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 27)) || \
     ((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) && \
      (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 3))))
#define netdev_priv(x) x->priv
#endif

/*****************************************************************************/
/* <= 2.5.0 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0))
#include
/* NOTE(review): the header name after #include was lost when this patch was
 * extracted (angle-bracketed text stripped); restore it from the wangxun oob
 * ngbe-1.2.5.3 source. Dead branch on any kernel this driver targets.
 */
#undef pci_register_driver
#define pci_register_driver pci_module_init

/*
 * Most of the dma compat code is copied/modifed from the 2.4.37
 * /include/linux/libata-compat.h header file
 */
/* These definitions mirror those in pci.h, so they can be used
 * interchangeably with their PCI_ counterparts */
enum dma_data_direction {
	DMA_BIDIRECTIONAL = 0,
	DMA_TO_DEVICE = 1,
	DMA_FROM_DEVICE = 2,
	DMA_NONE = 3,
};

struct device {
	struct pci_dev pdev;
};

static inline struct pci_dev *to_pci_dev (struct device *dev)
{
	return (struct pci_dev *) dev;
}
static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
{
	return (struct device *) pdev;
}
#define pdev_printk(lvl, pdev, fmt, args...) \
	printk("%s %s: " fmt, lvl, pci_name(pdev), ## args)
#define dev_err(dev, fmt, args...) \
	pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args)
#define dev_info(dev, fmt, args...) \
	pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args)
#define dev_warn(dev, fmt, args...) \
	pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args)
#define dev_notice(dev, fmt, args...) \
	pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args)
#define dev_dbg(dev, fmt, args...) \
	pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args)

/* NOTE: dangerous!
we ignore the 'gfp' argument */ +#define dma_alloc_coherent(dev, sz, dma, gfp) \ + pci_alloc_consistent(to_pci_dev(dev), (sz), (dma)) +#define dma_free_coherent(dev, sz, addr, dma_addr) \ + pci_free_consistent(to_pci_dev(dev), (sz), (addr), (dma_addr)) + +#define dma_map_page(dev, a, b, c, d) \ + pci_map_page(to_pci_dev(dev), (a), (b), (c), (d)) +#define dma_unmap_page(dev, a, b, c) \ + pci_unmap_page(to_pci_dev(dev), (a), (b), (c)) + +#define dma_map_single(dev, a, b, c) \ + pci_map_single(to_pci_dev(dev), (a), (b), (c)) +#define dma_unmap_single(dev, a, b, c) \ + pci_unmap_single(to_pci_dev(dev), (a), (b), (c)) + +#define dma_map_sg(dev, sg, nents, dir) \ + pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir) +#define dma_unmap_sg(dev, sg, nents, dir) \ + pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir) + +#define dma_sync_single(dev, a, b, c) \ + pci_dma_sync_single(to_pci_dev(dev), (a), (b), (c)) + +/* for range just sync everything, that's all the pci API can do */ +#define dma_sync_single_range(dev, addr, off, sz, dir) \ + pci_dma_sync_single(to_pci_dev(dev), (addr), (off) + (sz), (dir)) + +#define dma_set_mask(dev, mask) \ + pci_set_dma_mask(to_pci_dev(dev), (mask)) + +/* hlist_* code - double linked lists */ +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next, **pprev; +}; + +static inline void __hlist_del(struct hlist_node *n) +{ + struct hlist_node *next = n->next; + struct hlist_node **pprev = n->pprev; + *pprev = next; + if (next) + next->pprev = pprev; +} + +static inline void hlist_del(struct hlist_node *n) +{ + __hlist_del(n); + n->next = NULL; + n->pprev = NULL; +} + +static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) +{ + struct hlist_node *first = h->first; + n->next = first; + if (first) + first->pprev = &n->next; + h->first = n; + n->pprev = &h->first; +} + +static inline int hlist_empty(const struct hlist_head *h) +{ + return !h->first; +} +#define HLIST_HEAD_INIT { 
.first = NULL } +#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } +#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) +static inline void INIT_HLIST_NODE(struct hlist_node *h) +{ + h->next = NULL; + h->pprev = NULL; +} + +#ifndef might_sleep +#define might_sleep() +#endif +#else +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return &pdev->dev; +} +#endif /* <= 2.5.0 */ + +/*****************************************************************************/ +/* 2.5.28 => 2.4.23 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 28)) + +#include +#define work_struct tq_struct +#undef INIT_WORK +#define INIT_WORK(a, b) INIT_TQUEUE(a, (void (*)(void *))b, a) +#undef container_of +#define container_of list_entry +#define schedule_work schedule_task +#define flush_scheduled_work flush_scheduled_tasks +#define cancel_work_sync(x) flush_scheduled_work() + +#endif /* 2.5.28 => 2.4.17 */ + +/*****************************************************************************/ +/* 2.6.0 => 2.5.28 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +#ifndef read_barrier_depends +#define read_barrier_depends() rmb() +#endif + +#ifndef rcu_head +struct __kc_callback_head { + struct __kc_callback_head *next; + void (*func)(struct callback_head *head); +}; +#define rcu_head __kc_callback_head +#endif + +#undef get_cpu +#define get_cpu() smp_processor_id() +#undef put_cpu +#define put_cpu() do { } while (0) +#define MODULE_INFO(version, _version) +#ifndef CONFIG_T1000_DISABLE_PACKET_SPLIT +#define CONFIG_T1000_DISABLE_PACKET_SPLIT 1 +#endif +#ifndef CONFIG_TGB_DISABLE_PACKET_SPLIT +#define CONFIG_TGB_DISABLE_PACKET_SPLIT 1 +#endif +#ifndef CONFIG_TGC_DISABLE_PACKET_SPLIT +#define CONFIG_TGC_DISABLE_PACKET_SPLIT 1 +#endif + +#define dma_set_coherent_mask(dev, mask) 1 + +#undef dev_put +#define dev_put(dev) __dev_put(dev) + +#ifndef skb_fill_page_desc +#define skb_fill_page_desc _kc_skb_fill_page_desc +void _kc_skb_fill_page_desc(struct sk_buff *skb, 
int i, struct page *page, int off, int size); +#endif + +#undef ALIGN +#define ALIGN(x, a) (((x)+(a)-1)&~((a)-1)) + +#ifndef page_count +#define page_count(p) atomic_read(&(p)->count) +#endif + +#ifdef MAX_NUMNODES +#undef MAX_NUMNODES +#endif +#define MAX_NUMNODES 1 + +/* find_first_bit and find_next bit are not defined for most + * 2.4 kernels (except for the redhat 2.4.21 kernels + */ +#include <linux/bitops.h> +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) +#undef find_next_bit +#define find_next_bit _kc_find_next_bit +unsigned long _kc_find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset); +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) + +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (strchr(dev->name, '%')) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#ifndef strlcpy +#define strlcpy _kc_strlcpy +size_t _kc_strlcpy(char *dest, const char *src, size_t size); +#endif /* strlcpy */ + +#ifndef do_div +#if BITS_PER_LONG == 64 +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + __rem = ((uint64_t)(n)) % __base; \ + (n) = ((uint64_t)(n)) / __base; \ + __rem; \ + }) +#elif BITS_PER_LONG == 32 +uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor); +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + if (likely(((n) >> 32) == 0)) { \ + __rem = (uint32_t)(n) % __base; \ + (n) = (uint32_t)(n) / __base; \ + } else \ + __rem = _kc__div64_32(&(n), __base); \ + __rem; \ + }) +#else /* BITS_PER_LONG == ?? 
*/ +# error do_div() does not yet support the C64 +#endif /* BITS_PER_LONG */ +#endif /* do_div */ + +#ifndef NSEC_PER_SEC +#define NSEC_PER_SEC 1000000000L +#endif + +#undef HAVE_I2C_SUPPORT +#else /* 2.6.0 */ + +#endif /* 2.6.0 => 2.5.28 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ) +#define dma_pool pci_pool +#define dma_pool_destroy pci_pool_destroy +#define dma_pool_alloc pci_pool_alloc +#define dma_pool_free pci_pool_free + +#define dma_pool_create(name,dev,size,align,allocation) \ + pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation)) +#endif /* < 2.6.3 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +#define MODULE_VERSION(_version) MODULE_INFO(version, _version) +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +/* 2.6.5 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) +#define dma_sync_single_for_cpu dma_sync_single +#define dma_sync_single_for_device dma_sync_single +#define dma_sync_single_range_for_cpu dma_sync_single_range +#define dma_sync_single_range_for_device dma_sync_single_range +#ifndef pci_dma_mapping_error +#define pci_dma_mapping_error _kc_pci_dma_mapping_error +static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr) +{ + return dma_addr == 0; +} +#endif +#endif /* 2.6.5 => 2.6.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...); +#define scnprintf(buf, size, fmt, args...) 
_kc_scnprintf(buf, size, fmt, ##args) +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) ) +/* taken from 2.6 include/linux/bitmap.h */ +#undef bitmap_zero +#define bitmap_zero _kc_bitmap_zero +static inline void _kc_bitmap_zero(unsigned long *dst, int nbits) +{ + if (nbits <= BITS_PER_LONG) + *dst = 0UL; + else { + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memset(dst, 0, len); + } +} +#define page_to_nid(x) 0 + +#endif /* < 2.6.6 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) ) +#undef if_mii +#define if_mii _kc_if_mii +static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq) +{ + return (struct mii_ioctl_data *) &rq->ifr_ifru; +} + +#ifndef __force +#define __force +#endif +#endif /* < 2.6.7 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) +#ifndef PCI_EXP_DEVCTL +#define PCI_EXP_DEVCTL 8 +#endif +#ifndef PCI_EXP_DEVCTL_CERE +#define PCI_EXP_DEVCTL_CERE 0x0001 +#endif +#define PCI_EXP_FLAGS 2 /* Capabilities register */ +#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */ +#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ +#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ +#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */ +#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ +#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ +#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ +#define PCI_EXP_DEVCAP 4 /* Device capabilities */ +#define PCI_EXP_DEVSTA 10 /* Device Status */ +#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \ + schedule_timeout((x * HZ)/1000 + 2); \ + } while (0) + +#endif /* < 2.6.8 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < 
KERNEL_VERSION(2,6,9)) +#include +#define __iomem + +#ifndef kcalloc +#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags) +void *_kc_kzalloc(size_t size, int flags); +#endif +#define MSEC_PER_SEC 1000L +static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j) +{ +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (MSEC_PER_SEC / HZ) * j; +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); +#else + return (j * MSEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return m * (HZ / MSEC_PER_SEC); +#else + return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; +#endif +} + +#define msleep_interruptible _kc_msleep_interruptible +static inline unsigned long _kc_msleep_interruptible(unsigned int msecs) +{ + unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1; + + while (timeout && !signal_pending(current)) { + __set_current_state(TASK_INTERRUPTIBLE); + timeout = schedule_timeout(timeout); + } + return _kc_jiffies_to_msecs(timeout); +} + +/* Basic mode control register. */ +#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ + +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 +#endif +#ifndef __be16 +#define __be16 u16 +#endif +#ifndef __be32 +#define __be32 u32 +#endif +#ifndef __be64 +#define __be64 u64 +#endif + +static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) +{ + return (struct vlan_ethhdr *)skb->mac.raw; +} + +/* Wake-On-Lan options. 
*/ +#define WAKE_PHY BIT(0) +#define WAKE_UCAST BIT(1) +#define WAKE_MCAST BIT(2) +#define WAKE_BCAST BIT(3) +#define WAKE_ARP BIT(4) +#define WAKE_MAGIC BIT(5) +#define WAKE_MAGICSECURE BIT(6) /* only meaningful if WAKE_MAGIC */ + +#define skb_header_pointer _kc_skb_header_pointer +static inline void *_kc_skb_header_pointer(const struct sk_buff *skb, + int offset, int len, void *buffer) +{ + int hlen = skb_headlen(skb); + + if (hlen - offset >= len) + return skb->data + offset; + +#ifdef MAX_SKB_FRAGS + if (skb_copy_bits(skb, offset, buffer, len) < 0) + return NULL; + + return buffer; +#else + return NULL; +#endif + +#ifndef NETDEV_TX_OK +#define NETDEV_TX_OK 0 +#endif +#ifndef NETDEV_TX_BUSY +#define NETDEV_TX_BUSY 1 +#endif +#ifndef NETDEV_TX_LOCKED +#define NETDEV_TX_LOCKED -1 +#endif +} + +#ifndef __bitwise +#define __bitwise +#endif +#endif /* < 2.6.9 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) +#ifdef module_param_array_named +#undef module_param_array_named +#define module_param_array_named(name, array, type, nump, perm) \ + static struct kparam_array __param_arr_##name \ + = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \ + sizeof(array[0]), array }; \ + module_param_call(name, param_array_set, param_array_get, \ + &__param_arr_##name, perm) +#endif /* module_param_array_named */ +/* + * num_online is broken for all < 2.6.10 kernels. This is needed to support + * Node module parameter of ixgbe. 
+ */ +#undef num_online_nodes +#define num_online_nodes(n) 1 +extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES); +#undef node_online_map +#define node_online_map _kcompat_node_online_map +#define pci_get_class pci_find_class +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) ) +#define PCI_D0 0 +#define PCI_D1 1 +#define PCI_D2 2 +#define PCI_D3hot 3 +#define PCI_D3cold 4 +typedef int pci_power_t; +#define pci_choose_state(pdev,state) state +#define PMSG_SUSPEND 3 +#define PCI_EXP_LNKCTL 16 + +#undef NETIF_F_LLTX + +#ifndef ARCH_HAS_PREFETCH +#define prefetch(X) +#endif + +#ifndef NET_IP_ALIGN +#define NET_IP_ALIGN 2 +#endif + +#define KC_USEC_PER_SEC 1000000L +#define usecs_to_jiffies _kc_usecs_to_jiffies +static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j) +{ +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (KC_USEC_PER_SEC / HZ) * j; +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC); +#else + return (j * KC_USEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ); +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return m * (HZ / KC_USEC_PER_SEC); +#else + return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC; +#endif +} + +#define PCI_EXP_LNKCAP 12 /* Link Capabilities */ +#define PCI_EXP_LNKSTA 18 /* Link Status */ +#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ +#define PCI_EXP_SLTCTL 24 /* Slot Control */ +#define PCI_EXP_SLTSTA 26 /* Slot Status */ +#define PCI_EXP_RTCTL 28 /* Root Control */ +#define PCI_EXP_RTCAP 30 /* Root Capabilities */ +#define PCI_EXP_RTSTA 32 /* Root Status */ 
+#endif /* < 2.6.11 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) ) +#include <linux/reboot.h> +#define USE_REBOOT_NOTIFIER + +/* Generic MII registers. */ +#define MII_CTRL1000 0x09 /* 1000BASE-T control */ +#define MII_STAT1000 0x0a /* 1000BASE-T status */ +/* Advertisement control register. */ +#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ +#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */ +/* Link partner ability register. */ +#define LPA_PAUSE_CAP 0x0400 /* Can pause */ +#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */ +/* 1000BASE-T Control register */ +#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ +#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ +/* 1000BASE-T Status register */ +#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ +#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ + +#ifndef is_zero_ether_addr +#define is_zero_ether_addr _kc_is_zero_ether_addr +static inline int _kc_is_zero_ether_addr(const u8 *addr) +{ + return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); +} +#endif /* is_zero_ether_addr */ +#ifndef is_multicast_ether_addr +#define is_multicast_ether_addr _kc_is_multicast_ether_addr +static inline int _kc_is_multicast_ether_addr(const u8 *addr) +{ + return addr[0] & 0x01; +} +#endif /* is_multicast_ether_addr */ +#endif /* < 2.6.12 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) +#ifndef kstrdup +#define kstrdup _kc_kstrdup +char *_kc_kstrdup(const char *s, unsigned int gfp); +#endif +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) +#define pm_message_t u32 +#ifndef kzalloc +#define kzalloc _kc_kzalloc +void 
*_kc_kzalloc(size_t size, int flags); +#endif + +/* Generic MII registers. */ +#define MII_ESTATUS 0x0f /* Extended Status */ +/* Basic mode status register. */ +#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ +/* Extended status register. */ +#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ +#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ + +#define SUPPORTED_Pause BIT(13) +#define SUPPORTED_Asym_Pause BIT(14) +#define ADVERTISED_Pause BIT(13) +#define ADVERTISED_Asym_Pause BIT(14) + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)))) +#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t)) +#define gfp_t unsigned +#else +typedef unsigned gfp_t; +#endif +#endif /* !RHEL4.3->RHEL5.0 */ + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) ) +#ifdef CONFIG_X86_64 +#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \ + dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir)) +#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \ + dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir)) +#endif +#endif +#endif /* < 2.6.14 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) ) +#ifndef kfree_rcu +/* this is placed here due to a lack of rcu_barrier in previous kernels */ +#define kfree_rcu(_ptr, _offset) kfree(_ptr) +#endif /* kfree_rcu */ +#ifndef vmalloc_node +#define vmalloc_node(a,b) vmalloc(a) +#endif /* vmalloc_node*/ + +#define setup_timer(_timer, _function, _data) \ +do { \ + (_timer)->function = _function; \ + (_timer)->data = _data; \ + init_timer(_timer); \ +} while (0) +#ifndef device_can_wakeup +#define device_can_wakeup(dev) (1) +#endif +#ifndef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) do{}while(0) +#endif +#ifndef device_init_wakeup +#define device_init_wakeup(dev,val) do {} while 
(0) +#endif +static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2) +{ + const u16 *a = (const u16 *) addr1; + const u16 *b = (const u16 *) addr2; + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; +} +#undef compare_ether_addr +#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2) +#endif /* < 2.6.15 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) ) +#undef DEFINE_MUTEX +#define DEFINE_MUTEX(x) DECLARE_MUTEX(x) +#define mutex_lock(x) down_interruptible(x) +#define mutex_unlock(x) up(x) + +#ifndef ____cacheline_internodealigned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp +#else +#define ____cacheline_internodealigned_in_smp +#endif /* CONFIG_SMP */ +#endif /* ____cacheline_internodealigned_in_smp */ +#undef HAVE_PCI_ERS +#else /* 2.6.16 and above */ +#undef HAVE_PCI_ERS +#define HAVE_PCI_ERS +#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) ) +#ifdef device_can_wakeup +#undef device_can_wakeup +#endif /* device_can_wakeup */ +#define device_can_wakeup(dev) 1 +#endif /* SLE_VERSION(10,4,0) */ +#endif /* < 2.6.16 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) ) +#ifndef dev_notice +#define dev_notice(dev, fmt, args...) 
\ + dev_printk(KERN_NOTICE, dev, fmt, ## args) +#endif + +#ifndef first_online_node +#define first_online_node 0 +#endif +#ifndef NET_SKB_PAD +#define NET_SKB_PAD 16 +#endif +#endif /* < 2.6.17 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) ) + +#ifndef IRQ_HANDLED +#define irqreturn_t void +#define IRQ_HANDLED +#define IRQ_NONE +#endif + +#ifndef IRQF_PROBE_SHARED +#ifdef SA_PROBEIRQ +#define IRQF_PROBE_SHARED SA_PROBEIRQ +#else +#define IRQF_PROBE_SHARED 0 +#endif +#endif + +#ifndef IRQF_SHARED +#define IRQF_SHARED SA_SHIRQ +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#ifndef skb_is_gso +#ifdef NETIF_F_TSO +#define skb_is_gso _kc_skb_is_gso +static inline int _kc_skb_is_gso(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_size; +} +#else +#define skb_is_gso(a) 0 +#endif +#endif + +#ifndef resource_size_t +#define resource_size_t unsigned long +#endif + +#ifdef skb_pad +#undef skb_pad +#endif +#define skb_pad(x,y) _kc_skb_pad(x, y) +int _kc_skb_pad(struct sk_buff *skb, int pad); +#ifdef skb_padto +#undef skb_padto +#endif +#define skb_padto(x,y) _kc_skb_padto(x, y) +static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + if(likely(size >= len)) + return 0; + return _kc_skb_pad(skb, len - size); +} + +#ifndef DECLARE_PCI_UNMAP_ADDR +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ + dma_addr_t ADDR_NAME +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ + u32 LEN_NAME +#define pci_unmap_addr(PTR, ADDR_NAME) \ + ((PTR)->ADDR_NAME) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ + (((PTR)->ADDR_NAME) = (VAL)) +#define pci_unmap_len(PTR, LEN_NAME) \ + ((PTR)->LEN_NAME) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ + (((PTR)->LEN_NAME) = (VAL)) +#endif /* DECLARE_PCI_UNMAP_ADDR */ +#endif /* < 2.6.18 */ + 
+/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0x00, + PCIE_LNK_X1 = 0x01, + PCIE_LNK_X2 = 0x02, + PCIE_LNK_X4 = 0x04, + PCIE_LNK_X8 = 0x08, + PCIE_LNK_X12 = 0x0C, + PCIE_LNK_X16 = 0x10, + PCIE_LNK_X32 = 0x20, + PCIE_LNK_WIDTH_UNKNOWN = 0xFF, +}; + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0))) +#define i_private u.generic_ip +#endif /* >= RHEL 5.0 */ + +#ifndef DIV_ROUND_UP +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#endif +#ifndef __ALIGN_MASK +#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#endif +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ) +#if (!((RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))))) +typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *); +#endif +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#undef CONFIG_INET_LRO +#undef CONFIG_INET_LRO_MODULE +#undef CONFIG_FCOE +#undef CONFIG_FCOE_MODULE +#endif +typedef irqreturn_t (*new_handler_t)(int, void*); +static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) +#else /* 2.4.x */ +typedef void (*irq_handler_t)(int, void*, struct pt_regs *); +typedef void (*new_handler_t)(int, void*); +static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) +#endif /* >= 2.5.x */ +{ + irq_handler_t new_handler = (irq_handler_t) handler; + return request_irq(irq, new_handler, flags, devname, dev_id); +} + +#undef request_irq +#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id)) + +#define irq_handler_t new_handler_t + +#if ( 
LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) ) +#ifndef skb_checksum_help +static inline int __kc_skb_checksum_help(struct sk_buff *skb) +{ + return skb_checksum_help(skb, 0); +} +#define skb_checksum_help(skb) __kc_skb_checksum_help((skb)) +#endif +#endif /* < 2.6.19 && >= 2.6.11 */ + +/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) +#define PCIE_CONFIG_SPACE_LEN 256 +#define PCI_CONFIG_SPACE_LEN 64 +#define PCIE_LINK_STATUS 0x12 +#define pci_config_space_ich8lan() do {} while(0) +#undef pci_save_state +int _kc_pci_save_state(struct pci_dev *); +#define pci_save_state(pdev) _kc_pci_save_state(pdev) +#undef pci_restore_state +void _kc_pci_restore_state(struct pci_dev *); +#define pci_restore_state(pdev) _kc_pci_restore_state(pdev) +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +#undef free_netdev +void _kc_free_netdev(struct net_device *); +#define free_netdev(netdev) _kc_free_netdev(netdev) +#endif +static inline int pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev) +{ + return 0; +} +#define pci_disable_pcie_error_reporting(dev) do {} while (0) +#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0) + +void *_kc_kmemdup(const void *src, size_t len, unsigned gfp); +#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp) +#ifndef bool +#define bool _Bool +#define true 1 +#define false 0 +#endif +#else /* 2.6.19 */ +#include <linux/aer.h> +#include <linux/pci_hotplug.h> + +#define NEW_SKB_CSUM_HELP +#endif /* < 2.6.19 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ) +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) ) +#undef INIT_WORK +#define INIT_WORK(_work, _func) \ +do { \ + INIT_LIST_HEAD(&(_work)->entry); \ + (_work)->pending = 0; \ + (_work)->func = (void (*)(void *))_func; \ + (_work)->data = _work; \ + init_timer(&(_work)->timer); \ +} while (0) 
+#endif + +#ifndef PCI_VDEVICE +#define PCI_VDEVICE(ven, dev) \ + PCI_VENDOR_ID_##ven, (dev), \ + PCI_ANY_ID, PCI_ANY_ID, 0, 0 +#endif + +#ifndef PCI_VENDOR_ID_WANGXUN +#define PCI_VENDOR_ID_WANGXUN 0x8088 +#endif + +#ifndef round_jiffies +#define round_jiffies(x) x +#endif + +#define csum_offset csum + +#define HAVE_EARLY_VMALLOC_NODE +#define dev_to_node(dev) -1 +#undef set_dev_node +/* remove compiler warning with b=b, for unused variable */ +#define set_dev_node(a, b) do { (b) = (b); } while(0) + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +typedef __u16 __bitwise __sum16; +typedef __u32 __bitwise __wsum; +#endif + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +static inline __wsum csum_unfold(__sum16 n) +{ + return (__force __wsum)n; +} +#endif + +#else /* < 2.6.20 */ +#define HAVE_DEVICE_NUMA_NODE +#endif /* < 2.6.20 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +#define to_net_dev(class) container_of(class, struct net_device, class_dev) +#define NETDEV_CLASS_DEV +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5))) +#define vlan_group_get_device(vg, id) (vg->vlan_devices[id]) +#define vlan_group_set_device(vg, id, dev) \ + do { \ + if (vg) vg->vlan_devices[id] = dev; \ + } while (0) +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */ +#define pci_channel_offline(pdev) (pdev->error_state && \ + pdev->error_state != pci_channel_io_normal) +#define pci_request_selected_regions(pdev, bars, name) \ 
+ pci_request_regions(pdev, name) +#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev); + +#ifndef __aligned +#define __aligned(x) __attribute__((aligned(x))) +#endif + +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev); +#define netdev_to_dev(netdev) \ + pci_dev_to_dev(_kc_netdev_to_pdev(netdev)) +#define devm_kzalloc(dev, size, flags) kzalloc(size, flags) +#define devm_kfree(dev, p) kfree(p) +#else /* 2.6.21 */ +static inline struct device *netdev_to_dev(struct net_device *netdev) +{ + return &netdev->dev; +} + +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +#define tcp_hdr(skb) (skb->h.th) +#define tcp_hdrlen(skb) (skb->h.th->doff << 2) +#define skb_transport_offset(skb) (skb->h.raw - skb->data) +#define skb_transport_header(skb) (skb->h.raw) +#define ipv6_hdr(skb) (skb->nh.ipv6h) +#define ip_hdr(skb) (skb->nh.iph) +#define skb_network_offset(skb) (skb->nh.raw - skb->data) +#define skb_network_header(skb) (skb->nh.raw) +#define skb_tail_pointer(skb) skb->tail +#define skb_reset_tail_pointer(skb) \ + do { \ + skb->tail = skb->data; \ + } while (0) +#define skb_set_tail_pointer(skb, offset) \ + do { \ + skb->tail = skb->data + offset; \ + } while (0) +#define skb_copy_to_linear_data(skb, from, len) \ + memcpy(skb->data, from, len) +#define skb_copy_to_linear_data_offset(skb, offset, from, len) \ + memcpy(skb->data + offset, from, len) +#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw) +#define pci_register_driver pci_module_init +#define skb_mac_header(skb) skb->mac.raw + +#ifdef NETIF_F_MULTI_QUEUE +#ifndef alloc_etherdev_mq +#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a) +#endif +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef ETH_FCS_LEN +#define ETH_FCS_LEN 4 +#endif +#define cancel_work_sync(x) flush_scheduled_work() +#ifndef udp_hdr +#define udp_hdr _udp_hdr +static inline struct udphdr 
*_udp_hdr(const struct sk_buff *skb) +{ + return (struct udphdr *)skb_transport_header(skb); +} +#endif + +#ifdef cpu_to_be16 +#undef cpu_to_be16 +#endif +#define cpu_to_be16(x) __constant_htons(x) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1))) +enum { + DUMP_PREFIX_NONE, + DUMP_PREFIX_ADDRESS, + DUMP_PREFIX_OFFSET +}; +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */ +#ifndef hex_asc +#define hex_asc(x) "0123456789abcdef"[x] +#endif +#include <linux/ctype.h> +void _kc_print_hex_dump(const char *level, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii); +#define print_hex_dump(lvl, s, t, r, g, b, l, a) \ + _kc_print_hex_dump(lvl, s, t, r, g, b, l, a) +#ifndef ADVERTISED_2500baseX_Full +#define ADVERTISED_2500baseX_Full BIT(15) +#endif +#ifndef SUPPORTED_2500baseX_Full +#define SUPPORTED_2500baseX_Full BIT(15) +#endif + +#ifndef ETH_P_PAUSE +#define ETH_P_PAUSE 0x8808 +#endif + +static inline int compound_order(struct page *page) +{ + return 0; +} + +#define __must_be_array(a) 0 + +#ifndef SKB_WITH_OVERHEAD +#define SKB_WITH_OVERHEAD(X) \ + ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#endif +#else /* 2.6.22 */ +#define ETH_TYPE_TRANS_SETS_DEV +#define HAVE_NETDEV_STATS_IN_NETDEV +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) ) +#undef SET_MODULE_OWNER +#define SET_MODULE_OWNER(dev) do { } while (0) +#endif /* > 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) +#define netif_subqueue_stopped(_a, _b) 0 +#ifndef PTR_ALIGN +#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) +#endif + +#ifndef CONFIG_PM_SLEEP +#define CONFIG_PM_SLEEP CONFIG_PM +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) ) +#define HAVE_ETHTOOL_GET_PERM_ADDR +#endif 
/* 2.6.14 through 2.6.22 */ + +static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom) +{ + int delta = 0; + + if (headroom > (skb->data - skb->head)) + delta = headroom - (skb->data - skb->head); + + if (delta || skb_header_cloned(skb)) + return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, + GFP_ATOMIC); + return 0; +} +#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h)) +#endif /* < 2.6.23 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +#ifndef ETH_FLAG_LRO +#define ETH_FLAG_LRO NETIF_F_LRO +#endif + +#ifndef ACCESS_ONCE +#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) +#endif + +/* if GRO is supported then the napi struct must already exist */ +#ifndef NETIF_F_GRO +/* NAPI API changes in 2.6.24 break everything */ +struct napi_struct { + /* used to look up the real NAPI polling routine */ + int (*poll)(struct napi_struct *, int); + struct net_device *dev; + int weight; +}; +#endif + +#ifdef NAPI +int __kc_adapter_clean(struct net_device *, int *); +/* The following definitions are multi-queue aware, and thus we have a driver + * define list which determines which drivers support multiple queues, and + * thus need these stronger defines. If a driver does not support multi-queue + * functionality, you don't need to add it to this list. 
+ */ +struct net_device *napi_to_poll_dev(const struct napi_struct *napi); + +static inline void __kc_mq_netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight) +{ + struct net_device *poll_dev = napi_to_poll_dev(napi); + poll_dev->poll = __kc_adapter_clean; + poll_dev->priv = napi; + poll_dev->weight = weight; + set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); + set_bit(__LINK_STATE_START, &poll_dev->state); + dev_hold(poll_dev); + napi->poll = poll; + napi->weight = weight; + napi->dev = dev; +} +#define netif_napi_add __kc_mq_netif_napi_add + +static inline void __kc_mq_netif_napi_del(struct napi_struct *napi) +{ + struct net_device *poll_dev = napi_to_poll_dev(napi); + WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); + dev_put(poll_dev); + memset(poll_dev, 0, sizeof(struct net_device)); +} + +#define netif_napi_del __kc_mq_netif_napi_del + +static inline bool __kc_mq_napi_schedule_prep(struct napi_struct *napi) +{ + return netif_running(napi->dev) && + netif_rx_schedule_prep(napi_to_poll_dev(napi)); +} +#define napi_schedule_prep __kc_mq_napi_schedule_prep + +static inline void __kc_mq_napi_schedule(struct napi_struct *napi) +{ + if (napi_schedule_prep(napi)) + __netif_rx_schedule(napi_to_poll_dev(napi)); +} +#define napi_schedule __kc_mq_napi_schedule + +#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi)) +#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi)) +#ifdef CONFIG_SMP +static inline void napi_synchronize(const struct napi_struct *n) +{ + struct net_device *dev = napi_to_poll_dev(n); + + while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) { + /* No hurry. 
*/ + msleep(1); + } +} +#else +#define napi_synchronize(n) barrier() +#endif /* CONFIG_SMP */ +#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi)) +static inline void _kc_napi_complete(struct napi_struct *napi) +{ +#ifdef NETIF_F_GRO + napi_gro_flush(napi); +#endif + netif_rx_complete(napi_to_poll_dev(napi)); +} +#define napi_complete _kc_napi_complete +#else /* NAPI */ + +/* The following definitions are only used if we don't support NAPI at all. */ + +static inline __kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight) +{ + dev->poll = poll; + dev->weight = weight; + napi->poll = poll; + napi->weight = weight; + napi->dev = dev; +} +#define netif_napi_del(_a) do {} while (0) +#endif /* NAPI */ + +#undef dev_get_by_name +#define dev_get_by_name(_a, _b) dev_get_by_name(_b) +#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b) +#ifndef DMA_BIT_MASK +#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1)) +#endif + +#ifdef NETIF_F_TSO6 +#define skb_is_gso_v6 _kc_skb_is_gso_v6 +static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; +} +#endif /* NETIF_F_TSO6 */ + +#ifndef KERN_CONT +#define KERN_CONT "" +#endif +#ifndef pr_err +#define pr_err(fmt, arg...) \ + printk(KERN_ERR fmt, ##arg) +#endif + +#ifndef rounddown_pow_of_two +#define rounddown_pow_of_two(n) \ + __builtin_constant_p(n) ? ( \ + (n == 1) ? 
0 : \ + (1UL << ilog2(n))) : \ + (1UL << (fls_long(n) - 1)) +#endif + +#else /* < 2.6.24 */ +#define HAVE_ETHTOOL_GET_SSET_COUNT +#define HAVE_NETDEV_NAPI_LIST +#endif /* < 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) +#define INCLUDE_PM_QOS_PARAMS_H +#include +#else /* >= 3.2.0 */ +#include +#endif /* else >= 3.2.0 */ +#endif /* > 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ) +#define PM_QOS_CPU_DMA_LATENCY 1 + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) ) +#include +#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY +#define pm_qos_add_requirement(pm_qos_class, name, value) \ + set_acceptable_latency(name, value) +#define pm_qos_remove_requirement(pm_qos_class, name) \ + remove_acceptable_latency(name) +#define pm_qos_update_requirement(pm_qos_class, name, value) \ + modify_acceptable_latency(name, value) +#else +#define PM_QOS_DEFAULT_VALUE -1 +#define pm_qos_add_requirement(pm_qos_class, name, value) +#define pm_qos_remove_requirement(pm_qos_class, name) +#define pm_qos_update_requirement(pm_qos_class, name, value) { \ + if (value != PM_QOS_DEFAULT_VALUE) { \ + printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \ + pci_name(adapter->pdev)); \ + } \ +} + +#endif /* > 2.6.18 */ + +#define pci_enable_device_mem(pdev) pci_enable_device(pdev) + +#ifndef DEFINE_PCI_DEVICE_TABLE +#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] +#endif /* DEFINE_PCI_DEVICE_TABLE */ + +#ifndef strict_strtol +#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r) +static inline int _kc_strict_strtol(const char *buf, unsigned int base, long *res) +{ + /* adapted from strict_strtoul() in 2.6.25 */ + char *tail; + long val; + size_t len; + + *res = 0; + len = strlen(buf); + if (!len) + return -EINVAL; + 
val = simple_strtol(buf, &tail, base); + if (tail == buf) + return -EINVAL; + if ((*tail == '\0') || + ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) { + *res = val; + return 0; + } + + return -EINVAL; +} +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#ifndef NGBE_PROCFS +#define NGBE_PROCFS +#endif /* NGBE_PROCFS */ +#endif /* >= 2.6.0 */ + +#else /* < 2.6.25 */ + +#ifndef NGBE_SYSFS +#define NGBE_SYSFS +#endif /* NGBE_SYSFS */ +#if IS_ENABLED(CONFIG_HWMON) +#ifndef NGBE_HWMON +#define NGBE_HWMON +#endif /* NGBE_HWMON */ +#endif /* CONFIG_HWMON */ + +#endif /* < 2.6.25 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)) +#ifndef clamp_t +#define clamp_t(type, val, min, max) ({ \ + type __val = (val); \ + type __min = (min); \ + type __max = (max); \ + __val = __val < __min ? __min : __val; \ + __val > __max ? __max : __val; }) +#endif /* clamp_t */ +#undef kzalloc_node +#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags) + +void _kc_pci_disable_link_state(struct pci_dev *dev, int state); +#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s) +#else /* < 2.6.26 */ +#define NETDEV_CAN_SET_GSO_MAX_SIZE +#ifdef HAVE_PCI_ASPM_H +#include +#endif +#define HAVE_NETDEV_VLAN_FEATURES +#ifndef PCI_EXP_LNKCAP_ASPMS +#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ +#endif /* PCI_EXP_LNKCAP_ASPMS */ +#endif /* < 2.6.26 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)speed; + /* ep->speed_hi = (__u16)(speed >> 16); */ +} +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set + +static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep) +{ + /* no speed_hi before 2.6.27, and probably no need for it yet */ + return 
(__u32)ep->speed; +} +#define ethtool_cmd_speed _kc_ethtool_cmd_speed + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) ) +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM)) +#define ANCIENT_PM 1 +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \ + defined(CONFIG_PM_SLEEP)) +#define NEWER_PM 1 +#endif +#if defined(ANCIENT_PM) || defined(NEWER_PM) +#undef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) \ + do { \ + u16 pmc = 0; \ + int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \ + if (pm) { \ + pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \ + &pmc); \ + } \ + (dev)->power.can_wakeup = !!(pmc >> 11); \ + (dev)->power.should_wakeup = (val && (pmc >> 11)); \ + } while (0) +#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */ +#endif /* 2.6.15 through 2.6.27 */ +#ifndef netif_napi_del +#define netif_napi_del(_a) do {} while (0) +#ifdef NAPI +#ifdef CONFIG_NETPOLL +#undef netif_napi_del +#define netif_napi_del(_a) list_del(&(_a)->dev_list); +#endif +#endif +#endif /* netif_napi_del */ +#ifdef dma_mapping_error +#undef dma_mapping_error +#endif +#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr) + +#ifdef CONFIG_NETDEVICES_MULTIQUEUE +#define HAVE_TX_MQ +#endif + +#ifndef DMA_ATTR_WEAK_ORDERING +#define DMA_ATTR_WEAK_ORDERING 0 +#endif + +#ifdef HAVE_TX_MQ +void _kc_netif_tx_stop_all_queues(struct net_device *); +void _kc_netif_tx_wake_all_queues(struct net_device *); +void _kc_netif_tx_start_all_queues(struct net_device *); +#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a) +#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a) +#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a) +#undef netif_stop_subqueue +#define netif_stop_subqueue(_ndev,_qi) do { \ + if (netif_is_multiqueue((_ndev))) \ + netif_stop_subqueue((_ndev), (_qi)); \ + else \ + 
netif_stop_queue((_ndev)); \ + } while (0) +#undef netif_start_subqueue +#define netif_start_subqueue(_ndev,_qi) do { \ + if (netif_is_multiqueue((_ndev))) \ + netif_start_subqueue((_ndev), (_qi)); \ + else \ + netif_start_queue((_ndev)); \ + } while (0) +#else /* HAVE_TX_MQ */ +#define netif_tx_stop_all_queues(a) netif_stop_queue(a) +#define netif_tx_wake_all_queues(a) netif_wake_queue(a) +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) ) +#define netif_tx_start_all_queues(a) netif_start_queue(a) +#else +#define netif_tx_start_all_queues(a) do {} while (0) +#endif +#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev)) +#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev)) +#endif /* HAVE_TX_MQ */ +#ifndef NETIF_F_MULTI_QUEUE +#define NETIF_F_MULTI_QUEUE 0 +#define netif_is_multiqueue(a) 0 +#define netif_wake_subqueue(a, b) +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef __WARN_printf +void __kc_warn_slowpath(const char *file, const int line, + const char *fmt, ...) __attribute__((format(printf, 3, 4))); +#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg) +#endif /* __WARN_printf */ + +#ifndef WARN +#define WARN(condition, format...) 
({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf(format); \ + unlikely(__ret_warn_on); \ +}) +#endif /* WARN */ +#undef HAVE_NGBE_DEBUG_FS +#undef HAVE_TGB_DEBUG_FS +#else /* < 2.6.27 */ +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)(speed & 0xFFFF); + ep->speed_hi = (__u16)(speed >> 16); +} +#define HAVE_TX_MQ +#define HAVE_NETDEV_SELECT_QUEUE +#ifdef CONFIG_DEBUG_FS +#define HAVE_NGBE_DEBUG_FS +#define HAVE_TGB_DEBUG_FS +#endif /* CONFIG_DEBUG_FS */ +#endif /* < 2.6.27 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) +#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \ + pci_resource_len(pdev, bar)) +#define pci_wake_from_d3 _kc_pci_wake_from_d3 +#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep +int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable); +int _kc_pci_prepare_to_sleep(struct pci_dev *dev); +#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC) +#ifndef __skb_queue_head_init +static inline void __kc_skb_queue_head_init(struct sk_buff_head *list) +{ + list->prev = list->next = (struct sk_buff *)list; + list->qlen = 0; +} +#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q) +#endif + +#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ +#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ + +#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */ +#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */ + +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) +#ifndef swap +#define swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) +#endif +#define pci_request_selected_regions_exclusive(pdev, bars, name) \ 
+ pci_request_selected_regions(pdev, bars, name) +#ifndef CONFIG_NR_CPUS +#define CONFIG_NR_CPUS 1 +#endif /* CONFIG_NR_CPUS */ +#ifndef pcie_aspm_enabled +#define pcie_aspm_enabled() (1) +#endif /* pcie_aspm_enabled */ + +#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ + +#ifndef PCI_EXP_LNKSTA_CLS +#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ +#endif +#ifndef PCI_EXP_LNKSTA_NLW +#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ +#endif + +#ifndef pci_clear_master +void _kc_pci_clear_master(struct pci_dev *dev); +#define pci_clear_master(dev) _kc_pci_clear_master(dev) +#endif + +#ifndef PCI_EXP_LNKCTL_ASPMC +#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ +#endif + +#ifndef PCI_EXP_LNKCAP_MLW +#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */ +#endif + +#else /* < 2.6.29 */ +#ifndef HAVE_NET_DEVICE_OPS +#define HAVE_NET_DEVICE_OPS +#endif +#ifdef CONFIG_DCB +#define HAVE_PFC_MODE_ENABLE +#endif /* CONFIG_DCB */ +#endif /* < 2.6.29 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) ) +#define NO_PTP_SUPPORT +#define skb_rx_queue_recorded(a) false +#define skb_get_rx_queue(a) 0 +#define skb_record_rx_queue(a, b) do {} while (0) +#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues) +#undef CONFIG_FCOE +#undef CONFIG_FCOE_MODULE +#ifndef CONFIG_PCI_IOV +#undef pci_enable_sriov +#define pci_enable_sriov(a, b) -ENOTSUPP +#undef pci_disable_sriov +#define pci_disable_sriov(a) do {} while (0) +#endif /* CONFIG_PCI_IOV */ +#ifndef pr_cont +#define pr_cont(fmt, ...) 
\ + printk(KERN_CONT fmt, ##__VA_ARGS__) +#endif /* pr_cont */ +static inline void _kc_synchronize_irq(unsigned int a) +{ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) + synchronize_irq(); +#else /* < 2.5.28 */ + synchronize_irq(a); +#endif /* < 2.5.28 */ +} +#undef synchronize_irq +#define synchronize_irq(a) _kc_synchronize_irq(a) + +#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ + +#ifdef nr_cpus_node +#undef nr_cpus_node +#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node)) +#endif + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5)) +#define HAVE_PCI_DEV_IS_VIRTFN_BIT +#endif /* RHEL >= 5.5 */ + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5))) +static inline bool pci_is_root_bus(struct pci_bus *pbus) +{ + return !(pbus->parent); +} +#endif + +#else /* < 2.6.30 */ +#define HAVE_ASPM_QUIRKS +#define HAVE_PCI_DEV_IS_VIRTFN_BIT +#endif /* < 2.6.30 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ) +#define ETH_P_1588 0x88F7 +#define ETH_P_FIP 0x8914 +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc_count) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(uclist, dev) \ + for (uclist = dev->uc_list; uclist; uclist = uclist->next) +#endif +#ifndef PORT_OTHER +#define PORT_OTHER 0xff +#endif +#ifndef MDIO_PHY_ID_PRTAD +#define MDIO_PHY_ID_PRTAD 0x03e0 +#endif +#ifndef MDIO_PHY_ID_DEVAD +#define MDIO_PHY_ID_DEVAD 0x001f +#endif +#ifndef skb_dst +#define skb_dst(s) ((s)->dst) +#endif + +#ifndef SUPPORTED_1000baseKX_Full +#define SUPPORTED_1000baseKX_Full BIT(17) +#endif +#ifndef SUPPORTED_10000baseKX4_Full +#define SUPPORTED_10000baseKX4_Full BIT(18) +#endif +#ifndef SUPPORTED_10000baseKR_Full +#define SUPPORTED_10000baseKR_Full BIT(19) +#endif + +#ifndef ADVERTISED_1000baseKX_Full +#define ADVERTISED_1000baseKX_Full BIT(17) +#endif +#ifndef ADVERTISED_10000baseKX4_Full 
+#define ADVERTISED_10000baseKX4_Full BIT(18) +#endif +#ifndef ADVERTISED_10000baseKR_Full +#define ADVERTISED_10000baseKR_Full BIT(19) +#endif + +static inline unsigned long dev_trans_start(struct net_device *dev) +{ + return dev->trans_start; +} +#else /* < 2.6.31 */ +#ifndef HAVE_NETDEV_STORAGE_ADDRESS +#define HAVE_NETDEV_STORAGE_ADDRESS +#endif +#ifndef HAVE_NETDEV_HW_ADDR +#define HAVE_NETDEV_HW_ADDR +#endif +#ifndef HAVE_TRANS_START_IN_QUEUE +#define HAVE_TRANS_START_IN_QUEUE +#endif +#ifndef HAVE_INCLUDE_LINUX_MDIO_H +#define HAVE_INCLUDE_LINUX_MDIO_H +#endif +#include +#endif /* < 2.6.31 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) ) +#undef netdev_tx_t +#define netdev_tx_t int +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef NETIF_F_FCOE_MTU +#define NETIF_F_FCOE_MTU BIT(26) +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline int _kc_pm_runtime_get_sync() +{ + return 1; +} +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync() +#else /* 2.6.0 => 2.6.32 */ +static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev) +{ + return 1; +} +#ifndef pm_runtime_get_sync +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev) +#endif +#endif /* 2.6.0 => 2.6.32 */ +#ifndef pm_runtime_put +#define pm_runtime_put(dev) do {} while (0) +#endif +#ifndef pm_runtime_put_sync +#define pm_runtime_put_sync(dev) do {} while (0) +#endif +#ifndef pm_runtime_resume +#define pm_runtime_resume(dev) do {} while (0) +#endif +#ifndef pm_schedule_suspend +#define pm_schedule_suspend(dev, t) do {} while (0) +#endif +#ifndef pm_runtime_set_suspended +#define pm_runtime_set_suspended(dev) do {} while (0) +#endif +#ifndef pm_runtime_disable +#define pm_runtime_disable(dev) do {} while (0) +#endif +#ifndef pm_runtime_put_noidle +#define pm_runtime_put_noidle(dev) do {} while (0) 
+#endif +#ifndef pm_runtime_set_active +#define pm_runtime_set_active(dev) do {} while (0) +#endif +#ifndef pm_runtime_enable +#define pm_runtime_enable(dev) do {} while (0) +#endif +#ifndef pm_runtime_get_noresume +#define pm_runtime_get_noresume(dev) do {} while (0) +#endif +#else /* < 2.6.32 */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_NET_DEVICE_EXTENDED +#endif /* RHEL >= 6.2 && RHEL < 7.0 */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_NET_DEVICE_OPS_EXT +#define HAVE_NDO_SET_FEATURES +#endif /* RHEL >= 6.6 && RHEL < 7.0 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE +#define HAVE_NETDEV_OPS_FCOE_ENABLE +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_OPS_GETAPP +#define HAVE_DCBNL_OPS_GETAPP +#endif +#endif /* CONFIG_DCB */ +#include +/* IOV bad DMA target work arounds require at least this kernel rev support */ +#define HAVE_PCIE_TYPE +#endif /* < 2.6.32 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) +#ifndef pci_pcie_cap +#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP) +#endif +#ifndef IPV4_FLOW +#define IPV4_FLOW 0x10 +#endif /* IPV4_FLOW */ +#ifndef IPV6_FLOW +#define IPV6_FLOW 0x11 +#endif /* IPV6_FLOW */ +/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */ +#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) ) +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* RHEL6 or SLES11 SP1 
*/ +#ifndef __percpu +#define __percpu +#endif /* __percpu */ + +#ifndef PORT_DA +#define PORT_DA PORT_OTHER +#endif /* PORT_DA */ +#ifndef PORT_NONE +#define PORT_NONE PORT_OTHER +#endif + +#if ((RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 3)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)))) +#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE) +#undef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME +#undef DEFINE_DMA_UNMAP_LEN +#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME +#undef dma_unmap_addr +#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#undef dma_unmap_addr_set +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#undef dma_unmap_len +#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#undef dma_unmap_len_set +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) +#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */ +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 8)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 0))) || \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 1)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)))))) +static inline bool pci_is_pcie(struct pci_dev *dev) +{ + return !!pci_pcie_cap(dev); +} +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 2)))) +#define sk_tx_queue_get(_sk) (-1) +#define sk_tx_queue_set(_sk, _tx_queue) do {} while (0) +#endif /* !(RHEL >= 6.2) */ + +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0))) +#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_ETHTOOL_SET_PHYS_ID +#define HAVE_ETHTOOL_GET_TS_INFO +#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 5)) +#define HAVE_ETHTOOL_GSRSSH 
+#define HAVE_RHEL6_SRIOV_CONFIGURE +#define HAVE_RXFH_NONCONST +#endif /* RHEL > 6.5 */ +#endif /* RHEL >= 6.4 && RHEL < 7.0 */ + +#else /* < 2.6.33 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* < 2.6.33 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34)) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 0)) +#ifndef pci_num_vf +#define pci_num_vf(pdev) _kc_pci_num_vf(pdev) +int _kc_pci_num_vf(struct pci_dev *dev); +#endif +#endif /* RHEL_RELEASE_CODE */ + +#ifndef dev_is_pci +#define dev_is_pci(d) ((d)->bus == &pci_bus_type) +#endif + +#ifndef ETH_FLAG_NTUPLE +#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE +#endif + +#ifndef netdev_mc_count +#define netdev_mc_count(dev) ((dev)->mc_count) +#endif +#ifndef netdev_mc_empty +#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) +#endif +#ifndef netdev_for_each_mc_addr +#define netdev_for_each_mc_addr(mclist, dev) \ + for (mclist = dev->mc_list; mclist; mclist = mclist->next) +#endif +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc.count) +#endif +#ifndef netdev_uc_empty +#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(ha, dev) \ + list_for_each_entry(ha, &dev->uc.list, list) +#endif +#ifndef dma_set_coherent_mask +#define dma_set_coherent_mask(dev, mask) \ + pci_set_consistent_dma_mask(to_pci_dev(dev), (mask)) +#endif +#ifndef pci_dev_run_wake +#define pci_dev_run_wake(pdev) (0) +#endif + +/* netdev logging taken from include/linux/netdevice.h */ +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (dev->reg_state != NETREG_REGISTERED) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) 
_kc_netdev_name(netdev) +#endif /* netdev_name */ + +#undef netdev_printk +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + printk(level "%s: " format, pci_name(pdev), ##args); \ +} while (0) +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21)) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + struct device *dev = pci_dev_to_dev(pdev); \ + dev_printk(level, dev, "%s: " format, \ + netdev_name(netdev), ##args); \ +} while (0) +#else /* 2.6.21 => 2.6.34 */ +#define netdev_printk(level, netdev, format, args...) \ + dev_printk(level, (netdev)->dev.parent, \ + "%s: " format, \ + netdev_name(netdev), ##args) +#endif /* <2.6.0 <2.6.21 <2.6.34 */ +#undef netdev_emerg +#define netdev_emerg(dev, format, args...) \ + netdev_printk(KERN_EMERG, dev, format, ##args) +#undef netdev_alert +#define netdev_alert(dev, format, args...) \ + netdev_printk(KERN_ALERT, dev, format, ##args) +#undef netdev_crit +#define netdev_crit(dev, format, args...) \ + netdev_printk(KERN_CRIT, dev, format, ##args) +#undef netdev_err +#define netdev_err(dev, format, args...) \ + netdev_printk(KERN_ERR, dev, format, ##args) +#undef netdev_warn +#define netdev_warn(dev, format, args...) \ + netdev_printk(KERN_WARNING, dev, format, ##args) +#undef netdev_notice +#define netdev_notice(dev, format, args...) \ + netdev_printk(KERN_NOTICE, dev, format, ##args) +#undef netdev_info +#define netdev_info(dev, format, args...) \ + netdev_printk(KERN_INFO, dev, format, ##args) +#undef netdev_dbg +#if defined(DEBUG) +#define netdev_dbg(__dev, format, args...) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args) +#elif defined(CONFIG_DYNAMIC_DEBUG) +#define netdev_dbg(__dev, format, args...) 
\ +do { \ + dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ + netdev_name(__dev), ##args); \ +} while (0) +#else /* DEBUG */ +#define netdev_dbg(__dev, format, args...) \ +({ \ + if (0) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args); \ + 0; \ +}) +#endif /* DEBUG */ + +#undef netif_printk +#define netif_printk(priv, type, level, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_printk(level, (dev), fmt, ##args); \ +} while (0) + +#undef netif_emerg +#define netif_emerg(priv, type, dev, fmt, args...) \ + netif_level(emerg, priv, type, dev, fmt, ##args) +#undef netif_alert +#define netif_alert(priv, type, dev, fmt, args...) \ + netif_level(alert, priv, type, dev, fmt, ##args) +#undef netif_crit +#define netif_crit(priv, type, dev, fmt, args...) \ + netif_level(crit, priv, type, dev, fmt, ##args) +#undef netif_err +#define netif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, fmt, ##args) +#undef netif_warn +#define netif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, fmt, ##args) +#undef netif_notice +#define netif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, fmt, ##args) +#undef netif_info +#define netif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, fmt, ##args) +#undef netif_dbg +#define netif_dbg(priv, type, dev, fmt, args...) 
\ + netif_level(dbg, priv, type, dev, fmt, ##args) + +#ifdef SET_SYSTEM_SLEEP_PM_OPS +#define HAVE_SYSTEM_SLEEP_PM_OPS +#endif + +#ifndef for_each_set_bit +#define for_each_set_bit(bit, addr, size) \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit */ + +#ifndef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN +#define dma_unmap_addr pci_unmap_addr +#define dma_unmap_addr_set pci_unmap_addr_set +#define dma_unmap_len pci_unmap_len +#define dma_unmap_len_set pci_unmap_len_set +#endif /* DEFINE_DMA_UNMAP_ADDR */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 3)) +#ifdef TGB_HWMON +#ifdef CONFIG_DEBUG_LOCK_ALLOC +#define sysfs_attr_init(attr) \ + do { \ + static struct lock_class_key __key; \ + (attr)->key = &__key; \ + } while (0) +#else +#define sysfs_attr_init(attr) do {} while (0) +#endif /* CONFIG_DEBUG_LOCK_ALLOC */ +#endif /* TGB_HWMON */ +#endif /* RHEL_RELEASE_CODE */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline bool _kc_pm_runtime_suspended() +{ + return false; +} +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended() +#else /* 2.6.0 => 2.6.34 */ +static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev) +{ + return false; +} +#ifndef pm_runtime_suspended +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev) +#endif +#endif /* 2.6.0 => 2.6.34 */ + +#ifndef pci_bus_speed +/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */ +enum _kc_pci_bus_speed { + _KC_PCIE_SPEED_2_5GT = 0x14, + _KC_PCIE_SPEED_5_0GT = 0x15, + _KC_PCIE_SPEED_8_0GT = 0x16, + _KC_PCI_SPEED_UNKNOWN = 0xff, +}; +#define pci_bus_speed _kc_pci_bus_speed +#define PCIE_SPEED_2_5GT _KC_PCIE_SPEED_2_5GT +#define PCIE_SPEED_5_0GT _KC_PCIE_SPEED_5_0GT +#define PCIE_SPEED_8_0GT _KC_PCIE_SPEED_8_0GT +#define PCI_SPEED_UNKNOWN 
_KC_PCI_SPEED_UNKNOWN +#endif /* pci_bus_speed */ + +#else /* < 2.6.34 */ +#define HAVE_SYSTEM_SLEEP_PM_OPS +#ifndef HAVE_SET_RX_MODE +#define HAVE_SET_RX_MODE +#endif + +#endif /* < 2.6.34 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)) +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count); +#define simple_write_to_buffer _kc_simple_write_to_buffer + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4))) +static inline struct pci_dev *pci_physfn(struct pci_dev *dev) +{ +#ifdef HAVE_PCI_DEV_IS_VIRTFN_BIT +#ifdef CONFIG_PCI_IOV + if (dev->is_virtfn) + dev = dev->physfn; +#endif /* CONFIG_PCI_IOV */ +#endif /* HAVE_PCI_DEV_IS_VIRTFN_BIT */ + return dev; +} +#endif /* ! RHEL >= 6.4 */ + + +#ifndef PCI_EXP_LNKSTA_NLW_SHIFT +#define PCI_EXP_LNKSTA_NLW_SHIFT 4 +#endif + +#ifndef numa_node_id +#define numa_node_id() 0 +#endif +#ifndef numa_mem_id +#define numa_mem_id numa_node_id +#endif +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 0))) +#ifdef HAVE_TX_MQ +#include +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int); +#else /* CONFIG_NETDEVICES_MULTI_QUEUE */ +static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + dev->egress_subqueue_count = txq; + return 0; +} +#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */ +#else /* HAVE_TX_MQ */ +static inline int _kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev, + unsigned int __always_unused txq) +{ + return 0; +} +#endif /* HAVE_TX_MQ */ +#define netif_set_real_num_tx_queues(dev, txq) \ + _kc_netif_set_real_num_tx_queues(dev, txq) +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ +#ifndef ETH_FLAG_RXHASH +#define ETH_FLAG_RXHASH (1<<28) +#endif /* ETH_FLAG_RXHASH */ +#if (RHEL_RELEASE_CODE && 
RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 0)) +#define HAVE_IRQ_AFFINITY_HINT +#endif +struct device_node; +#else /* < 2.6.35 */ +#define HAVE_STRUCT_DEVICE_OF_NODE +#define HAVE_PM_QOS_REQUEST_LIST +#define HAVE_IRQ_AFFINITY_HINT +#include +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) +int _kc_ethtool_op_set_flags(struct net_device *, u32, u32); +#define ethtool_op_set_flags _kc_ethtool_op_set_flags +u32 _kc_ethtool_op_get_flags(struct net_device *); +#define ethtool_op_get_flags _kc_ethtool_op_get_flags + +enum { + WQ_UNBOUND = 0, + WQ_RESCUER = 0, +}; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#ifdef NET_IP_ALIGN +#undef NET_IP_ALIGN +#endif +#define NET_IP_ALIGN 0 +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + +#ifdef NET_SKB_PAD +#undef NET_SKB_PAD +#endif + +#if (L1_CACHE_BYTES > 32) +#define NET_SKB_PAD L1_CACHE_BYTES +#else +#define NET_SKB_PAD 32 +#endif + +static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev, + unsigned int length) +{ + struct sk_buff *skb; + + skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC); + if (skb) { +#if (NET_IP_ALIGN + NET_SKB_PAD) + skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); +#endif + skb->dev = dev; + } + return skb; +} + +#ifdef netdev_alloc_skb_ip_align +#undef netdev_alloc_skb_ip_align +#endif +#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l) + +#undef netif_level +#define netif_level(level, priv, type, dev, fmt, args...) 
\ +do { \ + if (netif_msg_##type(priv)) \ + netdev_##level(dev, fmt, ##args); \ +} while (0) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 3))) +#undef usleep_range +#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) +#endif + +#define u64_stats_update_begin(a) do { } while (0) +#define u64_stats_update_end(a) do { } while (0) +#define u64_stats_fetch_retry_bh(a, b) (0) +#define u64_stats_fetch_begin_bh(a) (0) + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 1)) +#define HAVE_8021P_SUPPORT +#endif + +/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11, 2, 0))) +static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb) +{ + return; +} +#endif + +#else /* < 2.6.36 */ + +#define msleep(x) do { if (x > 20) \ + msleep(x); \ + else \ + usleep_range(1000 * x, 2000 * x); \ + } while (0) + +#define HAVE_PM_QOS_REQUEST_ACTIVE +#define HAVE_8021P_SUPPORT +#define HAVE_NDO_GET_STATS64 +#endif /* < 2.6.36 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)) +#define HAVE_NON_CONST_PCI_DRIVER_NAME +#ifndef netif_set_real_num_tx_queues +static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + netif_set_real_num_tx_queues(dev, txq); + return 0; +} +#define netif_set_real_num_tx_queues(dev, txq) \ + _kc_netif_set_real_num_tx_queues(dev, txq) +#endif +#ifndef netif_set_real_num_rx_queues +static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev, + unsigned int __always_unused rxq) +{ + return 0; +} +#define netif_set_real_num_rx_queues(dev, rxq) \ + __kc_netif_set_real_num_rx_queues((dev), (rxq)) +#endif +#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR +#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) +#endif +#ifndef VLAN_N_VID +#define VLAN_N_VID 
VLAN_GROUP_ARRAY_LEN +#endif /* VLAN_N_VID */ +#ifndef ETH_FLAG_TXVLAN +#define ETH_FLAG_TXVLAN BIT(7) +#endif /* ETH_FLAG_TXVLAN */ +#ifndef ETH_FLAG_RXVLAN +#define ETH_FLAG_RXVLAN BIT(8) +#endif /* ETH_FLAG_RXVLAN */ + +#define WQ_MEM_RECLAIM WQ_RESCUER + +static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb) +{ + WARN_ON(skb->ip_summed != CHECKSUM_NONE); +} +#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb) + +static inline void *_kc_vzalloc_node(unsigned long size, int node) +{ + void *addr = vmalloc_node(size, node); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node) + +static inline void *_kc_vzalloc(unsigned long size) +{ + void *addr = vmalloc(size); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc(_size) _kc_vzalloc(_size) + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 7)) || \ + (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6, 0))) +static inline __be16 vlan_get_protocol(const struct sk_buff *skb) +{ + if (vlan_tx_tag_present(skb) || + skb->protocol != cpu_to_be16(ETH_P_8021Q)) + return skb->protocol; + + if (skb_headlen(skb) < sizeof(struct vlan_ethhdr)) + return 0; + + return ((struct vlan_ethhdr *)skb->data)->h_vlan_encapsulated_proto; +} +#endif /* !RHEL5.7+ || RHEL6.0 */ + +#ifdef HAVE_HW_TIME_STAMP +#define SKBTX_HW_TSTAMP BIT(0) +#define SKBTX_IN_PROGRESS BIT(2) +#define SKB_SHARED_TX_IS_UNION +#endif + +#ifndef device_wakeup_enable +#define device_wakeup_enable(dev) device_set_wakeup_enable(dev, true) +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 18)) +#ifndef HAVE_VLAN_RX_REGISTER +#define HAVE_VLAN_RX_REGISTER +#endif +#endif /* > 2.4.18 */ +#endif /* < 2.6.37 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38)) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22)) +#define skb_checksum_start_offset(skb) 
skb_transport_offset(skb) +#else /* 2.6.22 -> 2.6.37 */ +static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb) +{ + return skb->csum_start - skb_headroom(skb); +} +#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb) +#endif /* 2.6.22 -> 2.6.37 */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_MAX_TCS +#define IEEE_8021QAZ_MAX_TCS 8 +#endif +#ifndef DCB_CAP_DCBX_HOST +#define DCB_CAP_DCBX_HOST 0x01 +#endif +#ifndef DCB_CAP_DCBX_LLD_MANAGED +#define DCB_CAP_DCBX_LLD_MANAGED 0x02 +#endif +#ifndef DCB_CAP_DCBX_VER_CEE +#define DCB_CAP_DCBX_VER_CEE 0x04 +#endif +#ifndef DCB_CAP_DCBX_VER_IEEE +#define DCB_CAP_DCBX_VER_IEEE 0x08 +#endif +#ifndef DCB_CAP_DCBX_STATIC +#define DCB_CAP_DCBX_STATIC 0x10 +#endif +#endif /* CONFIG_DCB */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 2)) +#define CONFIG_XPS +#endif /* RHEL_RELEASE_VERSION(6,2) */ +#endif /* < 2.6.38 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)) +#ifndef TC_BITMASK +#define TC_BITMASK 15 +#endif +#ifndef NETIF_F_RXCSUM +#define NETIF_F_RXCSUM BIT(29) +#endif +#ifndef skb_queue_reverse_walk_safe +#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ + for (skb = (queue)->prev, tmp = skb->prev; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->prev) +#endif +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef FCOE_MTU +#define FCOE_MTU 2158 +#endif +#endif +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 +#endif +#endif +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4))) +#define kstrtoul(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtouint(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtou32(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) */ +#if 
(!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) +u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16); +#define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q)) +u8 _kc_netdev_get_num_tc(struct net_device *dev); +#define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev) +int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc); +#define netdev_set_num_tc(dev, tc) _kc_netdev_set_num_tc((dev), (tc)) +#define netdev_reset_tc(dev) _kc_netdev_set_num_tc((dev), 0) +#define netdev_set_tc_queue(dev, tc, cnt, off) do {} while (0) +u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up); +#define netdev_get_prio_tc_map(dev, up) _kc_netdev_get_prio_tc_map(dev, up) +#define netdev_set_prio_tc_map(dev, up, tc) do {} while (0) +#else /* RHEL6.1 or greater */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif /* HAVE_MQPRIO */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#ifndef IEEE_8021QAZ_TSA_STRICT +#define IEEE_8021QAZ_TSA_STRICT 0 +#endif +#ifndef IEEE_8021QAZ_TSA_ETS +#define IEEE_8021QAZ_TSA_ETS 2 +#endif +#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 +#endif +#endif +#endif /* CONFIG_DCB */ +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ + +#ifndef udp_csum +#define udp_csum __kc_udp_csum +static inline __wsum __kc_udp_csum(struct sk_buff *skb) +{ + __wsum csum = csum_partial(skb_transport_header(skb), + sizeof(struct udphdr), skb->csum); + + for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) { + csum = csum_add(csum, skb->csum); + } + return csum; +} +#endif /* udp_csum */ +#else /* < 2.6.39 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif +#ifndef HAVE_SETUP_TC +#define HAVE_SETUP_TC +#endif +#ifdef CONFIG_DCB +#ifndef 
HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#endif +#endif /* CONFIG_DCB */ +#ifndef HAVE_NDO_SET_FEATURES +#define HAVE_NDO_SET_FEATURES +#endif +#define HAVE_IRQ_AFFINITY_NOTIFY +#endif /* < 2.6.39 */ + +/*****************************************************************************/ +/* use < 2.6.40 because of a Fedora 15 kernel update where they + * updated the kernel version to 2.6.40.x and they back-ported 3.0 features + * like set_phys_id for ethtool. + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 40)) +#ifdef ETHTOOL_GRXRINGS +#ifndef FLOW_EXT +#define FLOW_EXT 0x80000000 +union _kc_ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + __u8 hdata[60]; +}; +struct _kc_ethtool_flow_ext { + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; +struct _kc_ethtool_rx_flow_spec { + __u32 flow_type; + union _kc_ethtool_flow_union h_u; + struct _kc_ethtool_flow_ext h_ext; + union _kc_ethtool_flow_union m_u; + struct _kc_ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; +#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec +#endif /* FLOW_EXT */ +#endif + +#define pci_disable_link_state_locked pci_disable_link_state + +#ifndef PCI_LTR_VALUE_MASK +#define PCI_LTR_VALUE_MASK 0x000003ff +#endif +#ifndef PCI_LTR_SCALE_MASK +#define PCI_LTR_SCALE_MASK 0x00001c00 +#endif +#ifndef PCI_LTR_SCALE_SHIFT +#define PCI_LTR_SCALE_SHIFT 10 +#endif + +#else /* < 2.6.40 */ +#define HAVE_ETHTOOL_SET_PHYS_ID +#endif /* < 2.6.40 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)) +#define USE_LEGACY_PM_SUPPORT +#ifndef kfree_rcu +#define kfree_rcu(_ptr, _rcu_head) kfree(_ptr) +#endif /* kfree_rcu */ +#ifndef kstrtol_from_user +#define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r) +static inline int _kc_kstrtol_from_user(const char __user *s, size_t count, + unsigned int base, long *res) +{ + /* sign, 
base 2 representation, newline, terminator */ + char buf[1 + sizeof(long) * 8 + 1 + 1]; + + count = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, s, count)) + return -EFAULT; + buf[count] = '\0'; + return strict_strtol(buf, base, res); +} +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0) || \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,7))) +/* 20000base_blah_full Supported and Advertised Registers */ +#define SUPPORTED_20000baseMLD2_Full BIT(21) +#define SUPPORTED_20000baseKR2_Full BIT(22) +#define ADVERTISED_20000baseMLD2_Full BIT(21) +#define ADVERTISED_20000baseKR2_Full BIT(22) +#endif /* RHEL_RELEASE_CODE */ +#endif /* < 3.0.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)) +#ifndef __netdev_alloc_skb_ip_align +#define __netdev_alloc_skb_ip_align(d, l, _g) netdev_alloc_skb_ip_align(d, l) +#endif /* __netdev_alloc_skb_ip_align */ +#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app) +#define dcb_ieee_delapp(dev, app) 0 +#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority) + +/* 1000BASE-T Control register */ +#define CTL1000_AS_MASTER 0x0800 +#define CTL1000_ENABLE_MASTER 0x1000 + +/* kernels less than 3.0.0 don't have this */ +#ifndef ETH_P_8021AD +#define ETH_P_8021AD 0x88A8 +#endif + +/* Stub definition for !CONFIG_OF is introduced later */ +#ifdef CONFIG_OF +static inline struct device_node * +pci_device_to_OF_node(struct pci_dev __maybe_unused *pdev) +{ +#ifdef HAVE_STRUCT_DEVICE_OF_NODE + return pdev ? 
pdev->dev.of_node : NULL; +#else + return NULL; +#endif /* !HAVE_STRUCT_DEVICE_OF_NODE */ +} +#endif /* CONFIG_OF */ +#else /* < 3.1.0 */ +#ifndef HAVE_DCBNL_IEEE_DELAPP +#define HAVE_DCBNL_IEEE_DELAPP +#endif +#endif /* < 3.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) +#ifndef dma_zalloc_coherent +#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f) +static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = dma_alloc_coherent(dev, size, dma_handle, flag); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif +#ifdef ETHTOOL_GRXRINGS +#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS +#endif /* ETHTOOL_GRXRINGS */ + +#ifndef skb_frag_size +#define skb_frag_size(frag) _kc_skb_frag_size(frag) +static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag) +{ + return frag->size; +} +#endif /* skb_frag_size */ + +#ifndef skb_frag_size_sub +#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta) +static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta) +{ + frag->size -= delta; +} +#endif /* skb_frag_size_sub */ + +#ifndef skb_frag_page +#define skb_frag_page(frag) _kc_skb_frag_page(frag) +static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag) +{ + return frag->page; +} +#endif /* skb_frag_page */ + +#ifndef skb_frag_address +#define skb_frag_address(frag) _kc_skb_frag_address(frag) +static inline void *_kc_skb_frag_address(const skb_frag_t *frag) +{ + return page_address(skb_frag_page(frag)) + frag->page_offset; +} +#endif /* skb_frag_address */ + +#ifndef skb_frag_dma_map +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#include +#endif +#define skb_frag_dma_map(dev, frag, offset, size, dir) \ + _kc_skb_frag_dma_map(dev, frag, offset, size, dir) +static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev, + const 
skb_frag_t *frag, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + return dma_map_page(dev, skb_frag_page(frag), + frag->page_offset + offset, size, dir); +} +#endif /* skb_frag_dma_map */ + +#ifndef __skb_frag_unref +#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag) +static inline void __kc_skb_frag_unref(skb_frag_t *frag) +{ + put_page(skb_frag_page(frag)); +} +#endif /* __skb_frag_unref */ + +#ifndef SPEED_UNKNOWN +#define SPEED_UNKNOWN -1 +#endif +#ifndef DUPLEX_UNKNOWN +#define DUPLEX_UNKNOWN 0xff +#endif +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 3)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0))) +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#endif +#endif +#else /* < 3.2.0 */ +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_VF_SPOOFCHK_CONFIGURE +#endif +#ifndef HAVE_SKB_L4_RXHASH +#define HAVE_SKB_L4_RXHASH +#endif +#define HAVE_IOMMU_PRESENT +#define HAVE_PM_QOS_REQUEST_LIST_NEW +#endif /* < 3.2.0 */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6, 2)) +#undef ngbe_get_netdev_tc_txq +#define ngbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc]) +#endif +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)) +/* NOTE: the order of parameters to _kc_alloc_workqueue() is different than + * alloc_workqueue() to avoid compiler warning from -Wvarargs + */ +static inline struct workqueue_struct *__attribute__ ((format(printf, 3, 4))) +_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active, + const char *fmt, ...) 
+{ + struct workqueue_struct *wq; + va_list args, temp; + unsigned int len; + char *p; + + va_start(args, fmt); + va_copy(temp, args); + len = vsnprintf(NULL, 0, fmt, temp); + va_end(temp); + + p = kmalloc(len + 1, GFP_KERNEL); + if (!p) { + va_end(args); + return NULL; + } + + vsnprintf(p, len + 1, fmt, args); + va_end(args); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) + wq = create_workqueue(p); +#else + wq = alloc_workqueue(p, flags, max_active); +#endif + kfree(p); + + return wq; +} +#ifdef alloc_workqueue +#undef alloc_workqueue +#endif +#define alloc_workqueue(fmt, flags, max_active, args...) \ + _kc_alloc_workqueue(flags, max_active, fmt, ##args) + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 5)) +typedef u32 netdev_features_t; +#endif +#undef PCI_EXP_TYPE_RC_EC +#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ +#ifndef CONFIG_BQL +#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0) +#define netdev_completed_queue(_n, _p, _b) do {} while (0) +#define netdev_tx_sent_queue(_q, _b) do {} while (0) +#define netdev_sent_queue(_n, _b) do {} while (0) +#define netdev_tx_reset_queue(_q) do {} while (0) +#define netdev_reset_queue(_n) do {} while (0) +#endif +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0)) +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#endif /* SLE_VERSION(11,3,0) */ +#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q) +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 4, 0)) +static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start, + u8 *nexthdrp, + __be16 __always_unused *frag_offp) +{ + return ipv6_skip_exthdr(skb, start, nexthdrp); +} +#undef ipv6_skip_exthdr +#define ipv6_skip_exthdr(a, b, c, d) __kc_ipv6_skip_exthdr((a), (b), (c), (d)) +#endif /* !SLES11sp4 or greater */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0))) +static inline u32 ethtool_rxfh_indir_default(u32 index, u32 
n_rx_rings) +{ + return index % n_rx_rings; +} +#endif + +#else /* ! < 3.3.0 */ +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef ETHTOOL_SRXNTUPLE +#undef ETHTOOL_SRXNTUPLE +#endif +#endif /* < 3.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) +#ifndef NETIF_F_RXFCS +#define NETIF_F_RXFCS 0 +#endif /* NETIF_F_RXFCS */ +#ifndef NETIF_F_RXALL +#define NETIF_F_RXALL 0 +#endif /* NETIF_F_RXALL */ + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0)) +#define NUMTCS_RETURNS_U8 + +int _kc_simple_open(struct inode *inode, struct file *file); +#define simple_open _kc_simple_open +#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ + +#ifndef skb_add_rx_frag +#define skb_add_rx_frag _kc_skb_add_rx_frag +void _kc_skb_add_rx_frag(struct sk_buff * skb, int i, struct page *page, + int off, int size, unsigned int truesize); +#endif +#ifdef NET_ADDR_RANDOM +#define eth_hw_addr_random(N) do { \ + eth_random_addr(N->dev_addr); \ + N->addr_assign_type |= NET_ADDR_RANDOM; \ + } while (0) +#else /* NET_ADDR_RANDOM */ +#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr) +#endif /* NET_ADDR_RANDOM */ + +#ifndef for_each_set_bit_from +#define for_each_set_bit_from(bit, addr, size) \ + for ((bit) = find_next_bit((addr), (size), (bit)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit_from */ + +#else /* < 3.4.0 */ +#include +#endif /* >= 3.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)) +#if !defined(NO_PTP_SUPPORT) && IS_ENABLED(CONFIG_PTP_1588_CLOCK) +#define HAVE_PTP_1588_CLOCK +#endif /* !NO_PTP_SUPPORT && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ +#endif /* >= 3.0.0 || RHEL_RELEASE > 6.4 */ + 
+/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) ) + +#ifndef SIZE_MAX +#define SIZE_MAX (~(size_t)0) +#endif + +#ifndef BITS_PER_LONG_LONG +#define BITS_PER_LONG_LONG 64 +#endif + +#ifndef ether_addr_equal +static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2) +{ + return !compare_ether_addr(addr1, addr2); +} +#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1), (_addr2)) +#endif + +/* Definitions for !CONFIG_OF_NET are introduced in 3.10 */ +#ifdef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif +#else +#include +#define HAVE_FDB_OPS +#define HAVE_ETHTOOL_GET_TS_INFO +#endif /* < 3.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) +#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */ + +#ifndef MDIO_EEE_100TX +#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ +#endif +#ifndef MDIO_EEE_1000T +#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ +#endif +#ifndef MDIO_EEE_10GT +#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ +#endif +#ifndef MDIO_EEE_1000KX +#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ +#endif +#ifndef MDIO_EEE_10GKX4 +#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ +#endif +#ifndef MDIO_EEE_10GKR +#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ +#endif + +#ifndef __GFP_MEMALLOC +#define __GFP_MEMALLOC 0 +#endif + +#ifndef eth_broadcast_addr +#define eth_broadcast_addr _kc_eth_broadcast_addr +static inline void _kc_eth_broadcast_addr(u8 *addr) +{ + memset(addr, 0xff, ETH_ALEN); +} +#endif + +#ifndef eth_random_addr +#define eth_random_addr _kc_eth_random_addr +static inline void _kc_eth_random_addr(u8 *addr) +{ + get_random_bytes(addr, ETH_ALEN); + 
addr[0] &= 0xfe; /* clear multicast */ + addr[0] |= 0x02; /* set local assignment */ +} +#endif /* eth_random_addr */ + +#ifndef DMA_ATTR_SKIP_CPU_SYNC +#define DMA_ATTR_SKIP_CPU_SYNC 0 +#endif +#else /* < 3.6.0 */ +#define HAVE_STRUCT_PAGE_PFMEMALLOC +#endif /* < 3.6.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) +#include +#ifndef ADVERTISED_40000baseKR4_Full +/* these defines were all added in one commit, so should be safe + * to trigger activation on one define + */ +#define SUPPORTED_40000baseKR4_Full BIT(23) +#define SUPPORTED_40000baseCR4_Full BIT(24) +#define SUPPORTED_40000baseSR4_Full BIT(25) +#define SUPPORTED_40000baseLR4_Full BIT(26) +#define ADVERTISED_40000baseKR4_Full BIT(23) +#define ADVERTISED_40000baseCR4_Full BIT(24) +#define ADVERTISED_40000baseSR4_Full BIT(25) +#define ADVERTISED_40000baseLR4_Full BIT(26) +#endif + +#ifndef mmd_eee_cap_to_ethtool_sup_t +/** + * mmd_eee_cap_to_ethtool_sup_t + * @eee_cap: value of the MMD EEE Capability register + * + * A small helper function that translates MMD EEE Capability (3.20) bits + * to ethtool supported settings. 
+ */ +static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap) +{ + u32 supported = 0; + + if (eee_cap & MDIO_EEE_100TX) + supported |= SUPPORTED_100baseT_Full; + if (eee_cap & MDIO_EEE_1000T) + supported |= SUPPORTED_1000baseT_Full; + if (eee_cap & MDIO_EEE_10GT) + supported |= SUPPORTED_10000baseT_Full; + if (eee_cap & MDIO_EEE_1000KX) + supported |= SUPPORTED_1000baseKX_Full; + if (eee_cap & MDIO_EEE_10GKX4) + supported |= SUPPORTED_10000baseKX4_Full; + if (eee_cap & MDIO_EEE_10GKR) + supported |= SUPPORTED_10000baseKR_Full; + + return supported; +} +#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \ + __kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap) +#endif /* mmd_eee_cap_to_ethtool_sup_t */ + +#ifndef mmd_eee_adv_to_ethtool_adv_t +/** + * mmd_eee_adv_to_ethtool_adv_t + * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers + * + * A small helper function that translates the MMD EEE Advertisement (7.60) + * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement + * settings. 
+ */ +static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv) +{ + u32 adv = 0; + + if (eee_adv & MDIO_EEE_100TX) + adv |= ADVERTISED_100baseT_Full; + if (eee_adv & MDIO_EEE_1000T) + adv |= ADVERTISED_1000baseT_Full; + if (eee_adv & MDIO_EEE_10GT) + adv |= ADVERTISED_10000baseT_Full; + if (eee_adv & MDIO_EEE_1000KX) + adv |= ADVERTISED_1000baseKX_Full; + if (eee_adv & MDIO_EEE_10GKX4) + adv |= ADVERTISED_10000baseKX4_Full; + if (eee_adv & MDIO_EEE_10GKR) + adv |= ADVERTISED_10000baseKR_Full; + + return adv; +} + +#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \ + __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv) +#endif /* mmd_eee_adv_to_ethtool_adv_t */ + +#ifndef ethtool_adv_to_mmd_eee_adv_t +/** + * ethtool_adv_to_mmd_eee_adv_t + * @adv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement settings + * to EEE advertisements for the MMD EEE Advertisement (7.60) and + * MMD EEE Link Partner Ability (7.61) registers. + */ +static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv) +{ + u16 reg = 0; + + if (adv & ADVERTISED_100baseT_Full) + reg |= MDIO_EEE_100TX; + if (adv & ADVERTISED_1000baseT_Full) + reg |= MDIO_EEE_1000T; + if (adv & ADVERTISED_10000baseT_Full) + reg |= MDIO_EEE_10GT; + if (adv & ADVERTISED_1000baseKX_Full) + reg |= MDIO_EEE_1000KX; + if (adv & ADVERTISED_10000baseKX4_Full) + reg |= MDIO_EEE_10GKX4; + if (adv & ADVERTISED_10000baseKR_Full) + reg |= MDIO_EEE_10GKR; + + return reg; +} +#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv) +#endif /* ethtool_adv_to_mmd_eee_adv_t */ + +#ifndef pci_pcie_type +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) +static inline u8 pci_pcie_type(struct pci_dev *pdev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); + BUG_ON(!pos); + pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); + return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; +} +#else /* < 2.6.24 */ +#define pci_pcie_type(x) (x)->pcie_type 
+#endif /* < 2.6.24 */ +#endif /* pci_pcie_type */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4))) && \ + (!(SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0))) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) +#define ptp_clock_register(caps, args...) ptp_clock_register(caps) +#endif + +#ifndef pcie_capability_read_word +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); +#define pcie_capability_read_word(d, p, v) __kc_pcie_capability_read_word(d, p, v) +#endif /* pcie_capability_read_word */ + +#ifndef pcie_capability_read_dword +int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); +#define pcie_capability_read_dword(d,p,v) __kc_pcie_capability_read_dword(d,p,v) +#endif + +#ifndef pcie_capability_write_word +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); +#define pcie_capability_write_word(d, p, v) __kc_pcie_capability_write_word(d, p, v) +#endif /* pcie_capability_write_word */ + +#ifndef pcie_capability_clear_and_set_word +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set); +#define pcie_capability_clear_and_set_word(d, p, c, s) \ + __kc_pcie_capability_clear_and_set_word(d, p, c, s) +#endif /* pcie_capability_clear_and_set_word */ + +#ifndef pcie_capability_clear_word +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear); +#define pcie_capability_clear_word(d, p, c) \ + __kc_pcie_capability_clear_word(d, p, c) +#endif /* pcie_capability_clear_word */ + +#ifndef PCI_EXP_LNKSTA2 +#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0)) +#define USE_CONST_DEV_UC_CHAR +#define HAVE_NDO_FDB_ADD_NLATTR +#endif + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 8)) +#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi) +#endif /* !RHEL6.8+ */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 6)) +#include 
+#else + +#define DEFINE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] __read_mostly = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DECLARE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] + +#define HASH_SIZE(name) (ARRAY_SIZE(name)) +#define HASH_BITS(name) ilog2(HASH_SIZE(name)) + +/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ +#define hash_min(val, bits) \ + (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits)) + +static inline void __hash_init(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + INIT_HLIST_HEAD(&ht[i]); +} + +#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) + +#define hash_add(hashtable, node, key) \ + hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) + +static inline bool hash_hashed(struct hlist_node *node) +{ + return !hlist_unhashed(node); +} + +static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + if (!hlist_empty(&ht[i])) + return false; + + return true; +} + +#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) + +static inline void hash_del(struct hlist_node *node) +{ + hlist_del_init(node); +} +#endif /* RHEL >= 6.6 */ + +/* We don't have @flags support prior to 3.7, so we'll simply ignore the flags + * parameter on these older kernels. + */ +#define __setup_timer(_timer, _fn, _data, _flags) \ + setup_timer((_timer), (_fn), (_data)) \ + +#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) ) ) && \ + ( ! 
( SLE_VERSION_CODE >= SLE_VERSION(12,0,0) ) ) + +#ifndef mod_delayed_work +/** + * __mod_delayed_work - modify delay or queue delayed work + * @wq: workqueue to use + * @dwork: delayed work to queue + * @delay: number of jiffies to wait before queueing + * + * Return: %true if @dwork was pending and was rescheduled; + * %false if it wasn't pending + * + * Note: the dwork parameter was declared as a void* + * to avoid compatibility problems with early 2.6 kernels + * where struct delayed_work is not declared. Unlike the original + * implementation flags are not preserved and it shouldn't be + * used in the interrupt context. + */ +static inline bool __mod_delayed_work(struct workqueue_struct *wq, + void *dwork, + unsigned long delay) +{ + bool ret = cancel_delayed_work(dwork); + queue_delayed_work(wq, dwork, delay); + return ret; +} +#define mod_delayed_work(wq, dwork, delay) __mod_delayed_work(wq, dwork, delay) +#endif /* mod_delayed_work */ + +#endif /* !(RHEL >= 6.7) && !(SLE >= 12.0) */ +#else /* >= 3.7.0 */ +#include +#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS +#define USE_CONST_DEV_UC_CHAR +#define HAVE_NDO_FDB_ADD_NLATTR +#endif /* >= 3.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 5)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 4, 0))) +#ifndef pci_sriov_set_totalvfs +static inline int __kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev, u16 __always_unused numvfs) +{ + return 0; +} +#define pci_sriov_set_totalvfs(a, b) __kc_pci_sriov_set_totalvfs((a), (b)) +#endif +#endif /* !(RHEL_RELEASE_CODE >= 6.5 && SLE_VERSION_CODE >= 11.4) */ +#ifndef PCI_EXP_LNKCTL_ASPM_L0S +#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */ +#endif +#ifndef PCI_EXP_LNKCTL_ASPM_L1 +#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */ +#endif +#define HAVE_CONFIG_HOTPLUG +/* 
Reserved Ethernet Addresses per IEEE 802.1Q */ +static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { + 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; + +#ifndef is_link_local_ether_addr +static inline bool __kc_is_link_local_ether_addr(const u8 *addr) +{ + __be16 *a = (__be16 *)addr; + static const __be16 *b = (const __be16 *)eth_reserved_addr_base; + static const __be16 m = cpu_to_be16(0xfff0); + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; +} +#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr) +#endif /* is_link_local_ether_addr */ + +#ifndef FLOW_MAC_EXT +#define FLOW_MAC_EXT 0x40000000 +#endif /* FLOW_MAC_EXT */ + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 4, 0)) +#define HAVE_SRIOV_CONFIGURE +#endif + +#ifndef PCI_EXP_LNKCAP_SLS_2_5GB +#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */ +#endif + +#ifndef PCI_EXP_LNKCAP_SLS_5_0GB +#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */ +#endif + +#undef PCI_EXP_LNKCAP2_SLS_2_5GB +#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */ + +#undef PCI_EXP_LNKCAP2_SLS_5_0GB +#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */ + +#undef PCI_EXP_LNKCAP2_SLS_8_0GB +#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */ + +#else /* >= 3.8.0 */ +#ifndef __devinit +#define __devinit +#endif + +#ifndef __devinitdata +#define __devinitdata +#endif + +#ifndef __devinitconst +#define __devinitconst +#endif + +#ifndef __devexit +#define __devexit +#endif + +#ifndef __devexit_p +#define __devexit_p +#endif + +#ifndef HAVE_ENCAP_CSUM_OFFLOAD +#define HAVE_ENCAP_CSUM_OFFLOAD +#endif + +#ifndef HAVE_GRE_ENCAP_OFFLOAD +#define HAVE_GRE_ENCAP_OFFLOAD +#endif + +#ifndef HAVE_SRIOV_CONFIGURE +#define HAVE_SRIOV_CONFIGURE +#endif + +#define HAVE_BRIDGE_ATTRIBS +#ifndef BRIDGE_MODE_VEB +#define BRIDGE_MODE_VEB 0 /* Default loopback mode */ +#endif /* BRIDGE_MODE_VEB 
*/ +#ifndef BRIDGE_MODE_VEPA +#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */ +#endif /* BRIDGE_MODE_VEPA */ +#endif /* >= 3.8.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) + +#undef BUILD_BUG_ON +#ifdef __CHECKER__ +#define BUILD_BUG_ON(condition) (0) +#else /* __CHECKER__ */ +#ifndef __compiletime_warning +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_warning(message) __attribute__((warning(message))) +#else /* __GNUC__ */ +#define __compiletime_warning(message) +#endif /* __GNUC__ */ +#endif /* __compiletime_warning */ +#ifndef __compiletime_error +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_error(message) __attribute__((error(message))) +#define __compiletime_error_fallback(condition) do { } while (0) +#else /* __GNUC__ */ +#define __compiletime_error(message) +#define __compiletime_error_fallback(condition) \ + do { ((void)sizeof(char[1 - 2 * condition])); } while (0) +#endif /* __GNUC__ */ +#else /* __compiletime_error */ +#define __compiletime_error_fallback(condition) do { } while (0) +#endif /* __compiletime_error */ +#define __compiletime_assert(condition, msg, prefix, suffix) \ + do { \ + bool __cond = !(condition); \ + extern void prefix ## suffix(void) __compiletime_error(msg); \ + if (__cond) \ + prefix ## suffix(); \ + __compiletime_error_fallback(__cond); \ + } while (0) + +#define _compiletime_assert(condition, msg, prefix, suffix) \ + __compiletime_assert(condition, msg, prefix, suffix) +#define compiletime_assert(condition, msg) \ + _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) +#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) +#ifndef __OPTIMIZE__ +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +#else /* __OPTIMIZE__ */ +#define BUILD_BUG_ON(condition) \ + 
BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) +#endif /* __OPTIMIZE__ */ +#endif /* __CHECKER__ */ + +#undef hlist_entry +#define hlist_entry(ptr, type, member) container_of(ptr, type, member) + +#undef hlist_entry_safe +#define hlist_entry_safe(ptr, type, member) \ + ({ typeof(ptr) ____ptr = (ptr); \ + ____ptr ? hlist_entry(____ptr, type, member) : NULL; \ + }) + +#undef hlist_for_each_entry +#define hlist_for_each_entry(pos, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \ + pos && ({ n = pos->member.next; 1; }); \ + pos = hlist_entry_safe(n, typeof(*pos), member)) + +#undef hlist_for_each_entry_continue +#define hlist_for_each_entry_continue(pos, member) \ + for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_from +#define hlist_for_each_entry_from(pos, member) \ + for (; pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hash_for_each +#define hash_for_each(name, bkt, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry(obj, &name[bkt], member) + +#undef hash_for_each_safe +#define hash_for_each_safe(name, bkt, tmp, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) + +#undef hash_for_each_possible +#define hash_for_each_possible(name, obj, member, key) \ + hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) + +#undef hash_for_each_possible_safe +#define hash_for_each_possible_safe(name, obj, tmp, member, key) 
\ + hlist_for_each_entry_safe(obj, tmp,\ + &name[hash_min(key, HASH_BITS(name))], member) + +#ifdef CONFIG_XPS +int __kc_netif_set_xps_queue(struct net_device *, const struct cpumask *, u16); +#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx)) +#else /* CONFIG_XPS */ +#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0) +#endif /* CONFIG_XPS */ + +#ifdef HAVE_NETDEV_SELECT_QUEUE +#define _kc_hashrnd 0xd631614b /* not so random hash salt */ +u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); +#define __netdev_pick_tx __kc_netdev_pick_tx +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#else +#define HAVE_BRIDGE_FILTER +#define HAVE_FDB_DEL_NLATTR +#endif /* < 3.9.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) +#ifndef NAPI_POLL_WEIGHT +#define NAPI_POLL_WEIGHT 64 +#endif +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev *dev); +#else +static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev) +{ + return 0; +} +#endif +#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev) + +#ifndef list_first_entry_or_null +#define list_first_entry_or_null(ptr, type, member) \ + (!list_empty(ptr) ? 
list_first_entry(ptr, type, member) : NULL) +#endif + +#ifndef VLAN_TX_COOKIE_MAGIC +static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb, + u16 vlan_tci) +{ +#ifdef VLAN_TAG_PRESENT + vlan_tci |= VLAN_TAG_PRESENT; +#endif + skb->vlan_tci = vlan_tci; + return skb; +} +#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \ + __kc__vlan_hwaccel_put_tag(skb, vlan_tci) +#endif + +#ifdef HAVE_FDB_OPS +#if defined(HAVE_NDO_FDB_ADD_NLATTR) +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 flags); +#elif defined(USE_CONST_DEV_UC_CHAR) +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr, u16 flags); +#else +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 flags); +#endif /* HAVE_NDO_FDB_ADD_NLATTR */ +#if defined(HAVE_FDB_DEL_NLATTR) +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr); +#elif defined(USE_CONST_DEV_UC_CHAR) +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr); +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr); +#endif /* HAVE_FDB_DEL_NLATTR */ +#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add +#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del +#endif /* HAVE_FDB_OPS */ + +#ifndef PCI_DEVID +#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) +#endif + +/* The definitions for these functions when CONFIG_OF_NET is defined are + * pulled in from . For kernels older than 3.5 we already have + * backports for when CONFIG_OF_NET is true. 
These are separated and + * duplicated in order to cover all cases so that all kernels get either the + * real definitions (when CONFIG_OF_NET is defined) or the stub definitions + * (when CONFIG_OF_NET is not defined, or the kernel is too old to have real + * definitions). + */ +#ifndef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif + +#else /* >= 3.10.0 */ +#define HAVE_ENCAP_TSO_OFFLOAD +#define USE_DEFAULT_FDB_DEL_DUMP +#define HAVE_SKB_INNER_NETWORK_HEADER +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0))) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) +#define HAVE_RHEL7_PCI_DRIVER_RH +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2)) +#define HAVE_RHEL7_PCI_RESET_NOTIFY +#endif /* RHEL >= 7.2 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3)) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 5)) +#define HAVE_GENEVE_RX_OFFLOAD +#endif /* RHEL >=7.3 && RHEL < 7.5 */ +#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC +#if defined(__aarch64__) +#else +#ifndef CONFIG_PPC64 +#define HAVE_RHEL7_NET_DEVICE_OPS_EXT +#endif/*CONFIG_PPC64LE*/ +#endif/*aarch*/ +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) +#define HAVE_UDP_ENC_TUNNEL +#endif /* !HAVE_UDP_ENC_TUNNEL && CONFIG_GENEVE */ +#endif /* RHEL >= 7.3 */ + +/* new hooks added to net_device_ops_extended in RHEL7.4 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) +#if defined(__aarch64__) +#else +#ifndef CONFIG_PPC64 +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL +#define HAVE_UDP_ENC_RX_OFFLOAD +#endif /*CONFIG_PPC64LE*/ +#endif/*aarch*/ +#endif /* RHEL >= 7.4 */ +#else /* RHEL >= 8.0 */ +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#define NO_NETDEV_BPF_PROG_ATTACHED +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#endif /* 
RHEL >= 8.0 */ +#endif /* RHEL >= 7.0 */ +#endif /* >= 3.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)) +#define netdev_notifier_info_to_dev(ptr) ptr +#ifndef time_in_range64 +#define time_in_range64(a, b, c) \ + (time_after_eq64(a, b) && \ + time_before_eq64(a, c)) +#endif /* time_in_range64 */ +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))) +#define HAVE_NDO_SET_VF_LINK_STATE +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif +#else /* >= 3.11.0 */ +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_SKB_INNER_PROTOCOL +#define HAVE_MPLS_FEATURES +#endif /* >= 3.11.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) +int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width); +#ifndef pcie_get_minimum_link +#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w) +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7)) +int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev); +#define pci_wait_for_pending_transaction _kc_pci_wait_for_pending_transaction +#endif /* = 3.12.0 */ +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 0, 0)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) +#define HAVE_VXLAN_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* < 4.8.0 */ +#define HAVE_NDO_GET_PHYS_PORT_ID +#define HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK +#endif /* >= 3.12.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < 
KERNEL_VERSION(3, 13, 0)) +#define dma_set_mask_and_coherent(_p, _m) __kc_dma_set_mask_and_coherent(_p, _m) +int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask); +#ifndef u64_stats_init +#define u64_stats_init(a) do { } while(0) +#endif +#undef BIT_ULL +#define BIT_ULL(n) (1ULL << (n)) + +#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0))) +static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) +{ + dev = pci_physfn(dev); + if (pci_is_root_bus(dev->bus)) + return NULL; + + return dev->bus->self; +} +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 1, 0)) +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT +#endif +#ifndef list_next_entry +#define list_next_entry(pos, member) \ + list_entry((pos)->member.next, typeof(*(pos)), member) +#endif +#ifndef list_prev_entry +#define list_prev_entry(pos, member) \ + list_entry((pos)->member.prev, typeof(*(pos)), member) +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 20)) +#define devm_kcalloc(dev, cnt, size, flags) \ + devm_kzalloc(dev, cnt * size, flags) +#endif /* > 2.6.20 */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +#define list_last_entry(ptr, type, member) list_entry((ptr)->prev, type, member) +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) +bool _kc_pci_device_is_present(struct pci_dev *pdev); +#define pci_device_is_present _kc_pci_device_is_present +#endif /* = 3.13.0 */ +#define HAVE_VXLAN_CHECKS +#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3, 13, 0, 24)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#elif (defined(UTS_RELEASE) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#else +#define HAVE_NDO_SELECT_QUEUE_ACCEL +#endif +#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS +#endif + 
+/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) + +#ifndef U16_MAX +#define U16_MAX ((u16)~0U) +#endif + +#ifndef U32_MAX +#define U32_MAX ((u32)~0U) +#endif + +#ifndef U64_MAX +#define U64_MAX ((u64)~0ULL) +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +#define dev_consume_skb_any(x) dev_kfree_skb_any(x) +#define dev_consume_skb_irq(x) dev_kfree_skb_irq(x) +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 0)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 0, 0))) + +/* it isn't expected that this would be a #define unless we made it so */ +#ifndef skb_set_hash + +#define PKT_HASH_TYPE_NONE 0 +#define PKT_HASH_TYPE_L2 1 +#define PKT_HASH_TYPE_L3 2 +#define PKT_HASH_TYPE_L4 3 + +enum _kc_pkt_hash_types { + _KC_PKT_HASH_TYPE_NONE = PKT_HASH_TYPE_NONE, + _KC_PKT_HASH_TYPE_L2 = PKT_HASH_TYPE_L2, + _KC_PKT_HASH_TYPE_L3 = PKT_HASH_TYPE_L3, + _KC_PKT_HASH_TYPE_L4 = PKT_HASH_TYPE_L4, +}; +#define pkt_hash_types _kc_pkt_hash_types + +#define skb_set_hash __kc_skb_set_hash +static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb, + u32 __maybe_unused hash, + int __maybe_unused type) +{ +#ifdef HAVE_SKB_L4_RXHASH + skb->l4_rxhash = (type == PKT_HASH_TYPE_L4); +#endif +#ifdef NETIF_F_RXHASH + skb->rxhash = hash; +#endif +} +#endif /* !skb_set_hash */ + +#else /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */ + +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,0)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE <= SLE_VERSION(12,1,0))) +/* GPLv2 code taken from 5.10-rc2 kernel source include/linux/pci.h, Copyright + * original authors. 
+ */ +#if 0 +static inline int pci_enable_msix_exact(struct pci_dev *dev, + struct msix_entry *entries, int nvec) +{ + int rc = pci_enable_msix_range(dev, entries, nvec, nvec); + if (rc < 0) + return rc; + return 0; +} +#endif +#endif /* <=EL7.0 || <=SLES 12.1 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#ifndef HAVE_VXLAN_RX_OFFLOAD +#define HAVE_VXLAN_RX_OFFLOAD +#endif /* HAVE_VXLAN_RX_OFFLOAD */ +#endif + +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif + +#ifndef HAVE_VXLAN_CHECKS +#define HAVE_VXLAN_CHECKS +#endif /* HAVE_VXLAN_CHECKS */ +#endif /* !(RHEL_RELEASE_CODE >= 7.0 && SLE_VERSION_CODE >= 12.0) */ + +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 0, 0))) +#define HAVE_NDO_DFWD_OPS +#endif + +#ifndef pci_enable_msix_range +int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec); +#define pci_enable_msix_range __kc_pci_enable_msix_range +#endif + +#ifndef ether_addr_copy +#define ether_addr_copy __kc_ether_addr_copy +static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + *(u32 *)dst = *(const u32 *)src; + *(u16 *)(dst + 4) = *(const u16 *)(src + 4); +#else + u16 *a = (u16 *)dst; + const u16 *b = (const u16 *)src; + + a[0] = b[0]; + a[1] = b[1]; + a[2] = b[2]; +#endif +} +#endif /* ether_addr_copy */ +int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, + int target, unsigned short *fragoff, int *flags); +#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e)) + +#ifndef OPTIMIZER_HIDE_VAR +#ifdef __GNUC__ +#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var)) +#else +#include +#define OPTIMIZER_HIDE_VAR(var) barrier() +#endif +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > 
RHEL_RELEASE_VERSION(7,0)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,4,0))) +static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) +{ +#ifdef NETIF_F_RXHASH + return skb->rxhash; +#else + return 0; +#endif /* NETIF_F_RXHASH */ +} +#endif /* !RHEL > 5.9 && !SLES >= 10.4 */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) +#define request_firmware_direct request_firmware +#endif /* !RHEL || RHEL < 7.5 */ + +#else /* >= 3.14.0 */ + +/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */ +#ifndef HAVE_NDO_DFWD_OPS +#define HAVE_NDO_DFWD_OPS +#endif +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif /* 3.14.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +#define HAVE_SKBUFF_RXHASH +#endif /* >= 2.6.35 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) && \ + !(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,30))) +#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh +#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh +#endif + +char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp); +#define devm_kstrdup(dev, s, gfp) _kc_devm_kstrdup(dev, s, gfp) + +#else +#define HAVE_NET_GET_RANDOM_ONCE +#define HAVE_PTP_1588_CLOCK_PINS +#define HAVE_NETDEV_PORT +#endif /* 3.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)) +#ifndef smp_mb__before_atomic +#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() smp_mb() +#endif +#ifndef __dev_uc_sync +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char 
*)); +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)); +#endif +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)); +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)); +#endif +#endif /* HAVE_SET_RX_MODE */ + +static inline int __kc_dev_uc_sync(struct net_device __maybe_unused *dev, + int __maybe_unused (*sync)(struct net_device *, const unsigned char *), + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef NETDEV_HW_ADDR_T_UNICAST + return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync); +#elif defined(HAVE_SET_RX_MODE) + return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count, + dev, sync, unsync); +#else + return 0; +#endif +} +#define __dev_uc_sync __kc_dev_uc_sync + +static inline void __kc_dev_uc_unsync(struct net_device __maybe_unused *dev, + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST + __kc_hw_addr_unsync_dev(&dev->uc, dev, unsync); +#else /* NETDEV_HW_ADDR_T_MULTICAST */ + __kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync); +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#endif /* HAVE_SET_RX_MODE */ +} +#define __dev_uc_unsync __kc_dev_uc_unsync + +static inline int __kc_dev_mc_sync(struct net_device __maybe_unused *dev, + int __maybe_unused (*sync)(struct net_device *, const unsigned char *), + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef NETDEV_HW_ADDR_T_MULTICAST + return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync); +#elif defined(HAVE_SET_RX_MODE) + return 
__kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count, + dev, sync, unsync); +#else + return 0; +#endif + +} +#define __dev_mc_sync __kc_dev_mc_sync + +static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev, + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_MULTICAST + __kc_hw_addr_unsync_dev(&dev->mc, dev, unsync); +#else /* NETDEV_HW_ADDR_T_MULTICAST */ + __kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync); +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +} +#define __dev_mc_unsync __kc_dev_mc_unsync +#endif /* __dev_uc_sync */ + +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 1)) +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#endif + +#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM +/* if someone backports this, hopefully they backport as a #define. + * declare it as zero on older kernels so that if it get's or'd in + * it won't effect anything, therefore preventing core driver changes + */ +#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0 +#define SKB_GSO_UDP_TUNNEL_CSUM 0 +#endif +void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp); +#define devm_kmemdup __kc_devm_kmemdup + +#else +#if ( ( LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0) ) && \ + ! 
( SLE_VERSION_CODE && ( SLE_VERSION_CODE >= SLE_VERSION(12,4,0)) ) ) +#define HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY +#endif /* >= 3.16.0 && < 4.13.0 && !(SLES >= 12sp4) */ +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#endif /* 3.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)) && \ + !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2)) +#ifndef timespec64 +#define timespec64 timespec +static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) +{ + return ts; +} +static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) +{ + return ts64; +} +#define timespec64_equal timespec_equal +#define timespec64_compare timespec_compare +#define set_normalized_timespec64 set_normalized_timespec +#define timespec64_add_safe timespec_add_safe +#define timespec64_add timespec_add +#define timespec64_sub timespec_sub +#define timespec64_valid timespec_valid +#define timespec64_valid_strict timespec_valid_strict +#define timespec64_to_ns timespec_to_ns +#define ns_to_timespec64 ns_to_timespec +#define ktime_to_timespec64 ktime_to_timespec +#define ktime_get_ts64 ktime_get_ts +#define ktime_get_real_ts64 ktime_get_real_ts +#define timespec64_add_ns timespec_add_ns +#endif /* timespec64 */ +#endif /* !(RHEL6.8= RHEL_RELEASE_VERSION(6,8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) +static inline void ktime_get_real_ts64(struct timespec64 *ts) +{ + *ts = ktime_to_timespec64(ktime_get_real()); +} + +static inline void ktime_get_ts64(struct timespec64 *ts) +{ + *ts = ktime_to_timespec64(ktime_get()); +} +#endif + +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define hlist_add_behind(_a, _b) hlist_add_after(_b, _a) +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) +#endif /* RHEL_RELEASE_CODE < 
RHEL7.5 */ + +#if RHEL_RELEASE_CODE && \ + RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,3) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,3) +static inline u64 ktime_get_ns(void) +{ + return ktime_to_ns(ktime_get()); +} + +static inline u64 ktime_get_real_ns(void) +{ + return ktime_to_ns(ktime_get_real()); +} + +static inline u64 ktime_get_boot_ns(void) +{ + return ktime_to_ns(ktime_get_boottime()); +} +#endif /* RHEL < 7.3 */ + +#else +#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT +#include +#define HAVE_RHASHTABLE +#endif /* 3.17.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) +#ifndef NO_PTP_SUPPORT +#include +struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb); +void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps); +#define skb_clone_sk __kc_skb_clone_sk +#define skb_complete_tx_timestamp __kc_skb_complete_tx_timestamp +#endif +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +u32 __kc_eth_get_headlen(const struct net_device *dev, unsigned char *data, + unsigned int max_len); +#else +unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len); +#endif /* !RHEL >= 8.2 */ + +#define eth_get_headlen __kc_eth_get_headlen +#ifndef ETH_P_XDSA +#define ETH_P_XDSA 0x00F8 +#endif +/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 1)) +#define HAVE_SKBUFF_CSUM_LEVEL +#endif /* >= RH 7.1 */ + +/* RHEL 7.3 backported xmit_more */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3)) +#define HAVE_SKB_XMIT_MORE +#endif /* >= RH 7.3 */ + +#undef GENMASK +#define GENMASK(h, l) \ + (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) +#undef GENMASK_ULL +#define GENMASK_ULL(h, l) \ + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + +#else /* 3.18.0 */ +#define 
HAVE_SKBUFF_CSUM_LEVEL +#define HAVE_SKB_XMIT_MORE +#define HAVE_SKB_INNER_PROTOCOL_TYPE +#endif /* 3.18.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 4)) +#else +#define HAVE_NDO_FEATURES_CHECK +#endif /* 3.18.4 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 13)) +#ifndef WRITE_ONCE +#define WRITE_ONCE(x, val) ({ ACCESS_ONCE(x) = (val); }) +#endif +#endif /* 3.18.13 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) +/* netdev_phys_port_id renamed to netdev_phys_item_id */ +#define netdev_phys_item_id netdev_phys_port_id + +static inline void _kc_napi_complete_done(struct napi_struct *napi, + int __always_unused work_done) { + napi_complete(napi); +} +/* don't use our backport if the distro kernels already have it */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +#define napi_complete_done _kc_napi_complete_done +#endif + +int _kc_bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, int nmaskbits); +#define bitmap_print_to_pagebuf _kc_bitmap_print_to_pagebuf + +#ifndef NETDEV_RSS_KEY_LEN +#define NETDEV_RSS_KEY_LEN (13 * 4) +#endif +#if (!(RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))))) +#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len) +#endif /* RHEL_RELEASE_CODE */ +void __kc_netdev_rss_key_fill(void *buffer, size_t len); +#define SPEED_20000 20000 +#define SPEED_40000 40000 +#ifndef dma_rmb +#define dma_rmb() rmb() +#endif +#ifndef dev_alloc_pages +#ifndef NUMA_NO_NODE +#define NUMA_NO_NODE -1 +#endif +#define 
dev_alloc_pages(_order) alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC), (_order)) +#endif +#ifndef dev_alloc_page +#define dev_alloc_page() dev_alloc_pages(0) +#endif +#if !defined(eth_skb_pad) && !defined(skb_put_padto) +/** + * __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error. + */ +static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + + if (unlikely(size < len)) { + len -= size; + if (skb_pad(skb, len)) + return -ENOMEM; + __skb_put(skb, len); + } + return 0; +} +#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len) + +static inline int __kc_eth_skb_pad(struct sk_buff *skb) +{ + return __kc_skb_put_padto(skb, ETH_ZLEN); +} +#define eth_skb_pad(skb) __kc_eth_skb_pad(skb) +#endif /* eth_skb_pad && skb_put_padto */ + +#ifndef SKB_ALLOC_NAPI +/* RHEL 7.2 backported napi_alloc_skb and friends */ +static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length) +{ + return netdev_alloc_skb_ip_align(napi->dev, length); +} +#define napi_alloc_skb(napi, len) __kc_napi_alloc_skb(napi, len) +#define __napi_alloc_skb(napi, len, mask) __kc_napi_alloc_skb(napi, len) +#endif /* SKB_ALLOC_NAPI */ +#define HAVE_CONFIG_PM_RUNTIME +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0))) +#define HAVE_RXFH_HASHFUNC +#endif /* 6.7 < RHEL < 7.0 */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 1)) +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* RHEL > 7.1 */ +#ifndef napi_schedule_irqoff +#define napi_schedule_irqoff 
napi_schedule +#endif +#ifndef READ_ONCE +#define READ_ONCE(_x) ACCESS_ONCE(_x) +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) +#define HAVE_NDO_FDB_ADD_VID +#endif +#ifndef ETH_MODULE_SFF_8636 +#define ETH_MODULE_SFF_8636 0x3 +#endif +#ifndef ETH_MODULE_SFF_8636_LEN +#define ETH_MODULE_SFF_8636_LEN 256 +#endif +#ifndef ETH_MODULE_SFF_8436 +#define ETH_MODULE_SFF_8436 0x4 +#endif +#ifndef ETH_MODULE_SFF_8436_LEN +#define ETH_MODULE_SFF_8436_LEN 256 +#endif +#ifndef writel_relaxed +#define writel_relaxed writel +#endif +#else /* 3.19.0 */ +#define HAVE_NDO_FDB_ADD_VID +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* 3.19.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 20, 0)) +/* vlan_tx_xx functions got renamed to skb_vlan */ +#ifndef skb_vlan_tag_get +#define skb_vlan_tag_get vlan_tx_tag_get +#endif +#ifndef skb_vlan_tag_present +#define skb_vlan_tag_present vlan_tx_tag_present +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 1)) +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif +#else +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif /* 3.20.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)) +/* Definition for CONFIG_OF was introduced earlier */ +#if !defined(CONFIG_OF) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) +static inline struct device_node * +pci_device_to_OF_node(const struct pci_dev __always_unused *pdev) { return NULL; } +#else /* !CONFIG_OF && RHEL < 7.3 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* !CONFIG_OF && RHEL < 7.3 */ +#else /* < 4.0 */ +#define 
HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* < 4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)) +#ifndef NO_PTP_SUPPORT +#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#include <linux/timecounter.h> +#else +#include <linux/clocksource.h> +#endif +static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta) +{ + tc->nsec += delta; +} + +static inline struct net_device * +of_find_net_device_by_node(struct device_node __always_unused *np) +{ + return NULL; +} + +#define timecounter_adjtime __kc_timecounter_adjtime +#endif +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 2, 0)))) +#define HAVE_NDO_SET_VF_RSS_QUERY_EN +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) +#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +#define HAVE_RHEL7_EXTENDED_NDO_SET_TX_MAXRATE +#define HAVE_NDO_SET_TX_MAXRATE +#endif +#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + (SLE_VERSION_CODE > SLE_VERSION(12,1,0))) +unsigned int _kc_cpumask_local_spread(unsigned int i, int node); +#define cpumask_local_spread _kc_cpumask_local_spread +#endif +#ifdef HAVE_RHASHTABLE +#define rhashtable_lookup_fast(ht, key, params) \ + do { \ + (void)params; \ + rhashtable_lookup((ht), (key)); \ + } while (0) + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) +#define rhashtable_insert_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_insert((ht), (obj), GFP_KERNEL); \ + } while (0) + +#define rhashtable_remove_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_remove((ht), (obj), GFP_KERNEL); \ + } while (0) + +#else /* >= 3,19,0 */ +#define rhashtable_insert_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_insert((ht), (obj)); \ + } while (0) + +#define 
rhashtable_remove_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_remove((ht), (obj)); \ + } while (0) + +#endif /* 3,19,0 */ +#endif /* HAVE_RHASHTABLE */ +#else /* >= 4,1,0 */ +#define HAVE_NDO_GET_PHYS_PORT_NAME +#define HAVE_PTP_CLOCK_INFO_GETTIME64 +#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +#define HAVE_PASSTHRU_FEATURES_CHECK +#define HAVE_NDO_SET_VF_RSS_QUERY_EN +#define HAVE_NDO_SET_TX_MAXRATE +#endif /* 4,1,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 9)) +#if (!(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) && \ + !((SLE_VERSION_CODE == SLE_VERSION(11, 3, 0)) && \ + (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(0, 47, 71))) && \ + !((SLE_VERSION_CODE == SLE_VERSION(11, 4, 0)) && \ + (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(65, 0, 0))) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12, 1, 0))) +static inline bool page_is_pfmemalloc(struct page __maybe_unused *page) +{ +#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC + return page->pfmemalloc; +#else + return false; +#endif +} +#endif /* !RHEL7.2+ && !SLES11sp3(3.0.101-0.47.71+ update) && !SLES11sp4(3.0.101-65+ update) & !SLES12sp1+ */ +#else +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#endif /* 4.1.9 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12, 1, 0))) +#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFULL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000ULL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32 +static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie) +{ + return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie; +}; + +static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) +{ + return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >> + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; +}; +#endif /* ! RHEL >= 7.2 && ! 
SLES >= 12.1 */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) +#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) +#if (!((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +static inline bool pci_ari_enabled(struct pci_bus *bus) +{ + return bus->self && bus->self->ari_enabled; +} +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) +#define HAVE_VF_STATS +#endif /* (RHEL7.2+) */ +#endif /* !(RHEL6.8+ || RHEL7.2+) */ +#else +static inline bool pci_ari_enabled(struct pci_bus *bus) +{ + return false; +} +#endif /* 2.6.27 */ +#else +#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT +#define HAVE_VF_STATS +#endif /* 4.2.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +/** + * _kc_flow_dissector_key_ipv4_addrs: + * @src: source ip address + * @dst: destination ip address + */ +struct _kc_flow_dissector_key_ipv4_addrs { + __be32 src; + __be32 dst; +}; + +/** + * _kc_flow_dissector_key_ipv6_addrs: + * @src: source ip address + * @dst: destination ip address + */ +struct _kc_flow_dissector_key_ipv6_addrs { + struct in6_addr src; + struct in6_addr dst; +}; + +/** + * _kc_flow_dissector_key_addrs: + * @v4addrs: IPv4 addresses + * @v6addrs: IPv6 addresses + */ +struct _kc_flow_dissector_key_addrs { + union { + struct _kc_flow_dissector_key_ipv4_addrs v4addrs; + struct _kc_flow_dissector_key_ipv6_addrs v6addrs; + }; +}; + +/** + * _kc_flow_dissector_key_tp_ports: + * @ports: port numbers of Transport header + * src: source port number + * dst: destination port number + */ +struct _kc_flow_dissector_key_ports { + union { + __be32 ports; + struct { + __be16 src; + __be16 dst; + }; + }; +}; + 
+/** + * _kc_flow_dissector_key_basic: + * @n_proto: Network header protocol (eg. IPv4/IPv6) + * @ip_proto: Transport header protocol (eg. TCP/UDP) + * @padding: padding for alignment + */ +struct _kc_flow_dissector_key_basic { + __be16 n_proto; + u8 ip_proto; + u8 padding; +}; + +struct _kc_flow_keys { + struct _kc_flow_dissector_key_basic basic; + struct _kc_flow_dissector_key_ports ports; + struct _kc_flow_dissector_key_addrs addrs; +}; + +/* These are all the include files for kernels inside this #ifdef block that + * have any reference to the in kernel definition of struct flow_keys. The + * reason for putting them here is to make 100% sure that these files do not get + * included after re-defining flow_keys to _kc_flow_keys. This is done to + * prevent any possible ABI issues that this structure re-definition could case. + */ +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)) || \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) || \ + SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) +#include +#endif /* (>= 3.3.0 && < 4.2.0) || >= RHEL 6.7 || >= SLE 11.4 */ +#if (LINUX_VERSION_CODE == KERNEL_VERSION(4,2,0)) +#include +#endif /* 4.2.0 */ +#include +#include +#include +#include + +#define flow_keys _kc_flow_keys +bool +_kc_skb_flow_dissect_flow_keys(const struct sk_buff *skb, + struct flow_keys *flow, + unsigned int __always_unused flags); +#define skb_flow_dissect_flow_keys _kc_skb_flow_dissect_flow_keys +#endif /* ! >= RHEL 7.4 && ! 
>= SLES 12.2 */ + +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +#include +#endif /* >= RHEL7.3 || >= SLE12sp2 */ +#else /* >= 4.3.0 */ +#include +#endif /* 4.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3)) +#define HAVE_NDO_SET_VF_TRUST +#endif /* (RHEL_RELEASE >= 7.3) */ +#ifndef CONFIG_64BIT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) +#include /* 32-bit readq/writeq */ +#else /* 3.3.0 => 4.3.x */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)) +#include +#endif /* 2.6.26 => 3.3.0 */ +#ifndef readq +static inline __u64 readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl(p); + high = readl(p + 1); + + return low + ((u64)high << 32); +} +#define readq readq +#endif + +#ifndef writeq +static inline void writeq(__u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#define writeq writeq +#endif +#endif /* < 3.3.0 */ +#endif /* !CONFIG_64BIT */ +#else /* < 4.4.0 */ +#define HAVE_NDO_SET_VF_TRUST + +#ifndef CONFIG_64BIT +#include /* 32-bit readq/writeq */ +#endif /* !CONFIG_64BIT */ +#endif /* 4.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) +/* protect against a likely backport */ +#ifndef NETIF_F_CSUM_MASK +#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM +#endif /* NETIF_F_CSUM_MASK */ +#ifndef NETIF_F_SCTP_CRC +#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM +#endif /* NETIF_F_SCTP_CRC */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3))) +#define eth_platform_get_mac_address _kc_eth_platform_get_mac_address +int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, + u8 *mac_addr __maybe_unused); +#endif /* 
!(RHEL_RELEASE >= 7.3) */ +#else /* 4.5.0 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) +#define HAVE_GENEVE_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* < 4.8.0 */ +#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD +#define HAVE_NETDEV_UPPER_INFO +#endif /* 4.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 3)) +static inline unsigned char *skb_checksum_start(const struct sk_buff *skb) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)) + return skb->head + skb->csum_start; +#else /* < 2.6.22 */ + return skb_transport_header(skb); +#endif +} +#endif + +#if !(UBUNTU_VERSION_CODE && \ + UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4, 4, 0, 21)) && \ + !(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0))) +static inline void napi_consume_skb(struct sk_buff *skb, + int __always_unused budget) +{ + dev_consume_skb_any(skb); +} + +#endif /* UBUNTU 4,4,0,21, RHEL 7.2, SLES12 SP3 */ + +#if !((LINUX_VERSION_CODE == KERNEL_VERSION(4, 4, 131)) && \ + defined(NGBE_SUPPORT_KYLIN)) +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0))) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) +static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) +{ + *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); +} +#endif +#endif +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) +static inline void page_ref_inc(struct page *page) +{ + get_page(page); +} +#else +#define HAVE_PAGE_COUNT_BULK_UPDATE +#endif +#ifndef IPV4_USER_FLOW +#define IPV4_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */ +#endif + 
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_TC_SETUP_CLSFLOWER +#define HAVE_TC_FLOWER_ENC +#endif + +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +#define HAVE_TC_SETUP_CLSU32 +#endif + +#if (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)) +#define HAVE_TC_SETUP_CLSFLOWER +#endif + +#else /* >= 4.6.0 */ +#define HAVE_PAGE_COUNT_BULK_UPDATE +#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC +#define HAVE_PTP_CROSSTIMESTAMP +#define HAVE_TC_SETUP_CLSFLOWER +#define HAVE_TC_SETUP_CLSU32 +#endif /* 4.6.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)) +#if ((SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))) +#define HAVE_NETIF_TRANS_UPDATE +#endif /* SLES12sp3+ || RHEL7.4+ */ +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ||\ + (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_ETHTOOL_25G_BITS +#define HAVE_ETHTOOL_50G_BITS +#define HAVE_ETHTOOL_100G_BITS +#endif /* RHEL7.3+ || SLES12sp3+ */ +#else /* 4.7.0 */ +#define HAVE_NETIF_TRANS_UPDATE +#define HAVE_ETHTOOL_25G_BITS +#define HAVE_ETHTOOL_50G_BITS +#define HAVE_ETHTOOL_100G_BITS +#define HAVE_TCF_MIRRED_REDIRECT +#endif /* 4.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4,8,0)) +#if ((SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))) +#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +#endif /* SLES12sp3+ || RHEL7.4+ */ +#else /* 4.8.0 */ +#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +#endif /* 4.8.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) +enum 
udp_parsable_tunnel_type { + UDP_TUNNEL_TYPE_VXLAN, + UDP_TUNNEL_TYPE_GENEVE, +}; +struct udp_tunnel_info { + unsigned short type; + sa_family_t sa_family; + __be16 port; +}; +#endif + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)) +#define HAVE_TCF_EXTS_TO_LIST +#endif + +#if !((LINUX_VERSION_CODE == KERNEL_VERSION(4, 4, 131)) && \ + defined(NGBE_SUPPORT_KYLIN)) +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) &&\ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +static inline int +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +pci_request_io_regions(struct pci_dev *pdev, char *name) +#else +pci_request_io_regions(struct pci_dev *pdev, const char *name) +#endif +{ + return pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO), name); +} + +static inline void +pci_release_io_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO)); +} + +static inline int +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +pci_request_mem_regions(struct pci_dev *pdev, char *name) +#else +pci_request_mem_regions(struct pci_dev *pdev, const char *name) +#endif +{ + return pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM), name); +} + +static inline void +pci_release_mem_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +} +#endif /* !SLE_VERSION(12,3,0) */ +#endif /* !NGBE_SUPPORT_KYLIN */ +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) ||\ + (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_ETHTOOL_NEW_50G_BITS +#endif /* RHEL7.4+ || SLES12sp3+ */ +#else +#define HAVE_UDP_ENC_RX_OFFLOAD +#define HAVE_TCF_EXTS_TO_LIST +#define HAVE_ETHTOOL_NEW_50G_BITS +#endif /* 4.8.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)) +#ifdef HAVE_TC_SETUP_CLSFLOWER +#if 
(!(RHEL_RELEASE_CODE) && !(SLE_VERSION_CODE) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0)))) +#define HAVE_TC_FLOWER_VLAN_IN_TAGS +#endif /* !RHEL_RELEASE_CODE && !SLE_VERSION_CODE || = RHEL_RELEASE_VERSION(7,4)) +#define HAVE_ETHTOOL_NEW_1G_BITS +#define HAVE_ETHTOOL_NEW_10G_BITS +#endif /* RHEL7.4+ */ +#if (!(SLE_VERSION_CODE) && !(RHEL_RELEASE_CODE)) || \ + SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0)) || \ + RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)) +#define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a) +#endif /* !SLE_VERSION_CODE && !RHEL_RELEASE_CODE || (SLES <= 12.3.0) || (RHEL <= 7.5) */ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,4)) +static inline void bitmap_from_u64(unsigned long *dst, u64 mask) +{ + dst[0] = mask & ULONG_MAX; + + if (sizeof(mask) > sizeof(unsigned long)) + dst[1] = mask >> 32; +} +#endif /* = RHEL_RELEASE_VERSION(7,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) && \ + !(UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,13,0,16))) +#if !(defined(NGBE_SUPPORT_KYLIN)) + +static inline bool eth_type_vlan(__be16 ethertype) +{ + switch (ethertype) { + case htons(ETH_P_8021Q): +#ifdef ETH_P_8021AD + case htons(ETH_P_8021AD): +#endif + return true; + default: + return false; + } +} +#endif +#endif /* Linux < 4.9 || RHEL < 7.4 || SLES < 12.3 || Ubuntu < 4.3.0-16 */ +#else /* >=4.9 */ +#define HAVE_FLOW_DISSECTOR_KEY_VLAN_PRIO +#define HAVE_ETHTOOL_NEW_1G_BITS +#define HAVE_ETHTOOL_NEW_10G_BITS +#endif /* KERNEL_VERSION(4.9.0) */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) +/* SLES 12.3 and RHEL 7.5 backported this interface */ +#if (!SLE_VERSION_CODE && !RHEL_RELEASE_CODE) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +static inline bool _kc_napi_complete_done2(struct 
napi_struct *napi, + int __always_unused work_done) +{ + /* it was really hard to get napi_complete_done to be safe to call + * recursively without running into our own kcompat, so just use + * napi_complete + */ + napi_complete(napi); + + /* true means that the stack is telling the driver to go-ahead and + * re-enable interrupts + */ + return true; +} + +#ifdef napi_complete_done +#undef napi_complete_done +#endif +#define napi_complete_done _kc_napi_complete_done2 +#endif /* sles and rhel exclusion for < 4.10 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_DEV_WALK_API +#define HAVE_ETHTOOL_NEW_2500MB_BITS +#define HAVE_ETHTOOL_5G_BITS +#endif /* RHEL7.4+ */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE == SLE_VERSION(12,3,0))) +#define HAVE_STRUCT_DMA_ATTRS +#endif /* (SLES == 12.3.0) */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_NETDEVICE_MIN_MAX_MTU +#endif /* (SLES >= 12.3.0) */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_STRUCT_DMA_ATTRS +#define HAVE_RHEL7_EXTENDED_MIN_MAX_MTU +#define HAVE_NETDEVICE_MIN_MAX_MTU +#define CENTOS_MTU_PORT_UPDATE +#endif +#if (!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \ + !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) +#ifndef dma_map_page_attrs +#define dma_map_page_attrs __kc_dma_map_page_attrs +static inline dma_addr_t __kc_dma_map_page_attrs(struct device *dev, + struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + return dma_map_page(dev, page, offset, size, dir); +} +#endif + +#ifndef dma_unmap_page_attrs +#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs +static inline void __kc_dma_unmap_page_attrs(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + dma_unmap_page(dev, addr, size, dir); +} +#endif + +static 
inline void __page_frag_cache_drain(struct page *page, + unsigned int count) +{ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + if (!page_ref_sub_and_test(page, count)) + return; + + init_page_count(page); +#else + BUG_ON(count > 1); + if (!count) + return; +#endif + __free_pages(page, compound_order(page)); +} +#endif /* !SLE_VERSION(12,3,0) && !RHEL_VERSION(7,5) */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#endif + +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(15,0,0))) ||\ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,4)))) +#define page_frag_free __free_page_frag +#endif +#ifndef ETH_MIN_MTU +#define ETH_MIN_MTU 68 +#endif /* ETH_MIN_MTU */ +#else /* >= 4.10 */ +#define HAVE_TC_FLOWER_ENC +#define HAVE_NETDEVICE_MIN_MAX_MTU +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#define HAVE_NETDEV_TC_RESETS_XPS +#define HAVE_XPS_QOS_SUPPORT +#define HAVE_DEV_WALK_API +#define HAVE_ETHTOOL_NEW_2500MB_BITS +#define HAVE_ETHTOOL_5G_BITS +/* kernel 4.10 onwards, as part of busy_poll rewrite, new state were added + * which is part of NAPI:state. 
If NAPI:state=NAPI_STATE_IN_BUSY_POLL, + * it means napi_poll is invoked in busy_poll context + */ +#define HAVE_NAPI_STATE_IN_BUSY_POLL +#define HAVE_TCF_MIRRED_EGRESS_REDIRECT +#endif /* 4.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)) +#ifdef CONFIG_NET_RX_BUSY_POLL +#define HAVE_NDO_BUSY_POLL +#endif /* CONFIG_NET_RX_BUSY_POLL */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) +#define HAVE_VOID_NDO_GET_STATS64 +#endif /* (SLES >= 12.3.0) && (RHEL >= 7.5) */ + +static inline void _kc_dev_kfree_skb_irq(struct sk_buff *skb) +{ + if (!skb) + return; + dev_kfree_skb_irq(skb); +} + +#undef dev_kfree_skb_irq +#define dev_kfree_skb_irq _kc_dev_kfree_skb_irq + +static inline void _kc_dev_consume_skb_irq(struct sk_buff *skb) +{ + if (!skb) + return; + dev_consume_skb_irq(skb); +} + +#undef dev_consume_skb_irq +#define dev_consume_skb_irq _kc_dev_consume_skb_irq + +static inline void _kc_dev_kfree_skb_any(struct sk_buff *skb) +{ + if (!skb) + return; + dev_kfree_skb_any(skb); +} + +#undef dev_kfree_skb_any +#define dev_kfree_skb_any _kc_dev_kfree_skb_any + +static inline void _kc_dev_consume_skb_any(struct sk_buff *skb) +{ + if (!skb) + return; + dev_consume_skb_any(skb); +} + +#undef dev_consume_skb_any +#define dev_consume_skb_any _kc_dev_consume_skb_any + +#else /* > 4.11 */ +#define HAVE_VOID_NDO_GET_STATS64 +#define HAVE_VM_OPS_FAULT_NO_VMA +#endif /* 4.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) +/* The RHEL 7.7+ NL_SET_ERR_MSG_MOD triggers unused parameter warnings */ +#undef NL_SET_ERR_MSG_MOD +#endif +#ifndef NL_SET_ERR_MSG_MOD +#define NL_SET_ERR_MSG_MOD(extack, msg) 
\ + do { \ + uninitialized_var(extack); \ + pr_err(KBUILD_MODNAME ": " msg); \ + } while (0) +#endif /* !NL_SET_ERR_MSG_MOD */ +#else /* >= 4.12 */ +#define HAVE_NAPI_BUSY_LOOP +#endif /* 4.12 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0)) +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_TCF_EXTS_HAS_ACTION +#endif +#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#endif /* SLES >= 12sp4 */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#if !(defined(NGBE_SUPPORT_KYLIN)) + +#define UUID_SIZE 16 +typedef struct { + __u8 b[UUID_SIZE]; +} uuid_t; +#define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ +((uuid_t) \ +{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \ + ((b) >> 8) & 0xff, (b) & 0xff, \ + ((c) >> 8) & 0xff, (c) & 0xff, \ + (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) + +static inline bool uuid_equal(const uuid_t *u1, const uuid_t *u2) +{ + return memcmp(u1, u2, sizeof(uuid_t)) == 0; +} +#endif +#else +#define HAVE_METADATA_PORT_INFO +#endif /* !(RHEL >= 7.5) && !(SLES >= 12.4) */ +#else /* > 4.13 */ +#define HAVE_METADATA_PORT_INFO +#define HAVE_HWTSTAMP_FILTER_NTP_ALL +#define HAVE_NDO_SETUP_TC_CHAIN_INDEX +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#define HAVE_PTP_CLOCK_DO_AUX_WORK +#endif /* 4.13.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef ethtool_link_ksettings_del_link_mode +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode) \ + __clear_bit(ETHTOOL_LINK_MODE_ ## mode ## 
_BIT, (ptr)->link_modes.name) +#endif +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE == SLE_VERSION(12,3,0))) +#define HAVE_NOT_SUPPORTED_1000baseX_Full +#endif + +#endif /* ETHTOOL_GLINKSETTINGS */ + +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC +#endif + +#define TIMER_DATA_TYPE unsigned long +#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) + +#define timer_setup(timer, callback, flags) \ + __setup_timer((timer), (TIMER_FUNC_TYPE)(callback), \ + (TIMER_DATA_TYPE)(timer), (flags)) + +#define from_timer(var, callback_timer, timer_fieldname) \ + container_of(callback_timer, typeof(*var), timer_fieldname) + +#ifndef xdp_do_flush_map +#define xdp_do_flush_map() do {} while (0) +#endif +struct _kc_xdp_buff { + void *data; + void *data_end; + void *data_hard_start; +}; +#define xdp_buff _kc_xdp_buff +struct _kc_bpf_prog { +}; +#define bpf_prog _kc_bpf_prog +#ifndef DIV_ROUND_DOWN_ULL +#define DIV_ROUND_DOWN_ULL(ll, d) \ + ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; }) +#endif /* DIV_ROUND_DOWN_ULL */ +#else /* > 4.14 */ +#define HAVE_XDP_SUPPORT +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_TCF_EXTS_HAS_ACTION +#endif /* 4.14.0 */ + +/*****************************************************************************/ +#ifndef ETHTOOL_GLINKSETTINGS + +#define __ETHTOOL_LINK_MODE_MASK_NBITS 32 +#define ETHTOOL_LINK_MASK_SIZE BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS) + +/** + * struct ethtool_link_ksettings + * @link_modes: supported and advertising, single item arrays + * @link_modes.supported: bitmask of supported link speeds + * @link_modes.advertising: bitmask of currently advertised speeds + * @base: base link details + * @base.speed: current link speed + * @base.port: current port type + * 
@base.duplex: current duplex mode + * @base.autoneg: current autonegotiation settings + * + * This struct and the following macros provide a way to support the old + * ethtool get/set_settings API on older kernels, but in the style of the new + * GLINKSETTINGS API. In this way, the same code can be used to support both + * APIs as seemlessly as possible. + * + * It should be noted the old API only has support up to the first 32 bits. + */ +struct ethtool_link_ksettings { + struct { + u32 speed; + u8 port; + u8 duplex; + u8 autoneg; + } base; + struct { + unsigned long supported[ETHTOOL_LINK_MASK_SIZE]; + unsigned long advertising[ETHTOOL_LINK_MASK_SIZE]; + } link_modes; +}; + +#define ETHTOOL_LINK_NAME_advertising(mode) ADVERTISED_ ## mode +#define ETHTOOL_LINK_NAME_supported(mode) SUPPORTED_ ## mode +#define ETHTOOL_LINK_NAME(name) ETHTOOL_LINK_NAME_ ## name +#define ETHTOOL_LINK_CONVERT(name, mode) ETHTOOL_LINK_NAME(name)(mode) + +/** + * ethtool_link_ksettings_zero_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + */ +#define ethtool_link_ksettings_zero_link_mode(ptr, name)\ + (*((ptr)->link_modes.name) = 0x0) + +/** + * ethtool_link_ksettings_add_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to add + */ +#define ethtool_link_ksettings_add_link_mode(ptr, name, mode)\ + (*((ptr)->link_modes.name) |= (typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode)) + +/** + * ethtool_link_ksettings_del_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to delete + */ +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode)\ + (*((ptr)->link_modes.name) &= ~(typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode)) + +/** + * ethtool_link_ksettings_test_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to add + */ +#define 
ethtool_link_ksettings_test_link_mode(ptr, name, mode)\ + (!!(*((ptr)->link_modes.name) & ETHTOOL_LINK_CONVERT(name, mode))) + +/** + * _kc_ethtool_ksettings_to_cmd - Convert ethtool_link_ksettings to ethtool_cmd + * @ks: ethtool_link_ksettings struct + * @cmd: ethtool_cmd struct + * + * Convert an ethtool_link_ksettings structure into the older ethtool_cmd + * structure. We provide this in ngbe_kcompat.h so that drivers can easily + * implement the older .{get|set}_settings as wrappers around the new api. + * Hence, we keep it prefixed with _kc_ to make it clear this isn't actually + * a real function in the kernel. + */ +static inline void +_kc_ethtool_ksettings_to_cmd(struct ethtool_link_ksettings *ks, + struct ethtool_cmd *cmd) +{ + cmd->supported = (u32)ks->link_modes.supported[0]; + cmd->advertising = (u32)ks->link_modes.advertising[0]; + ethtool_cmd_speed_set(cmd, ks->base.speed); + cmd->duplex = ks->base.duplex; + cmd->autoneg = ks->base.autoneg; + cmd->port = ks->base.port; +} + +#endif /* !ETHTOOL_GLINKSETTINGS */ + +/*****************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)))) +#define phy_speed_to_str _kc_phy_speed_to_str +const char *_kc_phy_speed_to_str(int speed); +#else /* (LINUX >= 4.14.0) || (SLES > 12.3.0) || (RHEL > 7.5) */ +#include +#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0)))) +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_TCF_BLOCK +#define TC_SETUP_MQPRIO 0 +#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO +#else /* 
RHEL >= 7.6 || SLES >= 15.1 */ +#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO +#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */ +void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, + struct ethtool_link_ksettings *src); +#define ethtool_intersect_link_masks _kc_ethtool_intersect_link_masks +#else /* >= 4.15 */ +#define HAVE_NDO_BPF +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_TCF_BLOCK +#endif /* 4.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,4,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +/* The return value of the strscpy() and strlcpy() functions is different. + * This could be potentially hazard for the future. + * To avoid this the void result is forced. + * So it is not possible use this function with the return value. + * Return value is required in kernel 4.3 through 4.15 + */ +#define strscpy(...) (void)(strlcpy(__VA_ARGS__)) +#endif /* !RHEL >= 7.7 && !SLES12sp4+ && !SLES15sp1+ */ + +#define pci_printk(level, pdev, fmt, arg...) \ + dev_printk(level, &(pdev)->dev, fmt, ##arg) +#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg) +#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg) +#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg) +#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg) +#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg) +#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg) +#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg) +#define pci_dbg(pdev, fmt, arg...) 
dev_dbg(&(pdev)->dev, fmt, ##arg) + +#ifndef array_index_nospec +static inline unsigned long _kc_array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + /* + * Always calculate and emit the mask even if the compiler + * thinks the mask is not needed. The compiler does not take + * into account the value of @index under speculation. + */ + OPTIMIZER_HIDE_VAR(index); + return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1); +} + +#define array_index_nospec(index, size) \ +({ \ + typeof(index) _i = (index); \ + typeof(size) _s = (size); \ + unsigned long _mask = _kc_array_index_mask_nospec(_i, _s); \ + \ + BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ + BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ + \ + (typeof(_i)) (_i & _mask); \ +}) +#endif /* array_index_nospec */ +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0)))) +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#include +#if 0 +static inline bool +tc_cls_can_offload_and_chain0(const struct net_device *dev, + struct tc_cls_common_offload *common) +{ + if (!tc_can_offload(dev)) + return false; + if (common->chain_index) + return false; + + return true; +} +#endif +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */ +#ifndef sizeof_field +#define sizeof_field(TYPE, MEMBER) (sizeof((((TYPE *)0)->MEMBER))) +#endif /* sizeof_field */ +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,5,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0) || \ + SLE_VERSION_CODE >= SLE_VERSION(15,1,0)) +/* + * Copy bitmap and clear tail bits in last word. 
+ */ +static inline void +bitmap_copy_clear_tail(unsigned long *dst, const unsigned long *src, unsigned int nbits) +{ + bitmap_copy(dst, src, nbits); + if (nbits % BITS_PER_LONG) + dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits); +} + +/* + * On 32-bit systems bitmaps are represented as u32 arrays internally, and + * therefore conversion is not needed when copying data from/to arrays of u32. + */ +#if BITS_PER_LONG == 64 +void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits); +#else +#define bitmap_from_arr32(bitmap, buf, nbits) \ + bitmap_copy_clear_tail((unsigned long *) (bitmap), \ + (const unsigned long *) (buf), (nbits)) +#endif /* BITS_PER_LONG == 64 */ +#endif /* !(RHEL >= 8.0) && !(SLES >= 12.5 && SLES < 15.0 || SLES >= 15.1) */ +#else /* >= 4.16 */ +#include +#define HAVE_XDP_BUFF_RXQ +#define HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#define HAVE_TC_FLOWER_OFFLOAD_COMMON_EXTACK +#define HAVE_TCF_MIRRED_DEV +#define HAVE_VF_STATS_DROPPED +#endif /* 4.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0)) +#include +#include +#define PCIE_SPEED_16_0GT 0x17 +#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */ +#define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */ +#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */ +void _kc_pcie_print_link_status(struct pci_dev *dev); +#define pcie_print_link_status _kc_pcie_print_link_status +#else /* >= 4.17.0 */ +#define HAVE_XDP_BUFF_IN_XDP_H +#endif /* 4.17.0 */ + +/*****************************************************************************/ +#if IS_ENABLED(CONFIG_NET_DEVLINK) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)) +#include +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) && \ + (SLE_VERSION_CODE < SLE_VERSION(15,1,0)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,7))) +#ifndef 
HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR +enum devlink_port_flavour { + DEVLINK_PORT_FLAVOUR_PHYSICAL, + DEVLINK_PORT_FLAVOUR_CPU, + DEVLINK_PORT_FLAVOUR_DSA, + DEVLINK_PORT_FLAVOUR_PCI_PF, + DEVLINK_PORT_FLAVOUR_PCI_VF, +}; +#endif +#endif /* <4.18.0 && +#ifndef macvlan_supports_dest_filter +#define macvlan_supports_dest_filter _kc_macvlan_supports_dest_filter +static inline bool _kc_macvlan_supports_dest_filter(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->mode == MACVLAN_MODE_PRIVATE || + macvlan->mode == MACVLAN_MODE_VEPA || + macvlan->mode == MACVLAN_MODE_BRIDGE; +} +#endif + +#if (!SLE_VERSION_CODE || (SLE_VERSION_CODE < SLE_VERSION(15,1,0))) +#ifndef macvlan_accel_priv +#define macvlan_accel_priv _kc_macvlan_accel_priv +#endif + +#ifndef macvlan_release_l2fw_offload +#define macvlan_release_l2fw_offload _kc_macvlan_release_l2fw_offload +#endif +#endif /* !SLES || SLES < 15.1 */ +#endif /* NETIF_F_HW_L2FW_DOFFLOAD */ +#else +#include +#include +#define HAVE_XDP_FRAME_STRUCT +#define HAVE_XDP_SOCK +#define HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS +#define NO_NDO_XDP_FLUSH +#define HAVE_AF_XDP_SUPPORT + +#endif /* 4.18.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) && \ + (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(8,2))) +#define HAVE_DEVLINK_REGIONS +#endif /* RHEL >= 8.0 && RHEL <= 8.2 */ +#define bitmap_alloc(nbits, flags) \ + kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long), flags) +#define bitmap_zalloc(nbits, flags) bitmap_alloc(nbits, ((flags) | __GFP_ZERO)) +#define bitmap_free(bitmap) kfree(bitmap) +#ifdef ETHTOOL_GLINKSETTINGS +#define ethtool_ks_clear(ptr, name) \ + ethtool_link_ksettings_zero_link_mode(ptr, name) +#define ethtool_ks_add_mode(ptr, name, mode) \ + ethtool_link_ksettings_add_link_mode(ptr, name, mode) +#define 
ethtool_ks_del_mode(ptr, name, mode) \ + ethtool_link_ksettings_del_link_mode(ptr, name, mode) +#define ethtool_ks_test(ptr, name, mode) \ + ethtool_link_ksettings_test_link_mode(ptr, name, mode) +#endif /* ETHTOOL_GLINKSETTINGS */ +#define HAVE_NETPOLL_CONTROLLER +#define REQUIRE_PCI_CLEANUP_AER_ERROR_STATUS +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_TCF_MIRRED_DEV +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#endif +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) ||\ + (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +#undef HAVE_TCF_EXTS_TO_LIST +#endif /* RHEL8.0+ */ + +static inline void __kc_metadata_dst_free(void *md_dst) +{ + kfree(md_dst); +} + +#define metadata_dst_free(md_dst) __kc_metadata_dst_free(md_dst) +#else /* >= 4.19.0 */ +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#define NO_NETDEV_BPF_PROG_ATTACHED +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_NETDEV_SB_DEV +#undef HAVE_TCF_EXTS_TO_LIST +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +#define HAVE_TCF_VLAN_TPID +#define HAVE_RHASHTABLE_TYPES +#define HAVE_DEVLINK_REGIONS +#define HAVE_DEVLINK_PARAMS +#endif /* 4.19.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)) +#define HAVE_XDP_UMEM_PROPS +#ifdef HAVE_AF_XDP_SUPPORT +#ifndef napi_if_scheduled_mark_missed +static inline bool __kc_napi_if_scheduled_mark_missed(struct napi_struct *n) +{ + unsigned long val, new; + + do { + val = READ_ONCE(n->state); + if (val & NAPIF_STATE_DISABLE) + return true; + + if (!(val & NAPIF_STATE_SCHED)) + return false; + + new = val | NAPIF_STATE_MISSED; + } while (cmpxchg(&n->state, val, new) != val); + + return true; +} + +#define napi_if_scheduled_mark_missed __kc_napi_if_scheduled_mark_missed +#endif /* !napi_if_scheduled_mark_missed */ +#endif /* HAVE_AF_XDP_SUPPORT */ +#if (RHEL_RELEASE_CODE && 
(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0))) +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#endif /* RHEL >= 8.0 */ +#if ((SLE_VERSION_CODE >= SLE_VERSION(12,5,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#endif /* SLE == 12sp5 || SLE >= 15sp1 */ +#else /* >= 4.20.0 */ +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#define HAVE_AF_XDP_ZC_SUPPORT +#define HAVE_VXLAN_TYPE +#define HAVE_ETF_SUPPORT /* Earliest TxTime First */ +#endif /* 4.20.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,0))) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#define NETLINK_MAX_COOKIE_LEN 20 +struct netlink_ext_ack { + const char *_msg; + const struct nlattr *bad_attr; + u8 cookie[NETLINK_MAX_COOKIE_LEN]; + u8 cookie_len; +}; + +#endif /* < 4.12 */ +#if 0 +static inline int _kc_dev_open(struct net_device *netdev, + struct netlink_ext_ack __always_unused *extack) +{ + return dev_open(netdev); +} + +#define dev_open _kc_dev_open +#endif +static inline int +_kc_dev_change_flags(struct net_device *netdev, unsigned int flags, + struct netlink_ext_ack __always_unused *extack) +{ + return dev_change_flags(netdev, flags); +} + +#define dev_change_flags _kc_dev_change_flags +#endif /* !(RHEL_RELEASE_CODE && RHEL > RHEL(8,0)) */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +#else /* RHEL >= 7.7 && RHEL < 8.0 || RHEL >= 8.1 */ +#ifndef HAVE_DEFINE_PTP_SYSTEM +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; +}; + +static inline void +ptp_read_system_prets(struct ptp_system_timestamp __always_unused *sts) +{ + ; +} + 
+static inline void +ptp_read_system_postts(struct ptp_system_timestamp __always_unused *sts) +{ + ; +} +#endif /* HAVE_DEFINE_PTP_SYSTEM */ +#endif /* !(RHEL >= 7.7 && RHEL != 8.0) */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#endif /* RHEL 8.1 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)) +#define HAVE_TC_INDIR_BLOCK +#endif /* RHEL 8.2 */ +#else /* >= 5.0.0 */ +#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_DMA_ALLOC_COHERENT_ZEROES_MEM +#define HAVE_GENEVE_TYPE +#define HAVE_TC_INDIR_BLOCK +#endif /* 5.0.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_DEVLINK_INFO_GET +#define HAVE_DEVLINK_FLASH_UPDATE +#else /* RHEL < 8.1 */ +#ifdef HAVE_TC_SETUP_CLSFLOWER +#include + +#ifndef HAVE_DEFINE_FLOW_CORRELATION +struct flow_match { + struct flow_dissector *dissector; + void *mask; + void *key; +}; + +struct flow_match_basic { + struct flow_dissector_key_basic *key, *mask; +}; + +struct flow_match_control { + struct flow_dissector_key_control *key, *mask; +}; + +struct flow_match_eth_addrs { + struct flow_dissector_key_eth_addrs *key, *mask; +}; + +#ifdef HAVE_TC_FLOWER_ENC +struct flow_match_enc_keyid { + struct flow_dissector_key_keyid *key, *mask; +}; +#endif + +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +struct flow_match_vlan { + struct flow_dissector_key_vlan *key, *mask; +}; +#endif + +struct flow_match_ipv4_addrs { + struct flow_dissector_key_ipv4_addrs *key, *mask; +}; + +struct flow_match_ipv6_addrs { + struct flow_dissector_key_ipv6_addrs *key, *mask; +}; + +struct flow_match_ports { + struct flow_dissector_key_ports *key, *mask; +}; + +struct flow_rule { + struct flow_match 
match; +#if 0 + /* In 5.1+ kernels, action is a member of struct flow_rule but is + * not compatible with how we kcompat tc_cls_flower_offload_flow_rule + * below. By not declaring it here, any driver that attempts to use + * action as an element of struct flow_rule will fail to compile + * instead of silently trying to access memory that shouldn't be. + */ + struct flow_action action; +#endif +}; + +void ngbe_flow_rule_match_basic(const struct flow_rule *rule, + struct flow_match_basic *out); +void ngbe_flow_rule_match_control(const struct flow_rule *rule, + struct flow_match_control *out); +void ngbe_flow_rule_match_eth_addrs(const struct flow_rule *rule, + struct flow_match_eth_addrs *out); +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +void ngbe_flow_rule_match_vlan(const struct flow_rule *rule, + struct flow_match_vlan *out); +#endif +void ngbe_flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out); +void ngbe_flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out); +void ngbe_flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out); +#ifdef HAVE_TC_FLOWER_ENC +void ngbe_flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out); +void ngbe_flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out); +void ngbe_flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out); +void ngbe_flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out); +void ngbe_flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out); +#endif + +static inline struct flow_rule * +tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd) +{ + return (struct flow_rule *)&tc_flow_cmd->dissector; +} + +static inline bool flow_rule_match_key(const struct flow_rule *rule, + enum flow_dissector_key_id key) +{ + 
return dissector_uses_key(rule->match.dissector, key); +} +#endif /* HAVE_DEFINE_FLOW_CORRELATION */ +#endif /* HAVE_TC_SETUP_CLSFLOWER */ + +#endif /* RHEL < 8.1 */ + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define devlink_params_publish(devlink) do { } while (0) +#define devlink_params_unpublish(devlink) do { } while (0) +#endif + +#else /* >= 5.1.0 */ +#define HAVE_NDO_FDB_ADD_EXTACK +#define NO_XDP_QUERY_XSK_UMEM +#define HAVE_AF_XDP_NETDEV_UMEM +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_TC_FLOWER_ENC_IP +#define HAVE_DEVLINK_INFO_GET +#define HAVE_DEVLINK_FLASH_UPDATE +#define HAVE_DEVLINK_PORT_PARAMS +#endif /* 5.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)) +#if (defined HAVE_SKB_XMIT_MORE) && \ +(!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#define netdev_xmit_more() (skb->xmit_more) +#else +#define netdev_xmit_more() (0) +#endif + +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#ifndef eth_get_headlen +#ifndef HAVE_ETH_EXTENDED_HEADLEN +static inline u32 +__kc_eth_get_headlen(const struct net_device __always_unused *dev, void *data, + unsigned int len) +{ + return eth_get_headlen(data, len); +} + +#define eth_get_headlen(dev, data, len) __kc_eth_get_headlen(dev, data, len) +#endif /* HAVE_ETH_EXTENDED_HEADLEN */ +#endif /* !eth_get_headlen */ +#endif /* !RHEL >= 8.2 */ + +#ifndef mmiowb +#ifdef CONFIG_IA64 +#define mmiowb() asm volatile ("mf.a" ::: "memory") +#else +#define mmiowb() +#endif +#endif /* mmiowb */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,2)) +#if IS_ENABLED(CONFIG_NET_DEVLINK) && !defined(devlink_port_attrs_set) +#if 0 +static inline void +_kc_devlink_port_attrs_set(struct devlink_port *devlink_port, + struct _kc_devlink_port_attrs *attrs) +{ + devlink_port_attrs_set(devlink_port, attrs->flavour, + attrs->phys.port_number, 
attrs->split, + attrs->phys.split_subport_number); +} + +#define devlink_port_attrs_set _kc_devlink_port_attrs_set +#endif +#endif /* CONFIG_NET_DEVLINK && !devlink_port_attrs_set */ +#endif /* RHEL_RELEASE_VERSION(8,1)) +#define HAVE_NDO_GET_DEVLINK_PORT +#endif /* RHEL > 8.1 */ + +#else /* >= 5.2.0 */ +#define HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED +#define SPIN_UNLOCK_IMPLIES_MMIOWB +#define HAVE_NDO_GET_DEVLINK_PORT +#endif /* 5.2.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))) +#define flow_block_offload tc_block_offload +#define flow_block_command tc_block_command +#define flow_cls_offload tc_cls_flower_offload +#define flow_block_binder_type tcf_block_binder_type +#define flow_cls_common_offload tc_cls_common_offload +#define flow_cls_offload_flow_rule tc_cls_flower_offload_flow_rule +#define FLOW_CLS_REPLACE TC_CLSFLOWER_REPLACE +#define FLOW_CLS_DESTROY TC_CLSFLOWER_DESTROY +#define FLOW_CLS_STATS TC_CLSFLOWER_STATS +#define FLOW_CLS_TMPLT_CREATE TC_CLSFLOWER_TMPLT_CREATE +#define FLOW_CLS_TMPLT_DESTROY TC_CLSFLOWER_TMPLT_DESTROY +#define FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS \ + TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS +#define FLOW_BLOCK_BIND TC_BLOCK_BIND +#define FLOW_BLOCK_UNBIND TC_BLOCK_UNBIND + +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#include + +#if 0 +int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head *driver_list, + tc_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + bool ingress_only); +#endif +#define flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \ + ingress_only) \ + _kc_flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \ + ingress_only) +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#else /* RHEL >= 8.2 */ +#define HAVE_FLOW_BLOCK_API +#define HAVE_DEVLINK_PORT_ATTR_PCI_VF +#endif /* RHEL >= 8.2 */ + +#ifndef ETH_P_LLDP 
+#define ETH_P_LLDP 0x88CC +#endif /* !ETH_P_LLDP */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,2)) +#if IS_ENABLED(CONFIG_NET_DEVLINK) +static inline void +devlink_flash_update_begin_notify(struct devlink __always_unused *devlink) +{ +} + +static inline void +devlink_flash_update_end_notify(struct devlink __always_unused *devlink) +{ +} + +static inline void +devlink_flash_update_status_notify(struct devlink __always_unused *devlink, + const char __always_unused *status_msg, + const char __always_unused *component, + unsigned long __always_unused done, + unsigned long __always_unused total) +{ +} +#endif /* CONFIG_NET_DEVLINK */ +#endif /* = 5.3.0 */ +#define XSK_UMEM_RETURNS_XDP_DESC +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0)) +#define HAVE_XSK_UMEM_HAS_ADDRS +#endif /* SLE < 15.3 */ +#endif /* < 5.8.0*/ +#define HAVE_FLOW_BLOCK_API +#define HAVE_DEVLINK_PORT_ATTR_PCI_VF +#if IS_ENABLED(CONFIG_DIMLIB) +#define HAVE_CONFIG_DIMLIB +#endif +#endif /* 5.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(15,2,0))) + +#ifndef HAVE_DEFINE_SKB_FRAG_OFF +static inline unsigned int skb_frag_off(const skb_frag_t *frag) +{ + return frag->page_offset; +} +#endif + +#ifndef HAVE_DEFINE_SKB_FRAG_ADD +static inline void skb_frag_off_add(skb_frag_t *frag, int delta) +{ + frag->page_offset += delta; +} +#endif + +#define __flow_indr_block_cb_register __tc_indr_block_cb_register +#define __flow_indr_block_cb_unregister __tc_indr_block_cb_unregister +#endif /* !(RHEL >= 8.2) && !(SLES >= 15sp2) */ +#if (SLE_VERSION_CODE >= SLE_VERSION(15,2,0)) +#define HAVE_NDO_XSK_WAKEUP +#endif /* SLES15sp2 */ +#else /* >= 5.4.0 */ +#define HAVE_NDO_XSK_WAKEUP +#endif /* 5.4.0 */ + 
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,5,0))
+/* Backport of bitmap_get_value8() (added upstream in 5.5): read the 8 bits
+ * of @map starting at bit position @start.  @start is assumed to be a
+ * multiple of 8 so the byte never straddles a word boundary -- TODO confirm
+ * against callers.
+ */
+static inline unsigned long _kc_bitmap_get_value8(const unsigned long *map,
+						 unsigned long start)
+{
+	const size_t index = BIT_WORD(start);
+	const unsigned long offset = start % BITS_PER_LONG;
+
+	return (map[index] >> offset) & 0xFF;
+}
+#define bitmap_get_value8 _kc_bitmap_get_value8
+
+/* Backport of bitmap_set_value8(): write the low 8 bits of @value into @map
+ * at bit position @start (clears the destination byte first).  @value is
+ * assumed to fit in 8 bits; higher bits would corrupt neighboring bits.
+ */
+static inline void _kc_bitmap_set_value8(unsigned long *map,
+					unsigned long value,
+					unsigned long start)
+{
+	const size_t index = BIT_WORD(start);
+	const unsigned long offset = start % BITS_PER_LONG;
+
+	map[index] &= ~(0xFFUL << offset);
+	map[index] |= value << offset;
+}
+#define bitmap_set_value8 _kc_bitmap_set_value8
+
+#endif /* 5.5.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0))
+#ifdef HAVE_AF_XDP_SUPPORT
+/* xsk_umem_discard_addr() was renamed to xsk_umem_release_addr() in 5.6 */
+#define xsk_umem_release_addr xsk_umem_discard_addr
+#define xsk_umem_release_addr_rq xsk_umem_discard_addr_rq
+#endif /* HAVE_AF_XDP_SUPPORT */
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3)) || \
+	(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0)))
+#define HAVE_TX_TIMEOUT_TXQUEUE
+#endif
+#else /* >= 5.6.0 */
+#define HAVE_TX_TIMEOUT_TXQUEUE
+#endif /* 5.6.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0))
+/* pci_get_dsn() (Device Serial Number helper) appeared in 5.7 */
+u64 _kc_pci_get_dsn(struct pci_dev *dev);
+#define pci_get_dsn(dev) _kc_pci_get_dsn(dev)
+#if !(SLE_VERSION_CODE > SLE_VERSION(15,2,0)) && \
+	!((LINUX_VERSION_CODE == KERNEL_VERSION(5,3,18)) && \
+	(SLE_LOCALVERSION_CODE >= KERNEL_VERSION(14,0,0))) && \
+	!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3)))
+#define pci_aer_clear_nonfatal_status pci_cleanup_aer_uncorrect_error_status
+#endif
+
+/* cpu_latency_qos_* replaced the PM_QOS_CPU_DMA_LATENCY request API in 5.7 */
+#define cpu_latency_qos_update_request pm_qos_update_request
+#define
cpu_latency_qos_add_request(arg1, arg2) pm_qos_add_request(arg1, PM_QOS_CPU_DMA_LATENCY, arg2) +#define cpu_latency_qos_remove_request pm_qos_remove_request + +#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID +#define DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID "fw.bundle_id" +#endif +#else /* >= 5.7.0 */ +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT +#define HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT +#endif /* 5.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0)) +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#endif /* (RHEL < 8.4) || (SLE < 15.3) */ +#define flex_array_size(p, member, count) \ + array_size(count, sizeof(*(p)->member) + __must_be_array((p)->member)) +#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0))) +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#ifndef xsk_umem_get_rx_frame_size +static inline u32 _xsk_umem_get_rx_frame_size(struct xdp_umem *umem) +{ + return umem->chunk_size_nohr - XDP_PACKET_HEADROOM; +} + +#define xsk_umem_get_rx_frame_size _xsk_umem_get_rx_frame_size +#endif /* xsk_umem_get_rx_frame_size */ +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#else /* SLE >= 15.3 */ +#define HAVE_XDP_BUFF_FRAME_SZ +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#endif /* SLE >= 15.3 */ +#else /* >= 5.8.0 */ +#define HAVE_TC_FLOW_INDIR_DEV +#define HAVE_TC_FLOW_INDIR_BLOCK_CLEANUP +#define HAVE_XDP_BUFF_FRAME_SZ +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#endif /* 5.8.0 */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) +#define HAVE_TC_FLOW_INDIR_DEV +#endif +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,3,0))) +#define HAVE_TC_FLOW_INDIR_DEV +#endif /* SLE_VERSION_CODE && SLE_VERSION_CODE >= SLES15SP3 */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,4,0))) +#define 
HAVE_ETHTOOL_COALESCE_EXTACK +#endif /* SLE_VERSION_CODE && SLE_VERSION_CODE >= SLES15SP4 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)) +#if IS_ENABLED(CONFIG_NET_DEVLINK) && !defined(devlink_port_attrs_set) +#if 0 +static inline void +_kc_devlink_port_attrs_set(struct devlink_port *devlink_port, + struct _kc_devlink_port_attrs *attrs) +{ + devlink_port_attrs_set(devlink_port, attrs->flavour, + attrs->phys.port_number, attrs->split, + attrs->phys.split_subport_number, + attrs->switch_id.id, attrs->switch_id.id_len); +} + +#define devlink_port_attrs_set _kc_devlink_port_attrs_set +#endif +#endif /* CONFIG_NET_DEVLINK && !devlink_port_attrs_set */ +#define HAVE_XDP_QUERY_PROG +#else /* >= 5.9.0 */ +#define HAVE_FLOW_INDIR_BLOCK_QDISC +#define HAVE_UDP_TUNNEL_NIC_INFO +#endif /* 5.9.0 */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,3))) +#define HAVE_FLOW_INDIR_BLOCK_QDISC +#endif +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,3,0))) +#define HAVE_FLOW_INDIR_BLOCK_QDISC +#endif /* SLE_VERSION_CODE && SLE_VERSION_CODE >= SLES15SP3 */ +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)) +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0)) +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#else /* SLE >= 15.3 */ +#if 0 +struct devlink_flash_update_params { + const char *file_name; + const char *component; + u32 overwrite_mask; +}; +#endif +#ifndef DEVLINK_FLASH_OVERWRITE_SETTINGS +#define DEVLINK_FLASH_OVERWRITE_SETTINGS BIT(0) +#endif + +#ifndef DEVLINK_FLASH_OVERWRITE_IDENTIFIERS +#define DEVLINK_FLASH_OVERWRITE_IDENTIFIERS BIT(1) +#endif +#endif /* !(SLE >= 15.3) */ + +#if (!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,3,0)))) +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define 
xsk_get_pool_from_qid xdp_get_umem_from_qid +#define xsk_pool_get_rx_frame_size xsk_umem_get_rx_frame_size +#define xsk_pool_set_rxq_info xsk_buff_set_rxq_info +#define xsk_pool_dma_unmap xsk_buff_dma_unmap +#define xsk_pool_dma_map xsk_buff_dma_map +#define xsk_tx_peek_desc xsk_umem_consume_tx +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL +#include +static inline void +_kc_xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, + void __always_unused *pool) +{ + xsk_buff_dma_sync_for_cpu(xdp); +} + +#define xsk_buff_dma_sync_for_cpu(xdp, pool) \ + _kc_xsk_buff_dma_sync_for_cpu(xdp, pool) +#endif /* HAVE_MEM_TYPE_XSK_BUFF_POOL */ +#else /* SLE >= 15.3 */ +#define HAVE_NETDEV_BPF_XSK_POOL +#endif /* SLE >= 15.3 */ +#else /* >= 5.10.0 */ +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#define HAVE_NETDEV_BPF_XSK_POOL +#endif /* 5.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)) +#define HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY +#else /* >= 5.11.0 */ +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW +#undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#endif /* 5.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,15,0)) +#define NEED_DEVLINK_ALLOC_SETS_DEV +#define HAVE_DEVLINK_REGISTER_SETS_DEV +#else /* >= 5.15.0 */ +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_NDO_ETH_IOCTL +#define HAVE_DEVICE_IN_MDEV_PARENT_OPS +#endif /* 5.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,16,0)) +#else /* >= 5.16.0 */ +#undef HAVE_PASID_SUPPORT +#define HAVE_DEVLINK_SET_FEATURES +#define HAVE_DEVLINK_NOTIFY_REGISTER +#endif /* 5.16.0 */ + 
+/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,17,0)) +#define NEED_ETH_HW_ADDR_SET +#define NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#else /* >=5.17.0*/ +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif /* 5.17.0 */ + +#ifdef NEED_ETH_HW_ADDR_SET +#ifndef ETH_HW_ADDR_SET +void _kc_eth_hw_addr_set(struct net_device *dev, const void *addr); +#ifndef eth_hw_addr_set +#define eth_hw_addr_set(dev, addr) \ + _kc_eth_hw_addr_set(dev, addr) +#endif /* eth_hw_addr_set */ +#endif /* ETH_HW_ADDR_SET */ +#endif /* NEED_ETH_HW_ADDR_SET */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6,1,0)) +#else /* >=6.1.0*/ +#define HAVE_NOT_NAPI_WEIGHT +#endif /* 6.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6,2,0)) +#else /* >=6.2.0*/ +#define HAVE_NOT_PTT_ADJFREQ +#define u64_stats_fetch_begin_irq u64_stats_fetch_begin +#define u64_stats_fetch_retry_irq u64_stats_fetch_retry +#endif /* 6.2.0 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6))) +#undef HAVE_XDP_BUFF_RXQ +#undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#endif + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4)) +#undef HAVE_XDP_QUERY_PROG +#endif /* 8.4 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,5))) +#undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#endif + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,6)) +#else /* >= 8.6 */ +#define HAVE_ETHTOOL_COALESCE_EXTACK +#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,6)) 
+#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif /* > 8.6 */ +#endif /* < 8.6 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8,8)) +#define HAVE_NOT_NAPI_WEIGHT +#endif /* == 8.8 */ + +/*****************************************************************************/ +#if RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,6) +#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,0) +#undef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#endif +#endif +#if RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(9,0) +#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,3) +#undef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#endif +#endif + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,0)) +#else /* >= 9.0 */ +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(9,0)) +#undef HAVE_ETHTOOL_COALESCE_EXTACK +#undef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif /* = 9.0*/ +#define HAVE_XDP_BUFF_RXQ +#endif /* 9.0 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,2)) +#else /* >= 9.2 */ +#define HAVE_NOT_NAPI_WEIGHT +#endif /* < 9.2 */ + +/*****************************************************************************/ +#if SLE_VERSION_CODE >= SLE_VERSION(15,5,0) +#undef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#endif + + +#ifdef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#ifdef HAVE_XDP_BUFF_IN_XDP_H +#include +#else +#include +#endif /* HAVE_XDP_BUFF_IN_XDP_H */ +static inline int +_kc_xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, struct net_device *dev, + u32 queue_index, unsigned int __always_unused napi_id) +{ + return xdp_rxq_info_reg(xdp_rxq, dev, queue_index); +} + +#define xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id) \ + _kc_xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id) +#endif /* HAVE_XDP_RXQ_INFO_REG_3_PARAMS */ + +#endif /* _KCOMPAT_H_ */ diff --git 
a/drivers/net/ethernet/wangxun/ngbe/ngbe_lib.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_lib.c new file mode 100644 index 000000000000..717c4f7d7692 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_lib.c @@ -0,0 +1,806 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + + +#include "ngbe.h" +#include "ngbe_sriov.h" + +/** + * ngbe_cache_ring_vmdq - Descriptor ring to register mapping for VMDq + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for VMDq to the assigned rings. It + * will also try to cache the proper offsets if RSS/FCoE/SRIOV are enabled along + * with VMDq. 
+ *
+ **/
+static bool ngbe_cache_ring_vmdq(struct ngbe_adapter *adapter)
+{
+	struct ngbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+	int i;
+	u16 reg_idx;
+
+	/* only proceed if VMDq is enabled */
+	if (!(adapter->flags & NGBE_FLAG_VMDQ_ENABLED))
+		return false;
+
+	/* start at VMDq register offset for SR-IOV enabled setups */
+	reg_idx = vmdq->offset;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		/* Rx ring i maps linearly onto register index offset + i */
+		adapter->rx_ring[i]->reg_idx = reg_idx + i;
+	}
+
+	/* Tx rings start from the same per-pool register offset as Rx */
+	reg_idx = vmdq->offset;
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		/* Tx ring i maps linearly onto register index offset + i */
+		adapter->tx_ring[i]->reg_idx = reg_idx + i;
+	}
+
+	return true;
+}
+
+/**
+ * ngbe_cache_ring_rss - Descriptor ring to register mapping for RSS
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for RSS: Rx and Tx rings map 1:1 onto
+ * hardware register indices, and XDP rings continue numbering after the
+ * regular Tx rings.
+ *
+ **/
+static bool ngbe_cache_ring_rss(struct ngbe_adapter *adapter)
+{
+	u16 i, reg_i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		adapter->rx_ring[i]->reg_idx = i;
+
+	for (i = 0, reg_i = 0; i < adapter->num_tx_queues; i++, reg_i++)
+		adapter->tx_ring[i]->reg_idx = reg_i;
+
+	/* XDP Tx rings are numbered after the last regular Tx ring */
+	for (i = 0; i < adapter->num_xdp_queues; i++, reg_i++)
+		adapter->xdp_ring[i]->reg_idx = reg_i;
+
+	return true;
+}
+
+/**
+ * ngbe_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
+ *
+ * Note, the order the various feature calls is important. It must start with
+ * the "most" features enabled at the same time, then trickle down to the
+ * least amount of features turned on at once.
+ **/
+static void ngbe_cache_ring_register(struct ngbe_adapter *adapter)
+{
+	/* VMDq takes precedence; fall back to the plain RSS mapping */
+	if (ngbe_cache_ring_vmdq(adapter))
+		return;
+
+	ngbe_cache_ring_rss(adapter);
+}
+
+/* RSS indirection masks for the supported queue-count configurations */
+#define NGBE_RSS_64Q_MASK 0x3F
+#define NGBE_RSS_16Q_MASK 0xF
+#define NGBE_RSS_8Q_MASK 0x7
+#define NGBE_RSS_4Q_MASK 0x3
+#define NGBE_RSS_2Q_MASK 0x1
+#define NGBE_RSS_DISABLED_MASK 0x0
+
+
+/**
+ * ngbe_set_vmdq_queues: Allocate queues for VMDq devices
+ * @adapter: board private structure to initialize
+ *
+ * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues
+ * and VM pools where appropriate. If RSS is available, then also try and
+ * enable RSS and map accordingly.
+ *
+ **/
+static bool ngbe_set_vmdq_queues(struct ngbe_adapter *adapter)
+{
+	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
+	u16 vmdq_m = 0;
+	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
+	u16 rss_m = NGBE_RSS_DISABLED_MASK;
+
+	/* only proceed if VMDq is enabled */
+	if (!(adapter->flags & NGBE_FLAG_VMDQ_ENABLED))
+		return false;
+
+	/* Add starting offset to total pool count */
+	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
+
+	/* double check we are limited to maximum pools */
+	vmdq_i = min_t(u16, NGBE_MAX_VMDQ_INDICES, vmdq_i);
+
+	/* when VMDQ on, disable RSS
+	 * NOTE(review): this unconditionally overwrites the user-configured
+	 * RSS limit read into rss_i above (dead store) -- confirm intended
+	 */
+	rss_i = 1;
+
+	/* remove the starting offset from the pool count */
+	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
+
+	/* save features for later use */
+	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
+	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
+
+	/* limit RSS based on user input and save for later use */
+	adapter->ring_feature[RING_F_RSS].indices = rss_i;
+	adapter->ring_feature[RING_F_RSS].mask = rss_m;
+
+	adapter->queues_per_pool = rss_i;
+	adapter->num_rx_queues = vmdq_i * rss_i;
+
+#ifdef HAVE_TX_MQ
+	adapter->num_tx_queues = vmdq_i * rss_i;
+#else
+	/* single Tx queue per pool when the kernel lacks multiqueue Tx */
+	adapter->num_tx_queues = vmdq_i;
+#endif /* HAVE_TX_MQ */
+	/* no XDP rings are allocated in VMDq mode */
+	adapter->num_xdp_queues = 0;
+
+	return true;
+}
+
+#ifdef HAVE_XDP_SUPPORT
+static
int ngbe_xdp_queues(struct ngbe_adapter *adapter) +{ + int queues = min_t(int, NGBE_MAX_XDP_QS, nr_cpu_ids); + + return adapter->xdp_prog ? queues : 0; +} +#endif +/** + * ngbe_set_rss_queues: Allocate queues for RSS + * @adapter: board private structure to initialize + * + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. + * + **/ +static bool ngbe_set_rss_queues(struct ngbe_adapter *adapter) +{ + struct ngbe_ring_feature *f; + u16 rss_i; + + /* set mask for 16 queue limit of RSS */ + f = &adapter->ring_feature[RING_F_RSS]; + rss_i = f->limit; + + f->indices = rss_i; + f->mask = NGBE_RSS_8Q_MASK; + + adapter->num_rx_queues = rss_i; +#ifdef HAVE_TX_MQ + adapter->num_tx_queues = rss_i; +#endif +#ifdef HAVE_XDP_SUPPORT + if (adapter->xdp_prog) { + adapter->num_xdp_queues = min_t(int, ngbe_xdp_queues(adapter), rss_i); + } +#endif + return true; +} + +/* + * ngbe_set_num_queues: Allocate queues for device, feature dependent + * @adapter: board private structure to initialize + * + * This is the top level queue allocation routine. The order here is very + * important, starting with the "most" number of features turned on at once, + * and ending with the smallest set of features. This way large combinations + * can be allocated if they're turned on, and smaller combinations are the + * fallthrough conditions. + * + **/ +static void ngbe_set_num_queues(struct ngbe_adapter *adapter) +{ + /* Start with base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_xdp_queues = 0; + adapter->queues_per_pool = 1; + + if (ngbe_set_vmdq_queues(adapter)) + return; + + ngbe_set_rss_queues(adapter); + +} + +/** + * ngbe_acquire_msix_vectors - acquire MSI-X vectors + * @adapter: board private structure + * + * Attempts to acquire a suitable range of MSI-X vector interrupts. 
Will + * return a negative error code if unable to acquire MSI-X vectors for any + * reason. + */ +static int ngbe_acquire_msix_vectors(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i, vectors, vector_threshold; + + if (!(adapter->flags & NGBE_FLAG_MSIX_CAPABLE)) + return -EOPNOTSUPP; + + /* We start by asking for one vector per queue pair */ + vectors = max(adapter->num_rx_queues, adapter->num_tx_queues); + vectors = max(vectors, adapter->num_xdp_queues); + + /* It is easy to be greedy for MSI-X vectors. However, it really + * doesn't do much good if we have a lot more vectors than CPUs. We'll + * be somewhat conservative and only ask for (roughly) the same number + * of vectors as there are CPUs. + */ + vectors = min_t(int, vectors, num_online_cpus()); + + /* Some vectors are necessary for non-queue interrupts */ + vectors += NON_Q_VECTORS; + + /* Hardware can only support a maximum of hw.mac->max_msix_vectors. + * With features such as RSS and VMDq, we can easily surpass the + * number of Rx and Tx descriptor queues supported by our device. + * Thus, we cap the maximum in the rare cases where the CPU count also + * exceeds our vector limit + */ + vectors = min_t(int, vectors, hw->mac.max_msix_vectors); + + /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0] + * handler, and (2) an Other (Link Status Change, etc.) handler. 
+ */ + vector_threshold = MIN_MSIX_COUNT; + + /* we need to alloc (7vfs+1pf+1misc) or (8vfs+1misc) msix entries */ + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) { + vectors += adapter->ring_feature[RING_F_VMDQ].offset; + } + + adapter->msix_entries = kcalloc(vectors, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!adapter->msix_entries) + return -ENOMEM; + + for (i = 0; i < vectors; i++) + adapter->msix_entries[i].entry = i; + + vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, + vector_threshold, vectors); + if (vectors < 0) { + /* A negative count of allocated vectors indicates an error in + * acquiring within the specified range of MSI-X vectors */ + e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n", + vectors); + + adapter->flags &= ~NGBE_FLAG_MSIX_ENABLED; + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + + return vectors; + } + + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) { + if (vectors < 9) { + adapter->flags2 &= ~NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP; + e_dev_warn("Remain available irqs < 9. Disable MISC IRQ REMAP.\n"); + } + else + vectors -= adapter->ring_feature[RING_F_VMDQ].offset; + } + + /* we successfully allocated some number of vectors within our + * requested range. + */ + adapter->flags |= NGBE_FLAG_MSIX_ENABLED; + + /* Adjust for only the vectors we'll use, which is minimum + * of max_q_vectors, or the number of vectors we were allocated. 
+ */ + vectors -= NON_Q_VECTORS; + adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors); + + return 0; +} + +static void ngbe_add_ring(struct ngbe_ring *ring, + struct ngbe_ring_container *head) +{ + ring->next = head->ring; + head->ring = ring; + head->count++; +} + +/** + * ngbe_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + **/ +static int ngbe_alloc_q_vector(struct ngbe_adapter *adapter, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int xdp_count, unsigned int xdp_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct ngbe_q_vector *q_vector; + struct ngbe_ring *ring; + int node = -1; +#ifdef HAVE_IRQ_AFFINITY_HINT + int cpu = -1; + u8 tcs = netdev_get_num_tc(adapter->netdev); +#endif + + int ring_count, size; + + /* note this will allocate space for the ring structure as well! 
*/ + ring_count = txr_count + rxr_count + xdp_count; + size = sizeof(struct ngbe_q_vector) + + (sizeof(struct ngbe_ring) * ring_count); + +#ifdef HAVE_IRQ_AFFINITY_HINT + /* customize cpu for Flow Director mapping */ + if ((tcs <= 1) && !(adapter->flags & NGBE_FLAG_VMDQ_ENABLED)) { + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + if (rss_i > 1 && adapter->atr_sample_rate) { + if (cpu_online(v_idx)) { + cpu = v_idx; + node = cpu_to_node(cpu); + } + } + } + +#endif + /* allocate q_vector and rings */ + q_vector = kzalloc_node(size, GFP_KERNEL, node); + if (!q_vector) + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* setup affinity mask and node */ +#ifdef HAVE_IRQ_AFFINITY_HINT + if (cpu != -1) + cpumask_set_cpu(cpu, &q_vector->affinity_mask); +#endif + q_vector->numa_node = node; + + /* initialize CPU for DCA */ + q_vector->cpu = -1; + +#ifndef NGBE_NO_LRO + /* initialize LRO */ + __skb_queue_head_init(&q_vector->lrolist.active); + +#endif + /* initialize NAPI */ +#ifdef HAVE_NOT_NAPI_WEIGHT + netif_napi_add(adapter->netdev, &q_vector->napi, + ngbe_poll); +#else + netif_napi_add(adapter->netdev, &q_vector->napi, + ngbe_poll, 64); +#endif +#ifndef HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD +#ifdef HAVE_NDO_BUSY_POLL + napi_hash_add(&q_vector->napi); +#endif +#endif /*HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD*/ + +#ifdef HAVE_NDO_BUSY_POLL + /* initialize busy poll */ + atomic_set(&q_vector->state, NGBE_QV_STATE_DISABLE); + +#endif + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + q_vector->v_idx = v_idx; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + q_vector->rx.work_limit = adapter->rx_work_limit; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* intialize ITR */ + if (txr_count && !rxr_count) { + /* tx only vector */ + if (adapter->tx_itr_setting == 1) + q_vector->itr = NGBE_7K_ITR; + else + q_vector->itr 
= adapter->tx_itr_setting; + } else { + /* rx or rx/tx vector */ + if (adapter->rx_itr_setting == 1) + q_vector->itr = NGBE_7K_ITR; + else + q_vector->itr = adapter->rx_itr_setting; + } + + while (txr_count) { + /* assign generic ring traits */ + ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + ngbe_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + if (adapter->num_vmdqs > 1) + ring->queue_index = + txr_idx % adapter->queues_per_pool; + else + ring->queue_index = txr_idx; + clear_ring_xdp(ring); + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* update count and index */ + txr_count--; + txr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + while (xdp_count) { + /* assign generic ring traits */ + ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + ngbe_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = xdp_idx; + set_ring_xdp(ring); + + + /* assign ring to adapter */ + adapter->xdp_ring[xdp_idx] = ring; + + /* update count and index */ + xdp_count--; + xdp_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + while (rxr_count) { + /* assign generic ring traits */ + ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + ngbe_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + if (adapter->num_vmdqs > 1) + ring->queue_index = + rxr_idx % adapter->queues_per_pool; + else + ring->queue_index = rxr_idx; + + /* assign ring to adapter 
*/ + adapter->rx_ring[rxr_idx] = ring; + + /* update count and index */ + rxr_count--; + rxr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + return 0; +} + +/** + * ngbe_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void ngbe_free_q_vector(struct ngbe_adapter *adapter, int v_idx) +{ + struct ngbe_q_vector *q_vector = adapter->q_vector[v_idx]; + struct ngbe_ring *ring; + + ngbe_for_each_ring(ring, q_vector->tx) { + if (ring_is_xdp(ring)) + adapter->xdp_ring[ring->queue_index] = NULL; + else + adapter->tx_ring[ring->queue_index] = NULL; + } +#ifdef HAVE_XDP_SUPPORT + if (static_key_enabled((struct static_key *)&ngbe_xdp_locking_key)) + static_branch_dec(&ngbe_xdp_locking_key); +#endif + + ngbe_for_each_ring(ring, q_vector->rx) + adapter->rx_ring[ring->queue_index] = NULL; + + adapter->q_vector[v_idx] = NULL; +#ifdef HAVE_NDO_BUSY_POLL + napi_hash_del(&q_vector->napi); +#endif + netif_napi_del(&q_vector->napi); +#ifndef NGBE_NO_LRO + __skb_queue_purge(&q_vector->lrolist.active); +#endif + kfree_rcu(q_vector, rcu); +} + +/** + * ngbe_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. 
+ **/
+static int ngbe_alloc_q_vectors(struct ngbe_adapter *adapter)
+{
+	unsigned int q_vectors = adapter->num_q_vectors;
+	unsigned int rx_left = adapter->num_rx_queues;
+	unsigned int tx_left = adapter->num_tx_queues;
+	unsigned int xdp_left = adapter->num_xdp_queues;
+	unsigned int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
+	int err;
+
+	/* if there are enough vectors, give every Rx ring its own vector */
+	if (q_vectors >= (rx_left + tx_left + xdp_left)) {
+		for (; rx_left; v_idx++) {
+			err = ngbe_alloc_q_vector(adapter, q_vectors, v_idx,
+						  0, 0, 0, 0, 1, rxr_idx);
+			if (err)
+				goto err_out;
+
+			/* update counts and index */
+			rx_left--;
+			rxr_idx++;
+		}
+	}
+
+	/* spread the remaining rings evenly over the remaining vectors */
+	for (; v_idx < q_vectors; v_idx++) {
+		int rx_per_vec = DIV_ROUND_UP(rx_left, q_vectors - v_idx);
+		int tx_per_vec = DIV_ROUND_UP(tx_left, q_vectors - v_idx);
+		int xdp_per_vec = DIV_ROUND_UP(xdp_left, q_vectors - v_idx);
+
+		err = ngbe_alloc_q_vector(adapter, q_vectors, v_idx,
+					  tx_per_vec, txr_idx,
+					  xdp_per_vec, xdp_idx,
+					  rx_per_vec, rxr_idx);
+		if (err)
+			goto err_out;
+
+		/* update counts and index */
+		rx_left -= rx_per_vec;
+		tx_left -= tx_per_vec;
+		xdp_left -= xdp_per_vec;
+		rxr_idx++;
+		txr_idx++;
+		xdp_idx++;
+	}
+
+	return 0;
+
+err_out:
+	/* tear down anything built so far and report no queues at all */
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+	adapter->num_xdp_queues = 0;
+	adapter->num_q_vectors = 0;
+
+	while (v_idx--)
+		ngbe_free_q_vector(adapter, v_idx);
+
+	return -ENOMEM;
+}
+
+/**
+ * ngbe_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/ +static void ngbe_free_q_vectors(struct ngbe_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_xdp_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + ngbe_free_q_vector(adapter, v_idx); +} + +void ngbe_reset_interrupt_capability(struct ngbe_adapter *adapter) +{ + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) { + adapter->flags &= ~NGBE_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & NGBE_FLAG_MSI_ENABLED) { + adapter->flags &= ~NGBE_FLAG_MSI_ENABLED; + pci_disable_msi(adapter->pdev); + } +} + +/** + * ngbe_set_interrupt_capability - set MSI-X or MSI if supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +void ngbe_set_interrupt_capability(struct ngbe_adapter *adapter) +{ + int err; + + /* We will try to get MSI-X interrupts first */ + if (!ngbe_acquire_msix_vectors(adapter)) + return; + + /* At this point, we do not have MSI-X capabilities. We need to + * reconfigure or disable various features which require MSI-X + * capability. + */ + /* Disable VMDq support */ + e_dev_warn("Disabling VMQd support\n"); + adapter->flags &= ~NGBE_FLAG_VMDQ_ENABLED; + +#ifdef CONFIG_PCI_IOV + /* Disable SR-IOV support */ + e_dev_warn("Disabling SR-IOV support\n"); + ngbe_disable_sriov(adapter); + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) + adapter->flags2 &= ~NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP; +#endif /* CONFIG_PCI_IOV */ + + /* Disable RSS */ + e_dev_warn("Disabling RSS support\n"); + adapter->ring_feature[RING_F_RSS].limit = 1; + + /* recalculate number of queues now that many features have been + * changed or disabled. 
+ */ + ngbe_set_num_queues(adapter); + adapter->num_q_vectors = 1; + + if (!(adapter->flags & NGBE_FLAG_MSI_CAPABLE)) + return; + + err = pci_enable_msi(adapter->pdev); + if (err) + e_dev_warn("Failed to allocate MSI interrupt, falling back to " + "legacy. Error: %d\n", + err); + else + adapter->flags |= NGBE_FLAG_MSI_ENABLED; +} + +/** + * ngbe_init_interrupt_scheme - Determine proper interrupt scheme + * @adapter: board private structure to initialize + * + * We determine which interrupt scheme to use based on... + * - Kernel support (MSI, MSI-X) + * - which can be user-defined (via MODULE_PARAM) + * - Hardware queue count (num_*_queues) + * - defined by miscellaneous hardware support/features (RSS, etc.) + **/ +int ngbe_init_interrupt_scheme(struct ngbe_adapter *adapter) +{ + int err; + + /* if assigned vfs >= 7, the PF queue irq remain seq 0 and misc irq move from + * seq 1 to seq 8. it needs extra processions. + */ + if (adapter->num_vfs >= NGBE_MAX_VF_FUNCTIONS - 1) { + adapter->flags2 |= NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP; + adapter->irq_remap_offset = adapter->num_vfs; + } + + /* Number of supported queues */ + ngbe_set_num_queues(adapter); + + /* Set interrupt mode */ + ngbe_set_interrupt_capability(adapter); + + /* Allocate memory for queues */ + err = ngbe_alloc_q_vectors(adapter); + if (err) { + e_err(probe, "Unable to allocate memory for queue vectors\n"); + ngbe_reset_interrupt_capability(adapter); + return err; + } + + ngbe_cache_ring_register(adapter); + + set_bit(__NGBE_DOWN, &adapter->state); + + return 0; +} + +/** + * ngbe_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +void ngbe_clear_interrupt_scheme(struct ngbe_adapter *adapter) +{ + ngbe_free_q_vectors(adapter); + ngbe_reset_interrupt_capability(adapter); + + /* remove this flags */ + if 
(adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) { + adapter->flags2 &= ~NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP; + } +} + +void ngbe_tx_ctxtdesc(struct ngbe_ring *tx_ring, u32 vlan_macip_lens, + u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) +{ + struct ngbe_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = NGBE_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= NGBE_TXD_DTYP_CTXT; + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); +} + diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c new file mode 100644 index 000000000000..9bce81cc2588 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -0,0 +1,10328 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef NETIF_F_TSO +#include +#ifdef NETIF_F_TSO6 +#include +#endif +#endif +#include +#ifdef SIOCETHTOOL +#include +#endif + +#include +#include "ngbe.h" + +#ifdef HAVE_XDP_SUPPORT +#include +#include +#include +#endif + +#ifdef HAVE_VXLAN_CHECKS +#include +#endif /* HAVE_VXLAN_CHECKS */ + +#include "ngbe_sriov.h" +#include "ngbe_hw.h" +#include "ngbe_phy.h" +#include "ngbe_pcierr.h" + +char ngbe_driver_name[32] = NGBE_NAME; +static const char ngbe_driver_string[] = + "WangXun Gigabit PCI Express Network Driver"; +#define DRV_HW_PERF + +#define FPGA + +#define DRIVERIOV + +#define BYPASS_TAG + +#define RELEASE_TAG + +#if defined(NGBE_SUPPORT_KYLIN) +#define DRV_VERSION __stringify(1.2.5.3klos) +#elif defined(CONFIG_EULER_KERNEL) +#define DRV_VERSION __stringify(1.2.5.3elos) +#elif defined(CONFIG_UOS_KERNEL) +#define DRV_VERSION __stringify(1.2.5.3uos) +#else +#define DRV_VERSION __stringify(1.2.5.3) +#endif +const char ngbe_driver_version[32] = DRV_VERSION; +static const char ngbe_copyright[] = + "Copyright (c) 2018 -2019 Beijing WangXun Technology Co., Ltd"; +static const char ngbe_overheat_msg[] = + "Network adapter has been stopped because it has over heated. 
" + "If the problem persists, restart the computer, or " + "power off the system and replace the adapter"; +static const char ngbe_underheat_msg[] = + "Network adapter has been started again since the temperature " + "has been back to normal state"; + +/* ngbe_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id ngbe_pci_tbl[] = { + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_TEST), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860A2), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860A2S), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860A4), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860A4S), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860AL2), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860AL2S), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860AL4), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860AL4S), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860AL_W), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860NCSI), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860A1L), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860A1), 0}, + { PCI_VDEVICE(TRUSTNETIC, 0x10c), 0}, + /* required last entry */ + { .device = 0 } +}; +MODULE_DEVICE_TABLE(pci, ngbe_pci_tbl); + + +MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, "); +MODULE_DESCRIPTION("WangXun(R) Gigabit PCI Express Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +#ifdef HAVE_XDP_SUPPORT +DEFINE_STATIC_KEY_FALSE(ngbe_xdp_locking_key); +EXPORT_SYMBOL(ngbe_xdp_locking_key); +#endif +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 + +static struct workqueue_struct *ngbe_wq; + +static bool ngbe_check_cfg_remove(struct ngbe_hw *hw, struct pci_dev *pdev); +static void ngbe_clean_rx_ring(struct ngbe_ring *rx_ring); +static void 
ngbe_clean_tx_ring(struct ngbe_ring *tx_ring); +static u32 ngbe_tx_cmd_type(u32 tx_flags); + +extern ngbe_dptype ngbe_ptype_lookup[256]; + +static inline ngbe_dptype ngbe_decode_ptype(const u8 ptype) +{ + return ngbe_ptype_lookup[ptype]; +} + +static inline ngbe_dptype +decode_rx_desc_ptype(const union ngbe_rx_desc *rx_desc) +{ + return ngbe_decode_ptype(NGBE_RXD_PKTTYPE(rx_desc)); +} + +void ngbe_print_tx_hang_status(struct ngbe_adapter *adapter) +{ + int pos; + u32 value; + struct pci_dev *pdev = adapter->pdev; + u16 devctl2; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); + if (!pos) + return; + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_STATUS, &value); + e_info(probe, "AER Uncorrectable Error Status: 0x%08x\n", value); + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &value); + e_info(probe, "AER Uncorrectable Error Mask: 0x%08x\n", value); + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &value); + e_info(probe, "AER Uncorrectable Error Severity: 0x%08x\n", value); + pci_read_config_dword(pdev, pos + PCI_ERR_COR_STATUS, &value); + e_info(probe, "AER Correctable Error Status: 0x%08x\n", value); + pci_read_config_dword(pdev, pos + PCI_ERR_COR_MASK, &value); + e_info(probe, "AER Correctable Error Mask: 0x%08x\n", value); + pci_read_config_dword(pdev, pos + PCI_ERR_CAP, &value); + e_info(probe, "AER Capabilities and Control Register: 0x%08x\n", value); + + pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &devctl2); + e_info(probe, "Device Control2 Register: 0x%04x\n", devctl2); + + e_info(probe, "Tx flow control Status[TDB_TFCS 0xCE00]: 0x%x\n", + rd32(&adapter->hw, NGBE_TDB_TFCS)); + + e_info(probe, "Tx Desc Fatal Error[TDM_DESC_FATAL 0x80D0]: 0x%x\n", + rd32(&adapter->hw, NGBE_TDM_DESC_FATAL)); + return; +} + +static void ngbe_check_minimum_link(struct ngbe_adapter *adapter, + int expected_gts) +{ + struct ngbe_hw *hw = &adapter->hw; + struct pci_dev *pdev; + + /* Some devices are not connected over PCIe and thus do not 
negotiate + * speed. These devices do not have valid bus info, and thus any report + * we generate may not be correct. + */ + if (hw->bus.type == ngbe_bus_type_internal) + return; + + pdev = adapter->pdev; + + pcie_print_link_status(pdev); + +} + +/** + * ngbe_enumerate_functions - Get the number of ports this device has + * @adapter: adapter structure + * + * This function enumerates the phsyical functions co-located on a single slot, + * in order to determine how many ports a device has. This is most useful in + * determining the required GT/s of PCIe bandwidth necessary for optimal + * performance. + **/ +static inline int ngbe_enumerate_functions(struct ngbe_adapter *adapter) +{ + struct pci_dev *entry, *pdev = adapter->pdev; + int physfns = 0; + + list_for_each_entry(entry, &pdev->bus->devices, bus_list) { +#ifdef CONFIG_PCI_IOV + /* don't count virtual functions */ + if (entry->is_virtfn) + continue; +#endif + + /* When the devices on the bus don't all match our device ID, + * we can't reliably determine the correct number of + * functions. This can occur if a function has been direct + * attached to a virtual machine using VT-d, for example. In + * this case, simply return -1 to indicate this. 
+ */ + if ((entry->vendor != pdev->vendor) || + (entry->device != pdev->device)) + return -1; + + physfns++; + } + + return physfns; +} + +static void ngbe_service_event_schedule(struct ngbe_adapter *adapter) +{ + if (!test_bit(__NGBE_DOWN, &adapter->state) && + !test_bit(__NGBE_REMOVING, &adapter->state) && + !test_and_set_bit(__NGBE_SERVICE_SCHED, &adapter->state)) + queue_work(ngbe_wq, &adapter->service_task); +} + +static void ngbe_service_event_complete(struct ngbe_adapter *adapter) +{ + BUG_ON(!test_bit(__NGBE_SERVICE_SCHED, &adapter->state)); + + /* flush memory to make sure state is correct before next watchdog */ + smp_mb__before_atomic(); + clear_bit(__NGBE_SERVICE_SCHED, &adapter->state); +} + +static void ngbe_remove_adapter(struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = hw->back; + + if (!hw->hw_addr) + return; + hw->hw_addr = NULL; + e_dev_err("Adapter removed\n"); + if (test_bit(__NGBE_SERVICE_INITED, &adapter->state)) + ngbe_service_event_schedule(adapter); +} + +static void ngbe_check_remove(struct ngbe_hw *hw, u32 reg) +{ + u32 value; + + /* The following check not only optimizes a bit by not + * performing a read on the status register when the + * register just read was a status register read that + * returned NGBE_FAILED_READ_REG. It also blocks any + * potential recursion. 
+ */ + if (reg == NGBE_CFG_PORT_ST) { + ngbe_remove_adapter(hw); + return; + } + value = rd32(hw, NGBE_CFG_PORT_ST); + if (value == NGBE_FAILED_READ_REG) + ngbe_remove_adapter(hw); +} + +static u32 ngbe_validate_register_read(struct ngbe_hw *hw, u32 reg, bool quiet) +{ + int i; + u32 value; + u8 __iomem *reg_addr; + struct ngbe_adapter *adapter = hw->back; + + reg_addr = READ_ONCE(hw->hw_addr); + if (NGBE_REMOVED(reg_addr)) + return NGBE_FAILED_READ_REG; + for (i = 0; i < NGBE_DEAD_READ_RETRIES; ++i) { + value = ngbe_rd32(reg_addr + reg); + if (value != NGBE_DEAD_READ_REG) + break; + } + if (quiet) + return value; + if (value == NGBE_DEAD_READ_REG) + e_err(drv, "%s: register %x read unchanged\n", __func__, reg); + else + e_warn(hw, "%s: register %x read recovered after %d retries\n", + __func__, reg, i + 1); + return value; +} + +/** + * ngbe_read_reg - Read from device register + * @hw: hw specific details + * @reg: offset of register to read + * + * Returns : value read or NGBE_FAILED_READ_REG if removed + * + * This function is used to read device registers. It checks for device + * removal by confirming any read that returns all ones by checking the + * status register value for all ones. This function avoids reading from + * the hardware if a removal was previously detected in which case it + * returns NGBE_FAILED_READ_REG (all ones). 
+ */ +u32 ngbe_read_reg(struct ngbe_hw *hw, u32 reg, bool quiet) +{ + u32 value; + u8 __iomem *reg_addr; + + reg_addr = READ_ONCE(hw->hw_addr); + if (NGBE_REMOVED(reg_addr)) + return NGBE_FAILED_READ_REG; + value = ngbe_rd32(reg_addr + reg); + if (unlikely(value == NGBE_FAILED_READ_REG)) + ngbe_check_remove(hw, reg); + if (unlikely(value == NGBE_DEAD_READ_REG)) + value = ngbe_validate_register_read(hw, reg, quiet); + return value; +} + +static void ngbe_release_hw_control(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + /* Let firmware take over control of h/w */ + wr32m(&adapter->hw, NGBE_CFG_PORT_CTL, + NGBE_CFG_PORT_CTL_DRV_LOAD, 0); + if (hw->phy.type == ngbe_phy_yt8521s_sfi) + wr32(&adapter->hw, NGBE_CFG_LED_CTL, 0x0); +} + +static void ngbe_get_hw_control(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + /* Let firmware know the driver has taken over */ + wr32m(&adapter->hw, NGBE_CFG_PORT_CTL, + NGBE_CFG_PORT_CTL_DRV_LOAD, NGBE_CFG_PORT_CTL_DRV_LOAD); + if (hw->phy.type == ngbe_phy_yt8521s_sfi) + wr32(&adapter->hw, NGBE_CFG_LED_CTL, BIT(18)); +} + +/** + * ngbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors + * @adapter: pointer to adapter struct + * @direction: 0 for Rx, 1 for Tx, -1 for other causes + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + * + **/ +static void ngbe_set_ivar(struct ngbe_adapter *adapter, s8 direction, + u16 queue, u16 msix_vector) +{ + u32 ivar, index; + struct ngbe_hw *hw = &adapter->hw; + + if (direction == -1) { + /* other causes */ + msix_vector |= NGBE_PX_IVAR_ALLOC_VAL; + index = 0; + ivar = rd32(&adapter->hw, NGBE_PX_MISC_IVAR); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + /* if assigned VFs >= 7, the pf misc irq shall be remapped to 0x88. 
*/ + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) + ivar = msix_vector; + wr32(&adapter->hw, NGBE_PX_MISC_IVAR, ivar); + } else { + /* tx or rx causes */ + msix_vector |= NGBE_PX_IVAR_ALLOC_VAL; + index = ((16 * (queue & 1)) + (8 * direction)); + ivar = rd32(hw, NGBE_PX_IVAR(queue >> 1)); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + wr32(hw, NGBE_PX_IVAR(queue >> 1), ivar); + } +} + +void ngbe_unmap_and_free_tx_resource(struct ngbe_ring *ring, + struct ngbe_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* tx_buffer must be completely set up in the transmit path */ +} + +static void ngbe_update_xoff_rx_lfc(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_hw_stats *hwstats = &adapter->stats; + int i; + u32 data; + + if ((hw->fc.current_mode != ngbe_fc_full) && + (hw->fc.current_mode != ngbe_fc_rx_pause)) + return; + + data = rd32(hw, NGBE_MAC_LXOFFRXC); + + hwstats->lxoffrxc += data; + + /* refill credits (no tx hang) if we received xoff */ + if (!data) + return; + + for (i = 0; i < adapter->num_tx_queues; i++) + clear_bit(__NGBE_HANG_CHECK_ARMED, + &adapter->tx_ring[i]->state); + for (i = 0; i < adapter->num_xdp_queues; i++) + clear_bit(__NGBE_HANG_CHECK_ARMED, + &adapter->xdp_ring[i]->state); +} + + +static u64 ngbe_get_tx_completed(struct ngbe_ring *ring) +{ + return ring->stats.packets; +} + +static u64 ngbe_get_tx_pending(struct ngbe_ring *ring) +{ + struct ngbe_adapter *adapter; + struct ngbe_hw *hw; + u32 head, tail; + + if (ring->accel) + adapter = 
ring->accel->adapter; + else + adapter = ring->q_vector->adapter; + + hw = &adapter->hw; + head = rd32(hw, NGBE_PX_TR_RP(ring->reg_idx)); + tail = rd32(hw, NGBE_PX_TR_WP(ring->reg_idx)); + + return ((head <= tail) ? tail : tail + ring->count) - head; +} + +static inline bool ngbe_check_tx_hang(struct ngbe_ring *tx_ring) +{ + u64 tx_done = ngbe_get_tx_completed(tx_ring); + u64 tx_done_old = tx_ring->tx_stats.tx_done_old; + u64 tx_pending = ngbe_get_tx_pending(tx_ring); + + clear_check_for_tx_hang(tx_ring); + + /* + * Check for a hung queue, but be thorough. This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. The + * ARMED bit is set to indicate a potential hang. The + * bit is cleared if a pause frame is received to remove + * false hang detection due to PFC or 802.3x frames. By + * requiring this to fail twice we avoid races with + * pfc clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. 
+ */ + if (tx_done_old == tx_done && tx_pending) { + + /* make sure it is true for two checks in a row */ + return test_and_set_bit(__NGBE_HANG_CHECK_ARMED, + &tx_ring->state); + } + /* update completed stats and continue */ + tx_ring->tx_stats.tx_done_old = tx_done; + /* reset the countdown */ + clear_bit(__NGBE_HANG_CHECK_ARMED, &tx_ring->state); + + return false; +} + +static void ngbe_tx_timeout_dorecovery(struct ngbe_adapter *adapter) +{ + /* schedule immediate reset if we believe we hung */ + if (adapter->hw.bus.lan_id == 0) + adapter->flags2 |= NGBE_FLAG2_PCIE_NEED_RECOVER; + else + wr32(&adapter->hw, NGBE_MIS_PF_SM, 1); + ngbe_service_event_schedule(adapter); +} + +/** + * ngbe_tx_timeout_reset - initiate reset due to Tx timeout + * @adapter: driver private struct + **/ +static void ngbe_tx_timeout_reset(struct ngbe_adapter *adapter) +{ +#if 0 + if (time_after(jiffies, (adapter->tx_timeout_last_recovery + HZ*20))) + adapter->tx_timeout_recovery_level = 0; + else if (time_before(jiffies, + (adapter->tx_timeout_last_recovery + + adapter->netdev->watchdog_timeo))) + return; /* don't do any new action before the next timeout */ + + adapter->tx_timeout_last_recovery = jiffies; + netdev_info(adapter->netdev, "tx_timeout recovery level %d\n", + adapter->tx_timeout_recovery_level); + + /* Do the reset outside of interrupt context */ + if (!test_bit(__NGBE_DOWN, &adapter->state)) { + switch (adapter->tx_timeout_recovery_level) { + case 0: + adapter->flags2 |= NGBE_FLAG2_PF_RESET_REQUESTED; + break; + case 1: + adapter->flags2 |= NGBE_FLAG2_DEV_RESET_REQUESTED; + break; + case 2: + adapter->flags2 |= NGBE_FLAG2_GLOBAL_RESET_REQUESTED; + break; + default: + netdev_err(adapter->netdev, + "tx_timeout recovery unsuccessful\n"); + break; + } + } +#endif + if (!test_bit(__NGBE_DOWN, &adapter->state)) { + adapter->flags2 |= NGBE_FLAG2_PF_RESET_REQUESTED; + e_warn(drv, "initiating reset due to tx timeout\n"); + ngbe_service_event_schedule(adapter); + } +// 
adapter->tx_timeout_recovery_level++; +} + +/** + * ngbe_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +#ifdef HAVE_TX_TIMEOUT_TXQUEUE + static void ngbe_tx_timeout(struct net_device *netdev, unsigned int txqueue) +#else + static void ngbe_tx_timeout(struct net_device *netdev) +#endif +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + int i; + u16 vid = 0; + u16 cmd = 0; + u32 reg32 = 0; + u32 head, tail; + + pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &vid); + ERROR_REPORT1(NGBE_ERROR_POLLING, "pci vendor id is 0x%x\n", vid); + + pci_read_config_word(adapter->pdev, PCI_COMMAND, &cmd); + ERROR_REPORT1(NGBE_ERROR_POLLING, "pci command reg is 0x%x.\n", cmd); + + reg32 = rd32(&adapter->hw, 0x10000); + ERROR_REPORT1(NGBE_ERROR_POLLING, "reg 0x10000 value is 0x%08x\n", reg32); + + for (i = 0; i < adapter->num_tx_queues; i++) { + head = rd32(&adapter->hw, NGBE_PX_TR_RP(adapter->tx_ring[i]->reg_idx)); + tail = rd32(&adapter->hw, NGBE_PX_TR_WP(adapter->tx_ring[i]->reg_idx)); + + ERROR_REPORT1(NGBE_ERROR_POLLING, + "tx ring %d next_to_use is %d, next_to_clean is %d\n", + i, adapter->tx_ring[i]->next_to_use, adapter->tx_ring[i]->next_to_clean); + ERROR_REPORT1(NGBE_ERROR_POLLING, + "tx ring %d hw rp is 0x%x, wp is 0x%x\n", i, head, tail); + } + + reg32 = rd32(&adapter->hw, NGBE_PX_IMS); + ERROR_REPORT1(NGBE_ERROR_POLLING, + "PX_IMS value is 0x%08x\n", reg32); + if (reg32) { + ERROR_REPORT1(NGBE_ERROR_POLLING, "clear interrupt mask.\n"); + wr32(&adapter->hw, NGBE_PX_ICS, reg32); + wr32(&adapter->hw, NGBE_PX_IMC, reg32); + } + + if (NGBE_RECOVER_CHECK == 1) { + if (vid == NGBE_FAILED_READ_CFG_WORD) { + ngbe_tx_timeout_dorecovery(adapter); + } else { + ngbe_print_tx_hang_status(adapter); + ngbe_tx_timeout_reset(adapter); + } + } else { + ngbe_tx_timeout_dorecovery(adapter); + } +} + +/** + * ngbe_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: structure 
containing interrupt and ring information + * @tx_ring: tx ring to clean + **/ +static bool ngbe_clean_tx_irq(struct ngbe_q_vector *q_vector, + struct ngbe_ring *tx_ring) +{ + struct ngbe_adapter *adapter = q_vector->adapter; + struct ngbe_tx_buffer *tx_buffer; + union ngbe_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + unsigned int i = tx_ring->next_to_clean; + struct ngbe_hw *hw = &adapter->hw; + u16 vid = 0; + + if (test_bit(__NGBE_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = NGBE_TX_DESC(tx_ring, i); + i -= tx_ring->count; + do { + + union ngbe_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(NGBE_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ +#ifdef HAVE_XDP_SUPPORT + if (ring_is_xdp(tx_ring)) +#ifdef HAVE_XDP_FRAME_STRUCT + xdp_return_frame(tx_buffer->xdpf); +#else + page_frag_free(tx_buffer->data); +#endif + else +#endif + dev_consume_skb_any(tx_buffer->skb); + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + if (ring_is_xdp(tx_ring)) +#ifdef HAVE_XDP_FRAME_STRUCT + tx_buffer->xdpf = NULL; +#else + tx_buffer->data = NULL; +#endif + else + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; 
+ tx_buffer = tx_ring->tx_buffer_info; + tx_desc = NGBE_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = NGBE_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + if (check_for_tx_hang(tx_ring)) { + if (!ngbe_check_tx_hang(tx_ring)) { + adapter->hang_cnt = 0; + } else + adapter->hang_cnt++; + + if ( adapter->hang_cnt >= 5 ) { + /* schedule immediate reset if we believe we hung */ + + e_err(drv, "Detected Tx Unit Hang%s\n" + " Tx Queue <%d>\n" + " TDH, TDT <%x>, <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "tx_buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " jiffies <%lx>\n", + ring_is_xdp(tx_ring) ? 
" (XDP)" : "", + tx_ring->queue_index, + rd32(hw, NGBE_PX_TR_RP(tx_ring->reg_idx)), + rd32(hw, NGBE_PX_TR_WP(tx_ring->reg_idx)), + tx_ring->next_to_use, i, + tx_ring->tx_buffer_info[i].time_stamp, jiffies); + + pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &vid); + if (vid == NGBE_FAILED_READ_CFG_WORD) { + e_info(hw, "pcie link has been lost.\n"); + } + + if (!ring_is_xdp(tx_ring)) + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + e_info(probe, + "tx hang %d detected on queue %d, resetting adapter\n", + adapter->tx_timeout_count + 1, tx_ring->queue_index); + + if (NGBE_RECOVER_CHECK == 1) { + if (vid == NGBE_FAILED_READ_CFG_WORD) { + ngbe_tx_timeout_dorecovery(adapter); + } else { + ngbe_print_tx_hang_status(adapter); + ngbe_tx_timeout_reset(adapter); + } + } else { + ngbe_tx_timeout_dorecovery(adapter); + } + + /* the adapter is about to reset, no point in enabling stuff */ + return true; + } + } + if(ring_is_xdp(tx_ring)) + return !!budget; + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (ngbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+ */ + smp_mb(); +#ifdef HAVE_TX_MQ + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) + && !test_bit(__NGBE_DOWN, &adapter->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } +#else + if (netif_queue_stopped(tx_ring->netdev) && + !test_bit(__NGBE_DOWN, &adapter->state)) { + netif_wake_queue(tx_ring->netdev); + ++tx_ring->tx_stats.restart_queue; + } +#endif + } + + return !!budget; +} + + +#ifdef NETIF_F_RXHASH +#define NGBE_RSS_L4_TYPES_MASK \ + ((1ul << NGBE_RXD_RSSTYPE_IPV4_TCP) | \ + (1ul << NGBE_RXD_RSSTYPE_IPV4_UDP) | \ + (1ul << NGBE_RXD_RSSTYPE_IPV4_SCTP) | \ + (1ul << NGBE_RXD_RSSTYPE_IPV6_TCP) | \ + (1ul << NGBE_RXD_RSSTYPE_IPV6_UDP) | \ + (1ul << NGBE_RXD_RSSTYPE_IPV6_SCTP)) + +static inline void ngbe_rx_hash(struct ngbe_ring *ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u16 rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + NGBE_RXD_RSSTYPE_MASK; + + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (NGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? 
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); +} +#endif /* NETIF_F_RXHASH */ + + +/** + * ngbe_rx_checksum - indicate in skb if hw indicated a good cksum + * @ring: structure containing ring specific data + * @rx_desc: current Rx descriptor being processed + * @skb: skb currently being received and modified + **/ +static inline void ngbe_rx_checksum(struct ngbe_ring *ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + ngbe_dptype dptype = decode_rx_desc_ptype(rx_desc); + + skb->ip_summed = CHECKSUM_NONE; + + skb_checksum_none_assert(skb); + + /* Rx csum disabled */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* if IPv4 header checksum error */ + if ((ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_IPCS) && + ngbe_test_staterr(rx_desc, NGBE_RXD_ERR_IPE)) || + (ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_OUTERIPCS) && + ngbe_test_staterr(rx_desc, NGBE_RXD_ERR_OUTERIPER))) { + ring->rx_stats.csum_err++; + return; + } + + /* L4 checksum offload flag must set for the below code to work */ + if (!ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_L4CS)) + return; + + /*likely incorrect csum if IPv6 Dest Header found */ + if (dptype.prot != NGBE_DEC_PTYPE_PROT_SCTP && NGBE_RXD_IPV6EX(rx_desc)) + return; + + /* if L4 checksum error */ + if (ngbe_test_staterr(rx_desc, NGBE_RXD_ERR_TCPE)) { + ring->rx_stats.csum_err++; + return; + } + /* If there is an outer header present that might contain a checksum + * we need to bump the checksum level by 1 to reflect the fact that + * we are indicating we validated the inner checksum. 
+ */ + if (dptype.etype >= NGBE_DEC_PTYPE_ETYPE_IG) { + #ifdef HAVE_SKBUFF_CSUM_LEVEL + skb->csum_level = 1; + #endif + } + + /* It must be a TCP or UDP or SCTP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + ring->rx_stats.csum_good_cnt++; +} + +static bool ngbe_alloc_mapped_skb(struct ngbe_ring *rx_ring, + struct ngbe_rx_buffer *bi) +{ + struct sk_buff *skb = bi->skb; + dma_addr_t dma = bi->dma; + + if (unlikely(dma)) + return true; + + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_buf_len); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + bi->skb = skb; + + } + + dma = dma_map_single(rx_ring->dev, skb->data, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + dev_kfree_skb_any(skb); + bi->skb = NULL; + + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + bi->dma = dma; + return true; +} +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT +static bool ngbe_alloc_mapped_page(struct ngbe_ring *rx_ring, + struct ngbe_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(ngbe_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, + ngbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, ngbe_rx_pg_order(rx_ring)); + + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + bi->page_dma = dma; + bi->page = 
page; + bi->page_offset = rx_ring->xdp_prog ? XDP_PACKET_HEADROOM : 0; + + return true; +} +#endif +/** + * ngbe_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void ngbe_alloc_rx_buffers(struct ngbe_ring *rx_ring, u16 cleaned_count) +{ + union ngbe_rx_desc *rx_desc; + struct ngbe_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = NGBE_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + do { +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + if (!ngbe_alloc_mapped_skb(rx_ring, bi)) + break; + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + +#else + if (ring_is_hs_enabled(rx_ring)) { + if (!ngbe_alloc_mapped_skb(rx_ring, bi)) + break; + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + } + + if (!ngbe_alloc_mapped_page(rx_ring, bi)) + break; + rx_desc->read.pkt_addr = + cpu_to_le64(bi->page_dma + bi->page_offset); +#endif + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = NGBE_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.upper.status_error = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; +#endif + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + wmb(); + writel(i, rx_ring->tail); + } +} + +static inline u16 ngbe_get_hlen(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc) +{ + __le16 hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; + u16 hlen = le16_to_cpu(hdr_info) & NGBE_RXD_HDRBUFLEN_MASK; + + UNREFERENCED_PARAMETER(rx_ring); + + if (hlen > (NGBE_RX_HDR_SIZE << NGBE_RXD_HDRBUFLEN_SHIFT)) + hlen = 0; + else + hlen >>= NGBE_RXD_HDRBUFLEN_SHIFT; + + return hlen; +} + +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT +/** + * ngbe_merge_active_tail - merge active tail into lro skb + * @tail: pointer to active tail in frag_list + * + * This function merges the length and data of an active tail into the + * skb containing the frag_list. It resets the tail's pointer to the head, + * but it leaves the head's pointer to tail intact. + **/ +static inline struct sk_buff *ngbe_merge_active_tail(struct sk_buff *tail) +{ + struct sk_buff *head = NGBE_CB(tail)->head; + + if (!head) + return tail; + + head->len += tail->len; + head->data_len += tail->len; + head->truesize += tail->truesize; + + NGBE_CB(tail)->head = NULL; + + return head; +} + +/** + * ngbe_add_active_tail - adds an active tail into the skb frag_list + * @head: pointer to the start of the skb + * @tail: pointer to active tail to add to frag_list + * + * This function adds an active tail to the end of the frag list. This tail + * will still be receiving data so we cannot yet add its stats to the main + * skb. That is done via ngbe_merge_active_tail.
+ **/ +static inline void ngbe_add_active_tail(struct sk_buff *head, + struct sk_buff *tail) +{ + struct sk_buff *old_tail = NGBE_CB(head)->tail; + + if (old_tail) { + ngbe_merge_active_tail(old_tail); + old_tail->next = tail; + } else { + skb_shinfo(head)->frag_list = tail; + } + + NGBE_CB(tail)->head = head; + NGBE_CB(head)->tail = tail; +} + +/** + * ngbe_close_active_frag_list - cleanup pointers on a frag_list skb + * @head: pointer to head of an active frag list + * + * This function will clear the frag_tail_tracker pointer on an active + * frag_list and returns true if the pointer was actually set + **/ +static inline bool ngbe_close_active_frag_list(struct sk_buff *head) +{ + struct sk_buff *tail = NGBE_CB(head)->tail; + + if (!tail) + return false; + + ngbe_merge_active_tail(tail); + + NGBE_CB(head)->tail = NULL; + + return true; +} + +#endif +#ifdef HAVE_VLAN_RX_REGISTER +/** + * ngbe_receive_skb - Send a completed packet up the stack + * @q_vector: structure containing interrupt and ring information + * @skb: packet to send up + **/ +static void ngbe_receive_skb(struct ngbe_q_vector *q_vector, + struct sk_buff *skb) +{ + u16 vlan_tag = NGBE_CB(skb)->vid; + +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) + if (vlan_tag & VLAN_VID_MASK) { + /* by placing vlgrp at start of structure we can alias it */ + struct vlan_group **vlgrp = netdev_priv(skb->dev); + if (!*vlgrp) + dev_kfree_skb_any(skb); + else if (q_vector->netpoll_rx) + vlan_hwaccel_rx(skb, *vlgrp, vlan_tag); + else + vlan_gro_receive(&q_vector->napi, + *vlgrp, vlan_tag, skb); + } else { +#endif + if (q_vector->netpoll_rx) + netif_rx(skb); + else + napi_gro_receive(&q_vector->napi, skb); +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) + } +#endif +} + +#endif /* HAVE_VLAN_RX_REGISTER */ +#ifndef NGBE_NO_LRO +/** + * ngbe_can_lro - returns true if packet is TCP/IPV4 and LRO is 
enabled + * @rx_ring: structure containing ring specific data + * @rx_desc: pointer to the rx descriptor + * @skb: pointer to the skb to be merged + * + **/ +static inline bool ngbe_can_lro(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct iphdr *iph = (struct iphdr *)skb->data; + ngbe_dptype dec_ptype = decode_rx_desc_ptype(rx_desc); + + /* verify hardware indicates this is IPv4/TCP */ + if (!dec_ptype.known || + NGBE_DEC_PTYPE_ETYPE_NONE != dec_ptype.etype || + NGBE_DEC_PTYPE_IP_IPV4 != dec_ptype.ip || + NGBE_DEC_PTYPE_PROT_TCP != dec_ptype.prot) + return false; + + /* .. and LRO is enabled */ + if (!(rx_ring->netdev->features & NETIF_F_LRO)) + return false; + + /* .. and we are not in promiscuous mode */ + if (rx_ring->netdev->flags & IFF_PROMISC) + return false; + + /* .. and the header is large enough for us to read IP/TCP fields */ + if (!pskb_may_pull(skb, sizeof(struct ngbe_lrohdr))) + return false; + + /* .. and there are no VLANs on packet */ + if (skb->protocol != __constant_htons(ETH_P_IP)) + return false; + + /* .. and we are version 4 with no options */ + if (*(u8 *)iph != 0x45) + return false; + + /* .. and the packet is not fragmented */ + if (iph->frag_off & htons(IP_MF | IP_OFFSET)) + return false; + + /* .. and that next header is TCP */ + if (iph->protocol != IPPROTO_TCP) + return false; + + return true; +} + +static inline struct ngbe_lrohdr *ngbe_lro_hdr(struct sk_buff *skb) +{ + return (struct ngbe_lrohdr *)skb->data; +} + +/** + * ngbe_lro_flush - Indicate packets to upper layer. + * + * Update IP and TCP header part of head skb if more than one + * skb's chained and indicate packets to upper layer. 
+ **/ +static void ngbe_lro_flush(struct ngbe_q_vector *q_vector, + struct sk_buff *skb) +{ + struct ngbe_lro_list *lrolist = &q_vector->lrolist; + + __skb_unlink(skb, &lrolist->active); + + if (NGBE_CB(skb)->append_cnt) { + struct ngbe_lrohdr *lroh = ngbe_lro_hdr(skb); + +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + /* close any active lro contexts */ + ngbe_close_active_frag_list(skb); + +#endif + /* incorporate ip header and re-calculate checksum */ + lroh->iph.tot_len = ntohs(skb->len); + lroh->iph.check = 0; + + /* header length is 5 since we know no options exist */ + lroh->iph.check = ip_fast_csum((u8 *)lroh, 5); + + /* clear TCP checksum to indicate we are an LRO frame */ + lroh->th.check = 0; + + /* incorporate latest timestamp into the tcp header */ + if (NGBE_CB(skb)->tsecr) { + lroh->ts[2] = NGBE_CB(skb)->tsecr; + lroh->ts[1] = htonl(NGBE_CB(skb)->tsval); + } +#ifdef NETIF_F_GSO +#ifdef NAPI_GRO_CB + NAPI_GRO_CB(skb)->data_offset = 0; +#endif + skb_shinfo(skb)->gso_size = NGBE_CB(skb)->mss; + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; +#endif + } + +#ifdef HAVE_VLAN_RX_REGISTER + ngbe_receive_skb(q_vector, skb); +#else + napi_gro_receive(&q_vector->napi, skb); +#endif + lrolist->stats.flushed++; +} + +static void ngbe_lro_flush_all(struct ngbe_q_vector *q_vector) +{ + struct ngbe_lro_list *lrolist = &q_vector->lrolist; + struct sk_buff *skb, *tmp; + + skb_queue_reverse_walk_safe(&lrolist->active, skb, tmp) + ngbe_lro_flush(q_vector, skb); +} + +/* + * ngbe_lro_header_ok - Main LRO function. 
+ **/ +static void ngbe_lro_header_ok(struct sk_buff *skb) +{ + struct ngbe_lrohdr *lroh = ngbe_lro_hdr(skb); + u16 opt_bytes, data_len; + +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + NGBE_CB(skb)->tail = NULL; +#endif + NGBE_CB(skb)->tsecr = 0; + NGBE_CB(skb)->append_cnt = 0; + NGBE_CB(skb)->mss = 0; + + /* ensure that the checksum is valid */ + if (skb->ip_summed != CHECKSUM_UNNECESSARY) + return; + + /* If we see CE codepoint in IP header, packet is not mergeable */ + if (INET_ECN_is_ce(ipv4_get_dsfield(&lroh->iph))) + return; + + /* ensure no bits set besides ack or psh */ + if (lroh->th.fin || lroh->th.syn || lroh->th.rst || + lroh->th.urg || lroh->th.ece || lroh->th.cwr || + !lroh->th.ack) + return; + + /* store the total packet length */ + data_len = ntohs(lroh->iph.tot_len); + + /* remove any padding from the end of the skb */ + __pskb_trim(skb, data_len); + + /* remove header length from data length */ + data_len -= sizeof(struct ngbe_lrohdr); + + /* + * check for timestamps. Since the only option we handle is timestamps, + * we only have to handle the simple case of aligned timestamps + */ + opt_bytes = (lroh->th.doff << 2) - sizeof(struct tcphdr); + if (opt_bytes != 0) { + if ((opt_bytes != TCPOLEN_TSTAMP_ALIGNED) || + !pskb_may_pull(skb, sizeof(struct ngbe_lrohdr) + + TCPOLEN_TSTAMP_ALIGNED) || + (lroh->ts[0] != htonl((TCPOPT_NOP << 24) | + (TCPOPT_NOP << 16) | + (TCPOPT_TIMESTAMP << 8) | + TCPOLEN_TIMESTAMP)) || + (lroh->ts[2] == 0)) { + return; + } + + NGBE_CB(skb)->tsval = ntohl(lroh->ts[1]); + NGBE_CB(skb)->tsecr = lroh->ts[2]; + + data_len -= TCPOLEN_TSTAMP_ALIGNED; + } + + /* record data_len as mss for the packet */ + NGBE_CB(skb)->mss = data_len; + NGBE_CB(skb)->next_seq = ntohl(lroh->th.seq); +} + +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT +static void ngbe_merge_frags(struct sk_buff *lro_skb, struct sk_buff *new_skb) +{ + struct skb_shared_info *sh_info; + struct skb_shared_info *new_skb_info; + unsigned int data_len; + + sh_info =
skb_shinfo(lro_skb); + new_skb_info = skb_shinfo(new_skb); + + /* copy frags into the last skb */ + memcpy(sh_info->frags + sh_info->nr_frags, + new_skb_info->frags, + new_skb_info->nr_frags * sizeof(skb_frag_t)); + + /* copy size data over */ + sh_info->nr_frags += new_skb_info->nr_frags; + data_len = NGBE_CB(new_skb)->mss; + lro_skb->len += data_len; + lro_skb->data_len += data_len; + lro_skb->truesize += data_len; + + /* wipe record of data from new_skb and free it */ + new_skb_info->nr_frags = 0; + new_skb->len = new_skb->data_len = 0; + dev_kfree_skb_any(new_skb); +} + +#endif /* CONFIG_NGBE_DISABLE_PACKET_SPLIT */ +/** + * ngbe_lro_receive - if able, queue skb into lro chain + * @q_vector: structure containing interrupt and ring information + * @new_skb: pointer to current skb being checked + * + * Checks whether the skb given is eligible for LRO and if that's + * fine chains it to the existing lro_skb based on flowid. If an LRO for + * the flow doesn't exist create one. + **/ +static void ngbe_lro_receive(struct ngbe_q_vector *q_vector, + struct sk_buff *new_skb) +{ + struct sk_buff *lro_skb; + struct ngbe_lro_list *lrolist = &q_vector->lrolist; + struct ngbe_lrohdr *lroh = ngbe_lro_hdr(new_skb); + __be32 saddr = lroh->iph.saddr; + __be32 daddr = lroh->iph.daddr; + __be32 tcp_ports = *(__be32 *)&lroh->th; +#ifdef HAVE_VLAN_RX_REGISTER + u16 vid = NGBE_CB(new_skb)->vid; +#else + u16 vid = new_skb->vlan_tci; +#endif + + ngbe_lro_header_ok(new_skb); + + /* + * we have a packet that might be eligible for LRO, + * so see if it matches anything we might expect + */ + skb_queue_walk(&lrolist->active, lro_skb) { + u16 data_len; + + if (*(__be32 *)&ngbe_lro_hdr(lro_skb)->th != tcp_ports || + ngbe_lro_hdr(lro_skb)->iph.saddr != saddr || + ngbe_lro_hdr(lro_skb)->iph.daddr != daddr) + continue; + +#ifdef HAVE_VLAN_RX_REGISTER + if (NGBE_CB(lro_skb)->vid != vid) +#else + if (lro_skb->vlan_tci != vid) +#endif + continue; + + /* out of order packet */ + if 
(NGBE_CB(lro_skb)->next_seq != + NGBE_CB(new_skb)->next_seq) { + ngbe_lro_flush(q_vector, lro_skb); + NGBE_CB(new_skb)->mss = 0; + break; + } + + /* TCP timestamp options have changed */ + if (!NGBE_CB(lro_skb)->tsecr != !NGBE_CB(new_skb)->tsecr) { + ngbe_lro_flush(q_vector, lro_skb); + break; + } + + /* make sure timestamp values are increasing */ + if (NGBE_CB(lro_skb)->tsecr && + NGBE_CB(lro_skb)->tsval > NGBE_CB(new_skb)->tsval) { + ngbe_lro_flush(q_vector, lro_skb); + NGBE_CB(new_skb)->mss = 0; + break; + } + + data_len = NGBE_CB(new_skb)->mss; + + /* Check for all of the above below + * malformed header + * no tcp data + * resultant packet would be too large + * new skb is larger than our current mss + * data would remain in header + * we would consume more frags then the sk_buff contains + * ack sequence numbers changed + * window size has changed + */ + if (data_len == 0 || + data_len > NGBE_CB(lro_skb)->mss || + data_len > NGBE_CB(lro_skb)->free || +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + data_len != new_skb->data_len || + skb_shinfo(new_skb)->nr_frags >= + (MAX_SKB_FRAGS - skb_shinfo(lro_skb)->nr_frags) || +#endif + ngbe_lro_hdr(lro_skb)->th.ack_seq != lroh->th.ack_seq || + ngbe_lro_hdr(lro_skb)->th.window != lroh->th.window) { + ngbe_lro_flush(q_vector, lro_skb); + break; + } + + /* Remove IP and TCP header */ + skb_pull(new_skb, new_skb->len - data_len); + + /* update timestamp and timestamp echo response */ + NGBE_CB(lro_skb)->tsval = NGBE_CB(new_skb)->tsval; + NGBE_CB(lro_skb)->tsecr = NGBE_CB(new_skb)->tsecr; + + /* update sequence and free space */ + NGBE_CB(lro_skb)->next_seq += data_len; + NGBE_CB(lro_skb)->free -= data_len; + + /* update append_cnt */ + NGBE_CB(lro_skb)->append_cnt++; + +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + /* if header is empty pull pages into current skb */ + ngbe_merge_frags(lro_skb, new_skb); +#else + /* chain this new skb in frag_list */ + ngbe_add_active_tail(lro_skb, new_skb); +#endif + + if ((data_len < 
NGBE_CB(lro_skb)->mss) || lroh->th.psh || + skb_shinfo(lro_skb)->nr_frags == MAX_SKB_FRAGS) { + ngbe_lro_hdr(lro_skb)->th.psh |= lroh->th.psh; + ngbe_lro_flush(q_vector, lro_skb); + } + + lrolist->stats.coal++; + return; + } + + if (NGBE_CB(new_skb)->mss && !lroh->th.psh) { + /* if we are at capacity flush the tail */ + if (skb_queue_len(&lrolist->active) >= NGBE_LRO_MAX) { + lro_skb = skb_peek_tail(&lrolist->active); + if (lro_skb) + ngbe_lro_flush(q_vector, lro_skb); + } + + /* update sequence and free space */ + NGBE_CB(new_skb)->next_seq += NGBE_CB(new_skb)->mss; + NGBE_CB(new_skb)->free = 65521 - new_skb->len; + + /* .. and insert at the front of the active list */ + __skb_queue_head(&lrolist->active, new_skb); + + lrolist->stats.coal++; + return; + } + + /* packet not handled by any of the above, pass it to the stack */ +#ifdef HAVE_VLAN_RX_REGISTER + ngbe_receive_skb(q_vector, new_skb); +#else + napi_gro_receive(&q_vector->napi, new_skb); +#endif /* HAVE_VLAN_RX_REGISTER */ +} + +#endif /* NGBE_NO_LRO */ + +static void ngbe_rx_vlan(struct ngbe_ring *ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ +#ifndef HAVE_VLAN_RX_REGISTER + u8 idx = 0; + u16 ethertype; +#endif +#if (defined NETIF_F_HW_VLAN_CTAG_RX) && (defined NETIF_F_HW_VLAN_STAG_RX) + if ((ring->netdev->features & (NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_RX)) && +#elif (defined NETIF_F_HW_VLAN_CTAG_RX) + if ((ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && +#elif (defined NETIF_F_HW_VLAN_STAG_RX) + if ((ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) && +#else + if ((ring->netdev->features & NETIF_F_HW_VLAN_RX) && +#endif + ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_VP)) +#ifndef HAVE_VLAN_RX_REGISTER + { + idx = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + NGBE_RXD_TPID_MASK) >> NGBE_RXD_TPID_SHIFT; + ethertype = ring->q_vector->adapter->hw.tpid[idx]; + __vlan_hwaccel_put_tag(skb, + htons(ethertype), + le16_to_cpu(rx_desc->wb.upper.vlan)); + } +#else /* 
!HAVE_VLAN_RX_REGISTER */ + NGBE_CB(skb)->vid = le16_to_cpu(rx_desc->wb.upper.vlan); + else + NGBE_CB(skb)->vid = 0; +#endif /* !HAVE_VLAN_RX_REGISTER */ +} + +/** + * ngbe_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. + **/ +static void ngbe_process_skb_fields(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ +#ifdef HAVE_PTP_1588_CLOCK + u32 flags = rx_ring->q_vector->adapter->flags; +#endif /* HAVE_PTP_1588_CLOCK */ + +#ifdef NETIF_F_RXHASH + ngbe_rx_hash(rx_ring, rx_desc, skb); +#endif /* NETIF_F_RXHASH */ + + ngbe_rx_checksum(rx_ring, rx_desc, skb); +#ifdef HAVE_PTP_1588_CLOCK + if (unlikely(flags & NGBE_FLAG_RX_HWTSTAMP_ENABLED) && + unlikely(ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_TS))) { + ngbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb); + rx_ring->last_rx_timestamp = jiffies; + } +#endif /* HAVE_PTP_1588_CLOCK */ + + ngbe_rx_vlan(rx_ring, rx_desc, skb); + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static void ngbe_rx_skb(struct ngbe_q_vector *q_vector, + struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ +#ifdef HAVE_NDO_BUSY_POLL + skb_mark_napi_id(skb, &q_vector->napi); + + if (ngbe_qv_busy_polling(q_vector) || q_vector->netpoll_rx) { + netif_receive_skb(skb); + /* exit early if we busy polled */ + return; + } +#endif + +#ifndef NGBE_NO_LRO + if (ngbe_can_lro(rx_ring, rx_desc, skb)) + ngbe_lro_receive(q_vector, skb); + else +#endif +#ifdef HAVE_VLAN_RX_REGISTER + ngbe_receive_skb(q_vector, skb); +#else + napi_gro_receive(&q_vector->napi, skb); +#endif + 
+#ifndef NETIF_F_GRO + rx_ring->netdev->last_rx = jiffies; +#endif +} + +/** + * ngbe_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool ngbe_is_non_eop(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + struct sk_buff *next_skb; +#else + struct ngbe_rx_buffer *rx_buffer = + &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; +#endif + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(NGBE_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ + if (likely(ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_EOP))) + return false; + + /* place skb in next buffer to be received */ +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + next_skb = rx_ring->rx_buffer_info[ntc].skb; + + ngbe_add_active_tail(skb, next_skb); + NGBE_CB(next_skb)->head = skb; +#else + if (ring_is_hs_enabled(rx_ring)) { + rx_buffer->skb = rx_ring->rx_buffer_info[ntc].skb; + rx_buffer->dma = rx_ring->rx_buffer_info[ntc].dma; + rx_ring->rx_buffer_info[ntc].dma = 0; + } + rx_ring->rx_buffer_info[ntc].skb = skb; +#endif + rx_ring->rx_stats.non_eop_descs++; + + return true; +} + +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT +/** + * ngbe_pull_tail - ngbe specific version of skb_pull_tail + * @skb: pointer to current skb being adjusted + * + * This function is an ngbe specific version of __pskb_pull_tail. 
The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +static void ngbe_pull_tail(struct sk_buff *skb) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* + * it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* + * we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(skb->dev, va, NGBE_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + skb_frag_off_add(frag, pull_len); + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * ngbe_dma_sync_frag - perform DMA sync for first frag of SKB + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being updated + * + * This function provides a basic DMA sync up for the first fragment of an + * skb. The reason for doing this is that the first fragment cannot be + * unmapped until we have reached the end of packet descriptor for a buffer + * chain. 
+ */ +static void ngbe_dma_sync_frag(struct ngbe_ring *rx_ring, + struct sk_buff *skb) +{ +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); + +#endif + /* if the page was released unmap it, else just sync our portion */ + if (unlikely(NGBE_CB(skb)->page_released)) { + dma_unmap_page_attrs(rx_ring->dev, NGBE_CB(skb)->dma, + ngbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + NGBE_RX_DMA_ATTR); +#endif + } else if (ring_uses_build_skb(rx_ring)) { + unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; + + dma_sync_single_range_for_cpu(rx_ring->dev, + NGBE_CB(skb)->dma, + offset, + skb_headlen(skb), + DMA_FROM_DEVICE); + } else { + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + + dma_sync_single_range_for_cpu(rx_ring->dev, + NGBE_CB(skb)->dma, + skb_frag_off(frag), + skb_frag_size(frag), + DMA_FROM_DEVICE); + } +} + +/** + * ngbe_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Check for corrupted packet headers caused by senders on the local L2 + * embedded NIC switch not setting up their Tx Descriptors right. These + * should be very rare. + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. 
+ **/ +static bool ngbe_cleanup_headers(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *netdev = rx_ring->netdev; + + if (IS_ERR(skb)) + return true; + + /* verify that the packet does not have any known errors */ + if (unlikely(ngbe_test_staterr(rx_desc, + NGBE_RXD_ERR_FRAME_ERR_MASK) && + !(netdev->features & NETIF_F_RXALL))) { + dev_kfree_skb_any(skb); + return true; + } + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb) && !skb_headlen(skb)) + ngbe_pull_tail(skb); + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/** + * ngbe_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void ngbe_reuse_rx_page(struct ngbe_ring *rx_ring, + struct ngbe_rx_buffer *old_buff) +{ + struct ngbe_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; + + /* transfer page from old buffer to new buffer */ +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + new_buff->page_dma = old_buff->page_dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; +#endif + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, new_buff->page_dma, + new_buff->page_offset, + ngbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); +} + +static inline bool ngbe_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); +} + +/** + * ngbe_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. 
+ **/ +static bool ngbe_add_rx_frag(struct ngbe_ring *rx_ring, + struct ngbe_rx_buffer *rx_buffer, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = ngbe_rx_bufsz(rx_ring); +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int last_offset = ngbe_rx_pg_size(rx_ring) - + ngbe_rx_bufsz(rx_ring); +#endif + + if ((size <= NGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb) && + !ring_is_hs_enabled(rx_ring)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* page is not reserved, we can reuse buffer as-is */ + if (likely(!ngbe_page_is_reserved(page))) + return true; + + /* this page cannot be reused so discard it */ + __free_pages(page, ngbe_rx_pg_order(rx_ring)); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + /* avoid re-using remote pages */ + if (unlikely(ngbe_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > last_offset) + return false; +#endif + + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. 
+ */ + page_ref_inc(page); + + return true; +} + +static struct sk_buff *ngbe_fetch_rx_buffer(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc) +{ + struct ngbe_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + skb = rx_buffer->skb; + + if (likely(!skb)) { + void *page_addr = page_address(page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + NGBE_RX_HDR_SIZE); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return NULL; + } + + /* + * we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + + /* + * Delay unmapping of the first packet. It carries the + * header information, HW may still access the header + * after the writeback. 
Only unmap it when EOP is + * reached + */ + if (likely(ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_EOP))) + goto dma_sync; + + NGBE_CB(skb)->dma = rx_buffer->page_dma; + } else { + if (ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_EOP)) + ngbe_dma_sync_frag(rx_ring, skb); + +dma_sync: + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->page_dma, + rx_buffer->page_offset, + ngbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + rx_buffer->skb = NULL; + } + + /* pull page into skb */ + if (ngbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + ngbe_reuse_rx_page(rx_ring, rx_buffer); + } else if (NGBE_CB(skb)->dma == rx_buffer->page_dma) { + /* the page has been released from the ring */ + NGBE_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->page_dma, + ngbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; + + return skb; +} + +static struct sk_buff *ngbe_fetch_rx_buffer_hs(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc) +{ + struct ngbe_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + int hdr_len = 0; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + skb = rx_buffer->skb; + rx_buffer->skb = NULL; + prefetchw(skb->data); + + if (!skb_is_nonlinear(skb)) { + hdr_len = ngbe_get_hlen(rx_ring, rx_desc); + if (hdr_len > 0) { + __skb_put(skb, hdr_len); + NGBE_CB(skb)->dma_released = true; + NGBE_CB(skb)->dma = rx_buffer->dma; + rx_buffer->dma = 0; + } else { + dma_unmap_single(rx_ring->dev, + rx_buffer->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_buffer->dma = 0; + if (likely(ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_EOP))) + goto dma_sync; + NGBE_CB(skb)->dma = rx_buffer->page_dma; + goto add_frag; + } + } + + if 
(ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_EOP)) { + if (skb_headlen(skb)) { + if (NGBE_CB(skb)->dma_released == true) { + dma_unmap_single(rx_ring->dev, + NGBE_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + NGBE_CB(skb)->dma = 0; + NGBE_CB(skb)->dma_released = false; + } + } else + ngbe_dma_sync_frag(rx_ring, skb); + } + +dma_sync: + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->page_dma, + rx_buffer->page_offset, + ngbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); +add_frag: + /* pull page into skb */ + if (ngbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + ngbe_reuse_rx_page(rx_ring, rx_buffer); + } else if (NGBE_CB(skb)->dma == rx_buffer->page_dma) { + /* the page has been released from the ring */ + NGBE_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->page_dma, + ngbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; + + return skb; +} +#define NGBE_XDP_PASS 0 +#define NGBE_XDP_CONSUMED 1 +#define NGBE_XDP_TX 2 +#define NGBE_XDP_REDIR 4 + + +#define NGBE_TXD_CMD (NGBE_TXD_EOP | \ + NGBE_TXD_RS) + +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_XDP_FRAME_STRUCT +int ngbe_xmit_xdp_ring(struct ngbe_ring *ring, struct xdp_frame *xdpf) +#else +int ngbe_xmit_xdp_ring(struct ngbe_ring *ring, struct xdp_buff *xdp) +#endif +{ + struct ngbe_tx_buffer *tx_buffer; + union ngbe_tx_desc *tx_desc; + u32 len, cmd_type = 0; + dma_addr_t dma; + u16 i; +#ifdef HAVE_XDP_FRAME_STRUCT + len = xdpf->len; +#else + len = xdp->data_end - xdp->data; +#endif + + if (unlikely(!ngbe_desc_unused(ring))) + return NGBE_XDP_CONSUMED; + +#ifdef HAVE_XDP_FRAME_STRUCT + dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE); +#else + dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE); +#endif + if 
(dma_mapping_error(ring->dev, dma)) + return NGBE_XDP_CONSUMED; + + /* record the location of the first descriptor for this packet */ + tx_buffer = &ring->tx_buffer_info[ring->next_to_use]; + tx_buffer->bytecount = len; + tx_buffer->gso_segs = 1; + tx_buffer->protocol = 0; + + i = ring->next_to_use; + tx_desc = NGBE_TX_DESC(ring, i); + + + dma_unmap_len_set(tx_buffer, len, len); + dma_unmap_addr_set(tx_buffer, dma, dma); + +#ifdef HAVE_XDP_FRAME_STRUCT + tx_buffer->xdpf = xdpf; +#else + tx_buffer->data = xdp->data; +#endif + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + cmd_type = ngbe_tx_cmd_type(tx_buffer->tx_flags); + cmd_type |= len | NGBE_TXD_CMD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = + cpu_to_le32(len << NGBE_TXD_PAYLEN_SHIFT); + + + /* Avoid any potential race with xdp_xmit and cleanup */ + smp_wmb(); + + + /* set next_to_watch value indicating a packet is present */ + i++; + if (i == ring->count) + i = 0; + + tx_buffer->next_to_watch = tx_desc; + ring->next_to_use = i; + + return NGBE_XDP_TX; +} +#endif +#ifdef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#ifdef HAVE_XDP_SUPPORT +#include +static inline void +_kc_bpf_warn_invalid_xdp_action(__maybe_unused struct net_device *dev, + __maybe_unused struct bpf_prog *prog, u32 act) +{ + bpf_warn_invalid_xdp_action(act); +} + +#define bpf_warn_invalid_xdp_action(dev, prog, act) \ + _kc_bpf_warn_invalid_xdp_action(dev, prog, act) +#endif /* HAVE_XDP_SUPPORT */ +#endif /* HAVE_NETDEV_PROG_XDP_WARN_ACTION */ + +static bool ngbe_can_reuse_rx_page(struct ngbe_rx_buffer *rx_buffer, + struct ngbe_ring *rx_ring) +{ + struct page *page = rx_buffer->page; +#if (PAGE_SIZE < 8192) +#else + unsigned int last_offset = ngbe_rx_pg_size(rx_ring) - + ngbe_rx_bufsz(rx_ring); +#endif + + /* avoid re-using remote pages */ + if (unlikely(ngbe_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of 
page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; +#else + if (rx_buffer->page_offset > last_offset) + return false; + +#endif + page_ref_inc(page); + return true; +} + + +static void ngbe_put_rx_buffer(struct ngbe_ring *rx_ring, + struct ngbe_rx_buffer *rx_buffer, + struct sk_buff *skb) +{ + if (ngbe_can_reuse_rx_page(rx_buffer, rx_ring)) { + /* hand second half of page back to the ring */ + ngbe_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* We are not reusing the buffer so unmap it and free + * any references we are holding to it + */ + dma_unmap_page(rx_ring->dev, rx_buffer->page_dma, + ngbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + __free_pages(rx_buffer->page, + ngbe_rx_pg_order(rx_ring)); + + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; +} + +static struct sk_buff * +ngbe_run_xdp(struct ngbe_adapter __maybe_unused *adapter, + struct ngbe_ring __maybe_unused *rx_ring, + struct ngbe_rx_buffer __maybe_unused *rx_buffer, + struct xdp_buff __maybe_unused *xdp) +{ + int result = NGBE_XDP_PASS; +#ifdef HAVE_XDP_SUPPORT + struct bpf_prog *xdp_prog; + struct ngbe_ring *ring; +#ifdef HAVE_XDP_FRAME_STRUCT + struct xdp_frame *xdpf; +#endif + int err; + u32 act; + rcu_read_lock(); + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + + if (!xdp_prog) { + goto xdp_out; + } +#ifdef HAVE_XDP_FRAME_STRUCT + prefetchw(xdp->data_hard_start); /* xdp_frame write */ +#endif + act = bpf_prog_run_xdp(xdp_prog, xdp); + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + page_ref_inc(rx_buffer->page); +#ifdef HAVE_XDP_FRAME_STRUCT + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) { + result = NGBE_XDP_CONSUMED; + break; + } +#endif + ring = adapter->xdp_ring[rx_ring->queue_index % adapter->num_xdp_queues]; + if (static_branch_unlikely(&ngbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); +#ifdef HAVE_XDP_FRAME_STRUCT + result = ngbe_xmit_xdp_ring(ring, xdpf); +#else + result = ngbe_xmit_xdp_ring(ring, xdp); +#endif + 
if (static_branch_unlikely(&ngbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); + break; + case XDP_REDIRECT: + page_ref_inc(rx_buffer->page); + err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); + if (!err) { + result = NGBE_XDP_REDIR; + } else { + result = NGBE_XDP_CONSUMED; + } + break; + default: + bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + /* fallthrough -- handle aborts by dropping packet */ + fallthrough; + case XDP_DROP: + result = NGBE_XDP_CONSUMED; + break; + } +xdp_out: + rcu_read_unlock(); +#endif /* HAVE_XDP_SUPPORT */ + + return ERR_PTR(-result); +} + +static unsigned int ngbe_rx_frame_truesize(struct ngbe_ring *rx_ring, + unsigned int size) +{ + + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = ngbe_rx_bufsz(rx_ring); +#else + truesize = ALIGN(size, L1_CACHE_BYTES) +#ifdef HAVE_XDP_BUFF_FRAME_SZ + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +#endif + ; +#endif + return truesize; +} +static void ngbe_rx_buffer_flip(struct ngbe_ring *rx_ring, + struct ngbe_rx_buffer *rx_buffer, + unsigned int size) +{ + unsigned int truesize = ngbe_rx_frame_truesize(rx_ring, size); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +/** + * ngbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the syste. + * + * Returns amount of work completed. 
+ **/ +static int ngbe_clean_rx_irq(struct ngbe_q_vector *q_vector, + struct ngbe_ring *rx_ring, + int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0, xdp_xmit = 0; + u16 cleaned_count = ngbe_desc_unused(rx_ring); + struct ngbe_adapter *adapter = q_vector->adapter; + struct xdp_buff xdp; + xdp.data = NULL; + xdp.data_end = NULL; +#ifdef HAVE_XDP_BUFF_RXQ + if(rx_ring->xdp_prog) + xdp.rxq = &rx_ring->xdp_rxq; +#endif + +#ifdef HAVE_XDP_BUFF_FRAME_SZ + /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ +#if (PAGE_SIZE < 8192) + if(rx_ring->xdp_prog) + xdp.frame_sz = ngbe_rx_frame_truesize(rx_ring, 0); +#endif +#endif + do { + struct ngbe_rx_buffer *rx_buffer; + union ngbe_rx_desc *rx_desc; + struct sk_buff *skb = NULL; + unsigned int size = 0; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= NGBE_RX_BUFFER_WRITE) { + ngbe_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = NGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); + + if (!ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_DD)) { + break; + } + if (rx_ring->xdp_prog){ + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) { + break; + } + } + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + if (rx_ring->xdp_prog) { + xdp.data = page_address(rx_buffer->page) + + rx_buffer->page_offset; +#ifdef HAVE_XDP_BUFF_DATA_META + xdp.data_meta = xdp.data; +#endif /* HAVE_XDP_BUFF_DATA_META */ + xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM; + xdp.data_end = xdp.data + size; + +#ifdef HAVE_XDP_BUFF_FRAME_SZ +#if (PAGE_SIZE > 4096) + /* At larger PAGE_SIZE, frame_sz depend on len size */ + xdp.frame_sz = ngbe_rx_frame_truesize(rx_ring, size); +#endif +#endif + skb = ngbe_run_xdp(adapter, rx_ring, rx_buffer, &xdp); + } + if (IS_ERR(skb)) { + 
if ((PTR_ERR(skb) == -NGBE_XDP_TX) || (PTR_ERR(skb) == -NGBE_XDP_REDIR)) { + xdp_xmit = (-PTR_ERR(skb)); + ngbe_rx_buffer_flip(rx_ring, rx_buffer, + size); + } + total_rx_packets++; + total_rx_bytes += size; + } else { + if (ring_is_hs_enabled(rx_ring)) + skb = ngbe_fetch_rx_buffer_hs(rx_ring, rx_desc); + else + skb = ngbe_fetch_rx_buffer(rx_ring, rx_desc); + } + /* exit if we failed to retrieve a buffer */ + if (!skb) { + break; + } + if (IS_ERR(skb)) { + ngbe_put_rx_buffer(rx_ring, rx_buffer, skb); + } + cleaned_count++; + + /* place incomplete frames back on ring for completion */ + if (ngbe_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + /* verify the packet layout is correct */ + if (ngbe_cleanup_headers(rx_ring, rx_desc, skb)) + continue; + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + ngbe_process_skb_fields(rx_ring, rx_desc, skb); + + ngbe_rx_skb(q_vector, rx_ring, rx_desc, skb); + + /* update budget accounting */ + total_rx_packets++; + } while (likely(total_rx_packets < budget)); +#ifdef HAVE_XDP_SUPPORT + if (xdp_xmit & NGBE_XDP_TX) { + struct ngbe_ring *ring = adapter->xdp_ring[rx_ring->queue_index % adapter->num_xdp_queues]; + if (static_branch_unlikely(&ngbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + wmb(); + writel(ring->next_to_use, ring->tail); + if (static_branch_unlikely(&ngbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); + } + if (xdp_xmit & NGBE_XDP_REDIR) + xdp_do_flush_map(); +#endif + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + +#ifndef NGBE_NO_LRO + ngbe_lro_flush_all(q_vector); +#endif + return total_rx_packets; +} + +#else /* CONFIG_NGBE_DISABLE_PACKET_SPLIT */ +/** + * ngbe_clean_rx_irq - Clean completed 
descriptors from Rx ring - legacy + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a legacy approach to Rx interrupt + * handling. This version will perform better on systems with a low cost + * dma mapping API. + * + * Returns amount of work completed. + **/ +static int ngbe_clean_rx_irq(struct ngbe_q_vector *q_vector, + struct ngbe_ring *rx_ring, + int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 len = 0; + u16 cleaned_count = ngbe_desc_unused(rx_ring); + + do { + struct ngbe_rx_buffer *rx_buffer; + union ngbe_rx_desc *rx_desc; + struct sk_buff *skb; + u16 ntc; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= NGBE_RX_BUFFER_WRITE) { + ngbe_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + ntc = rx_ring->next_to_clean; + rx_desc = NGBE_RX_DESC(rx_ring, ntc); + rx_buffer = &rx_ring->rx_buffer_info[ntc]; + + if (!ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_DD)) + break; + + /* + * This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * RXD_STAT_DD bit is set + */ + rmb(); + + skb = rx_buffer->skb; + + prefetch(skb->data); + + len = le16_to_cpu(rx_desc->wb.upper.length); + /* pull the header of the skb in */ + __skb_put(skb, len); + + /* + * Delay unmapping of the first packet. It carries the + * header information, HW may still access the header after + * the writeback. 
Only unmap it when EOP is reached + */ + if (!NGBE_CB(skb)->head) { + NGBE_CB(skb)->dma = rx_buffer->dma; + } else { + skb = ngbe_merge_active_tail(skb); + dma_unmap_single(rx_ring->dev, + rx_buffer->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + } + + /* clear skb reference in buffer info structure */ + rx_buffer->skb = NULL; + rx_buffer->dma = 0; + + cleaned_count++; + + if (ngbe_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + dma_unmap_single(rx_ring->dev, + NGBE_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + + NGBE_CB(skb)->dma = 0; + + if (ngbe_close_active_frag_list(skb) && + !NGBE_CB(skb)->append_cnt) { + dev_kfree_skb_any(skb); + continue; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(ngbe_test_staterr(rx_desc, + NGBE_RXD_ERR_FRAME_ERR_MASK))) { + dev_kfree_skb_any(skb); + continue; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + ngbe_process_skb_fields(rx_ring, rx_desc, skb); + + ngbe_rx_skb(q_vector, rx_ring, rx_desc, skb); + + /* update budget accounting */ + total_rx_packets++; + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + if (cleaned_count) + ngbe_alloc_rx_buffers(rx_ring, cleaned_count); + +#ifndef NGBE_NO_LRO + ngbe_lro_flush_all(q_vector); + +#endif /* NGBE_NO_LRO */ + return total_rx_packets; +} + +#endif /* CONFIG_NGBE_DISABLE_PACKET_SPLIT */ +#ifdef HAVE_NDO_BUSY_POLL +/* must be called with local_bh_disable()d */ +static int ngbe_busy_poll_recv(struct napi_struct *napi) +{ + struct ngbe_q_vector *q_vector = + container_of(napi, struct ngbe_q_vector, napi); + struct ngbe_adapter *adapter = q_vector->adapter; + struct ngbe_ring 
*ring; + int found = 0; + + if (test_bit(__NGBE_DOWN, &adapter->state)) + return LL_FLUSH_FAILED; + + if (!ngbe_qv_lock_poll(q_vector)) + return LL_FLUSH_BUSY; + + ngbe_for_each_ring(ring, q_vector->rx) { + found = ngbe_clean_rx_irq(q_vector, ring, 4); +#ifdef BP_EXTENDED_STATS + if (found) + ring->stats.cleaned += found; + else + ring->stats.misses++; +#endif + if (found) + break; + } + + ngbe_qv_unlock_poll(q_vector); + + return found; +} + +#endif /* HAVE_NDO_BUSY_POLL */ +/** + * ngbe_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + * + * ngbe_configure_msix sets up the hardware to properly generate MSI-X + * interrupts. + **/ +static void ngbe_configure_msix(struct ngbe_adapter *adapter) +{ + u16 v_idx; + u32 i; + u32 eitrsel = 0; + + + /* Populate MSIX to EITR Select */ + if (!(adapter->flags & NGBE_FLAG_VMDQ_ENABLED)) + wr32(&adapter->hw, NGBE_PX_ITRSEL, eitrsel); + else { + for(i = 0;i < adapter->num_vfs; i++) { + eitrsel |= 1 << i; + } + wr32(&adapter->hw, NGBE_PX_ITRSEL, eitrsel); + } + + /* + * Populate the IVAR table and set the ITR values to the + * corresponding register. 
+ */ + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { + struct ngbe_q_vector *q_vector = adapter->q_vector[v_idx]; + struct ngbe_ring *ring; + + ngbe_for_each_ring(ring, q_vector->rx) + ngbe_set_ivar(adapter, 0, ring->reg_idx, v_idx); + + ngbe_for_each_ring(ring, q_vector->tx) + ngbe_set_ivar(adapter, 1, ring->reg_idx, v_idx); + + ngbe_write_eitr(q_vector); + } + + /* misc ivar from seq 1 to seq 8 */ + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) + v_idx += adapter->ring_feature[RING_F_VMDQ].offset; + + ngbe_set_ivar(adapter, -1, 0, v_idx); + wr32(&adapter->hw, NGBE_PX_ITR(v_idx), 1950); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * ngbe_update_itr - update the dynamic ITR value based on statistics + * @q_vector: structure containing interrupt and ring information + * @ring_container: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
+ * this functionality is controlled by the InterruptThrottleRate module + * parameter (see ngbe_param.c) + **/ +#if 0 +static void ngbe_update_itr(struct ngbe_q_vector *q_vector, + struct ngbe_ring_container *ring_container) +{ + int bytes = ring_container->total_bytes; + int packets = ring_container->total_packets; + u32 timepassed_us; + u64 bytes_perint; + u8 itr_setting = ring_container->itr; + + if (packets == 0) + return; + + /* simple throttlerate manangbeent + * 0-10MB/s lowest (100000 ints/s) + * 10-20MB/s low (20000 ints/s) + * 20-1249MB/s bulk (12000 ints/s) + */ + /* what was last interrupt timeslice? */ + timepassed_us = q_vector->itr >> 2; + if (timepassed_us == 0) + return; + bytes_perint = bytes / timepassed_us; /* bytes/usec */ + + switch (itr_setting) { + case lowest_latency: + if (bytes_perint > 10) { + itr_setting = low_latency; + } + break; + case low_latency: + if (bytes_perint > 20) { + itr_setting = bulk_latency; + } else if (bytes_perint <= 10) { + itr_setting = lowest_latency; + } + break; + case bulk_latency: + if (bytes_perint <= 20) { + itr_setting = low_latency; + } + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itr_setting; +} +#endif +/** + * ngbe_write_eitr - write EITR register in hardware specific way + * @q_vector: structure containing interrupt and ring information + * + * This function is made to be called by ethtool and by the driver + * when it needs to update EITR registers at runtime. Hardware + * specific quirks/differences are taken care of here. 
+ */ +void ngbe_write_eitr(struct ngbe_q_vector *q_vector) +{ + struct ngbe_adapter *adapter = q_vector->adapter; + struct ngbe_hw *hw = &adapter->hw; + int v_idx = q_vector->v_idx; + u32 itr_reg = q_vector->itr & NGBE_MAX_EITR; + + itr_reg |= NGBE_PX_ITR_CNT_WDIS; + + wr32(hw, NGBE_PX_ITR(v_idx), itr_reg); +} + +#if 0 +static void ngbe_set_itr(struct ngbe_q_vector *q_vector) +{ + u16 new_itr = q_vector->itr; + u8 current_itr; + + ngbe_update_itr(q_vector, &q_vector->tx); + ngbe_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = NGBE_70K_ITR; + break; + case low_latency: + new_itr = NGBE_20K_ITR; + break; + case bulk_latency: + new_itr = NGBE_7K_ITR; + break; + default: + break; + } + + if (new_itr != q_vector->itr) { + /* do an exponential smoothing */ + new_itr = (20 * new_itr * q_vector->itr) / + ((19 * new_itr) + q_vector->itr); + + /* save the algorithm value here */ + q_vector->itr = new_itr; + ngbe_write_eitr(q_vector); + } +} +#endif +/** + * ngbe_check_overtemp_subtask - check for over temperature + * @adapter: pointer to adapter + **/ +static void ngbe_check_overtemp_subtask(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 eicr = adapter->interrupt_event; + int temp_state; +#ifdef HAVE_VIRTUAL_STATION + struct net_device *upper; + struct list_head *iter; +#endif + + if (test_bit(__NGBE_DOWN, &adapter->state)) + return; + if (!(adapter->flags2 & NGBE_FLAG2_TEMP_SENSOR_CAPABLE)) + return; + if (!(adapter->flags2 & NGBE_FLAG2_TEMP_SENSOR_EVENT)) + return; + + adapter->flags2 &= ~NGBE_FLAG2_TEMP_SENSOR_EVENT; + + /* + * Since the warning interrupt is for both ports + * we don't have to check if: + * - This interrupt wasn't for our port. 
+ * - We may have missed the interrupt so always have to + * check if we got a LSC + */ + if (!(eicr & NGBE_PX_MISC_IC_OVER_HEAT)) + return; + + + temp_state = ngbe_phy_check_overtemp(hw); + if (!temp_state || temp_state == NGBE_NOT_IMPLEMENTED) + return; + + if (temp_state == NGBE_ERR_UNDERTEMP && + test_bit(__NGBE_HANGING, &adapter->state)) { + e_crit(drv, "%s\n", ngbe_underheat_msg); + wr32m(&adapter->hw, NGBE_RDB_PB_CTL, + NGBE_RDB_PB_CTL_PBEN, NGBE_RDB_PB_CTL_PBEN); + netif_carrier_on(adapter->netdev); +#ifdef HAVE_VIRTUAL_STATION + netdev_for_each_all_upper_dev_rcu(adapter->netdev, + upper, iter) { + if (!netif_is_macvlan(upper)) + continue; + netif_carrier_on(upper); + } +#endif + clear_bit(__NGBE_HANGING, &adapter->state); + } else if (temp_state == NGBE_ERR_OVERTEMP && + !test_and_set_bit(__NGBE_HANGING, &adapter->state)) { + e_crit(drv, "%s\n", ngbe_overheat_msg); + netif_carrier_off(adapter->netdev); +#ifdef HAVE_VIRTUAL_STATION + netdev_for_each_all_upper_dev_rcu(adapter->netdev, + upper, iter) { + if (!netif_is_macvlan(upper)) + continue; + netif_carrier_off(upper); + } +#endif + wr32m(&adapter->hw, NGBE_RDB_PB_CTL, + NGBE_RDB_PB_CTL_PBEN, 0); + } + + adapter->interrupt_event = 0; +} + +static void ngbe_check_overtemp_event(struct ngbe_adapter *adapter, u32 eicr) +{ + if (!(adapter->flags2 & NGBE_FLAG2_TEMP_SENSOR_CAPABLE)) + return; + + if (!(eicr & NGBE_PX_MISC_IC_OVER_HEAT)) + return; + if (!test_bit(__NGBE_DOWN, &adapter->state)) { + adapter->interrupt_event = eicr; + adapter->flags2 |= NGBE_FLAG2_TEMP_SENSOR_EVENT; + ngbe_service_event_schedule(adapter); + } +} + + +static void ngbe_handle_phy_event(struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = hw->back; + u32 reg; + + reg = rd32(hw, NGBE_GPIO_INTSTATUS); + wr32(hw, NGBE_GPIO_EOI,reg); + + if (!((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA)) + hw->phy.ops.check_event(hw); + adapter->lsc_int++; + adapter->link_check_timeout = jiffies; + if (!test_bit(__NGBE_DOWN, &adapter->state)) { 
+ ngbe_service_event_schedule(adapter); + } +} + +/** + * ngbe_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +void ngbe_irq_enable(struct ngbe_adapter *adapter, bool queues, bool flush) +{ + u32 mask = 0; + struct ngbe_hw *hw = &adapter->hw; + + /* enable misc interrupt */ + mask = NGBE_PX_MISC_IEN_MASK; + + if (adapter->flags2 & NGBE_FLAG2_TEMP_SENSOR_CAPABLE) + mask |= NGBE_PX_MISC_IEN_OVER_HEAT; + +#ifdef HAVE_PTP_1588_CLOCK + mask |= NGBE_PX_MISC_IEN_TIMESYNC; +#endif /* HAVE_PTP_1588_CLOCK */ + + wr32(&adapter->hw, NGBE_GPIO_DDR, 0x1); + wr32(&adapter->hw, NGBE_GPIO_INTEN, 0x3); + wr32(&adapter->hw, NGBE_GPIO_INTTYPE_LEVEL, 0x0); + if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi || + adapter->hw.phy.type == ngbe_phy_internal_yt8521s_sfi) + wr32(&adapter->hw, NGBE_GPIO_POLARITY, 0x0); + else + wr32(&adapter->hw, NGBE_GPIO_POLARITY, 0x3); + + if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi || + adapter->hw.phy.type == ngbe_phy_internal_yt8521s_sfi) + mask |= NGBE_PX_MISC_IEN_GPIO; +// mask &= ~NGBE_PX_MISC_IEN_GPIO; + + wr32(hw, NGBE_PX_MISC_IEN, mask); + + /* unmask interrupt */ + if (queues) + ngbe_intr_enable(&adapter->hw, NGBE_INTR_ALL); + else { + if (!(adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP)) + ngbe_intr_enable(&adapter->hw, NGBE_INTR_MISC(adapter)); + else + ngbe_intr_enable(&adapter->hw, NGBE_INTR_MISC_VMDQ(adapter)); + } + + /* flush configuration */ + if (flush) + NGBE_WRITE_FLUSH(&adapter->hw); +} + +static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data) +{ + struct ngbe_adapter *adapter = data; + struct ngbe_hw *hw = &adapter->hw; + u32 eicr; + u32 ecc; + + eicr = ngbe_misc_isb(adapter, NGBE_ISB_MISC); + if (eicr & (NGBE_PX_MISC_IC_PHY | NGBE_PX_MISC_IC_GPIO)) + ngbe_handle_phy_event(hw); + + if (eicr & NGBE_PX_MISC_IC_VF_MBOX) + ngbe_msg_task(adapter); + + if (eicr & NGBE_PX_MISC_IC_PCIE_REQ_ERR) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "lan id %d, PCIe request 
error founded.\n", hw->bus.lan_id); + if (hw->bus.lan_id == 0) { + adapter->flags2 |= NGBE_FLAG2_PCIE_NEED_RECOVER; + ngbe_service_event_schedule(adapter); + } else + wr32(&adapter->hw, NGBE_MIS_PF_SM, 1); + } + + if (eicr & NGBE_PX_MISC_IC_INT_ERR) { + e_info(link, "Received unrecoverable ECC Err," + "initiating reset.\n"); + ecc = rd32(hw, NGBE_MIS_ST); + e_info(link, "ecc error status is 0x%08x\n", ecc); + if (((ecc & NGBE_MIS_ST_LAN0_ECC) && (hw->bus.lan_id == 0)) || + ((ecc & NGBE_MIS_ST_LAN1_ECC) && (hw->bus.lan_id == 1)) || + ((ecc & NGBE_MIS_ST_LAN2_ECC) && (hw->bus.lan_id == 2)) || + ((ecc & NGBE_MIS_ST_LAN3_ECC) && (hw->bus.lan_id == 3))) { + adapter->flags2 |= NGBE_FLAG2_DEV_RESET_REQUESTED | + NGBE_FLAG2_ECC_ERR_RESET; + } + ngbe_service_event_schedule(adapter); + } + if (eicr & NGBE_PX_MISC_IC_DEV_RST) { + adapter->flags2 |= NGBE_FLAG2_RESET_INTR_RECEIVED; + ngbe_service_event_schedule(adapter); + } + if ((eicr & NGBE_PX_MISC_IC_STALL) || + (eicr & NGBE_PX_MISC_IC_ETH_EVENT)) { + adapter->flags2 |= NGBE_FLAG2_PF_RESET_REQUESTED; + ngbe_service_event_schedule(adapter); + } + + ngbe_check_overtemp_event(adapter, eicr); + +#ifdef HAVE_PTP_1588_CLOCK + if (unlikely(eicr & NGBE_PX_MISC_IC_TIMESYNC)) + ngbe_ptp_check_pps_event(adapter); +#endif + + /* re-enable the original interrupt state, no lsc, no queues */ + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_irq_enable(adapter, false, false); + + return IRQ_HANDLED; +} + +static irqreturn_t ngbe_msix_clean_rings(int __always_unused irq, void *data) +{ + struct ngbe_q_vector *q_vector = data; + + /* EIAM disabled interrupts (on this vector) for us */ + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * ngbe_poll - NAPI polling RX/TX cleanup routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean all queues associated with 
a q_vector. + **/ +int ngbe_poll(struct napi_struct *napi, int budget) +{ + struct ngbe_q_vector *q_vector = + container_of(napi, struct ngbe_q_vector, napi); + struct ngbe_adapter *adapter = q_vector->adapter; + struct ngbe_ring *ring; + int per_ring_budget; + bool clean_complete = true; + + ngbe_for_each_ring(ring, q_vector->tx) { + if (!ngbe_clean_tx_irq(q_vector, ring)) + clean_complete = false; + } + +#ifdef HAVE_NDO_BUSY_POLL + if (test_bit(NAPI_STATE_NPSVC, &napi->state)) + return budget; + + if (!ngbe_qv_lock_napi(q_vector)) + return budget; +#endif + + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling */ + if (q_vector->rx.count > 1) + per_ring_budget = max(budget/q_vector->rx.count, 1); + else + per_ring_budget = budget; + + ngbe_for_each_ring(ring, q_vector->rx) { + int cleaned = ngbe_clean_rx_irq(q_vector, ring, + per_ring_budget); + + if (cleaned >= per_ring_budget) + clean_complete = false; + } +#ifdef HAVE_NDO_BUSY_POLL + ngbe_qv_unlock_napi(q_vector); +#endif + +#ifndef HAVE_NETDEV_NAPI_LIST + if (!netif_running(adapter->netdev)) + clean_complete = true; + +#endif + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* all work done, exit the polling mode */ + napi_complete(napi); +#if 0 + if (adapter->rx_itr_setting == 1) + ngbe_set_itr(q_vector); +#endif + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_intr_enable(&adapter->hw, + NGBE_INTR_Q(q_vector->v_idx)); + + return 0; +} + +/** + * ngbe_request_msix_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * + * ngbe_request_msix_irqs allocates MSI-X vectors and requests + * interrupts from the kernel. 
+ **/ +static int ngbe_request_msix_irqs(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int vector, err; + int ri = 0, ti = 0; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct ngbe_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-TxRx-%d", netdev->name, ri++); + ti++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-rx-%d", netdev->name, ri++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-tx-%d", netdev->name, ti++); + } else { + /* skip this unused q_vector */ + continue; + } + err = request_irq(entry->vector, &ngbe_msix_clean_rings, 0, + q_vector->name, q_vector); + if (err) { + e_err(probe, "request_irq failed for MSIX interrupt" + " '%s' Error: %d\n", q_vector->name, err); + goto free_queue_irqs; + } + } + + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) + vector += adapter->irq_remap_offset; + + err = request_irq(adapter->msix_entries[vector].vector, + ngbe_msix_other, 0, netdev->name, adapter); + + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) + vector -= adapter->irq_remap_offset; + + if (err) { + e_err(probe, "request_irq for msix_other failed: %d\n", err); + goto free_queue_irqs; + } + + return 0; + +free_queue_irqs: + while (vector) { + vector--; + +#ifdef HAVE_IRQ_AFFINITY_HINT + irq_set_affinity_hint(adapter->msix_entries[vector].vector, + NULL); +#endif + free_irq(adapter->msix_entries[vector].vector, + adapter->q_vector[vector]); + } + adapter->flags &= ~NGBE_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + return err; +} + +/** + * ngbe_intr - legacy mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface 
device structure + **/ +static irqreturn_t ngbe_intr(int __always_unused irq, void *data) +{ + struct ngbe_adapter *adapter = data; + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_q_vector *q_vector = adapter->q_vector[0]; + u32 eicr; + u32 eicr_misc; + u32 ecc = 0; + + eicr = ngbe_misc_isb(adapter, NGBE_ISB_VEC0); + if (!eicr) { + /* + * shared interrupt alert! + * the interrupt that we masked before the EICR read. + */ + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_irq_enable(adapter, true, true); + return IRQ_NONE; /* Not our interrupt */ + } + adapter->isb_mem[NGBE_ISB_VEC0] = 0; + if (!(adapter->flags & NGBE_FLAG_MSI_ENABLED)) + wr32(&(adapter->hw), NGBE_PX_INTA, 1); + + eicr_misc = ngbe_misc_isb(adapter, NGBE_ISB_MISC); + if (eicr_misc & (NGBE_PX_MISC_IC_PHY | NGBE_PX_MISC_IC_GPIO)) + ngbe_handle_phy_event(hw); + + if (eicr_misc & NGBE_PX_MISC_IC_INT_ERR) { + e_info(link, "Received unrecoverable ECC Err," + "initiating reset.\n"); + ecc = rd32(hw, NGBE_MIS_ST); + e_info(link, "ecc error status is 0x%08x\n", ecc); + adapter->flags2 |= NGBE_FLAG2_DEV_RESET_REQUESTED | + NGBE_FLAG2_ECC_ERR_RESET; + ngbe_service_event_schedule(adapter); + } + + if (eicr_misc & NGBE_PX_MISC_IC_DEV_RST) { + adapter->flags2 |= NGBE_FLAG2_RESET_INTR_RECEIVED; + ngbe_service_event_schedule(adapter); + } + ngbe_check_overtemp_event(adapter, eicr_misc); + + +#ifdef HAVE_PTP_1588_CLOCK + if (unlikely(eicr_misc & NGBE_PX_MISC_IC_TIMESYNC)) + ngbe_ptp_check_pps_event(adapter); +#endif + + adapter->isb_mem[NGBE_ISB_MISC] = 0; + /* would disable interrupts here but it is auto disabled */ + napi_schedule_irqoff(&q_vector->napi); + + /* + * re-enable link(maybe) and non-queue interrupts, no flush. 
+ * ngbe_poll will re-enable the queue interrupts + */ + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_irq_enable(adapter, false, false); + + return IRQ_HANDLED; +} + +/** + * ngbe_request_irq - initialize interrupts + * @adapter: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static int ngbe_request_irq(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) + err = ngbe_request_msix_irqs(adapter); + else if (adapter->flags & NGBE_FLAG_MSI_ENABLED) + err = request_irq(adapter->pdev->irq, &ngbe_intr, 0, + netdev->name, adapter); + else + err = request_irq(adapter->pdev->irq, &ngbe_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + e_err(probe, "request_irq failed, Error %d\n", err); + + return err; +} + +static void ngbe_free_irq(struct ngbe_adapter *adapter) +{ + int vector; + + if (!(adapter->flags & NGBE_FLAG_MSIX_ENABLED)) { + free_irq(adapter->pdev->irq, adapter); + return; + } + + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct ngbe_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + /* free only the irqs that were actually requested */ + if (!q_vector->rx.ring && !q_vector->tx.ring) + continue; + +#ifdef HAVE_IRQ_AFFINITY_HINT + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(entry->vector, NULL); + +#endif + free_irq(entry->vector, q_vector); + } + + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) { + free_irq( + adapter->msix_entries[vector + adapter->irq_remap_offset].vector, + adapter); + } + else + free_irq(adapter->msix_entries[vector++].vector, adapter); +} + +/** + * ngbe_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +void ngbe_irq_disable(struct ngbe_adapter *adapter) +{ + 
wr32(&adapter->hw, NGBE_PX_MISC_IEN, 0); + ngbe_intr_disable(&adapter->hw, NGBE_INTR_ALL); + + NGBE_WRITE_FLUSH(&adapter->hw); + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) { + int vector; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) + synchronize_irq(adapter->msix_entries[vector].vector); + + synchronize_irq(adapter->msix_entries[vector++].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +/** + * ngbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts + * + **/ +static void ngbe_configure_msi_and_legacy(struct ngbe_adapter *adapter) +{ + struct ngbe_q_vector *q_vector = adapter->q_vector[0]; + struct ngbe_ring *ring; + + ngbe_write_eitr(q_vector); + + ngbe_for_each_ring(ring, q_vector->rx) + ngbe_set_ivar(adapter, 0, ring->reg_idx, 0); + + ngbe_for_each_ring(ring, q_vector->tx) + ngbe_set_ivar(adapter, 1, ring->reg_idx, 0); + + ngbe_set_ivar(adapter, -1, 0, 1); + + e_info(hw, "Legacy interrupt IVAR setup done\n"); +} + +/** + * ngbe_configure_tx_ring - Configure Tx ring after Reset + * @adapter: board private structure + * @ring: structure containing ring specific data + * + * Configure the Tx descriptor ring after a reset. 
+ **/ +void ngbe_configure_tx_ring(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + u64 tdba = ring->dma; + int wait_loop = 10; + u32 txdctl = NGBE_PX_TR_CFG_ENABLE; + u8 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + wr32(hw, NGBE_PX_TR_CFG(reg_idx), NGBE_PX_TR_CFG_SWFLSH); + NGBE_WRITE_FLUSH(hw); + + wr32(hw, NGBE_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32)); + wr32(hw, NGBE_PX_TR_BAH(reg_idx), tdba >> 32); + + /* reset head and tail pointers */ + wr32(hw, NGBE_PX_TR_RP(reg_idx), 0); + wr32(hw, NGBE_PX_TR_WP(reg_idx), 0); + ring->tail = adapter->io_addr + NGBE_PX_TR_WP(reg_idx); + + /* reset ntu and ntc to place SW in sync with hardwdare */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + + txdctl |= NGBE_RING_SIZE(ring) << NGBE_PX_TR_CFG_TR_SIZE_SHIFT; + + /* + * set WTHRESH to encourage burst writeback, it should not be set + * higher than 1 when: + * - ITR is 0 as it could cause false TX hangs + * - ITR is set to > 100k int/sec and BQL is enabled + * + * In order to avoid issues WTHRESH + PTHRESH should always be equal + * to or less than the number of on chip descriptors, which is + * currently 40. + */ + txdctl |= 0x20 << NGBE_PX_TR_CFG_WTHRESH_SHIFT; + + + clear_bit(__NGBE_HANG_CHECK_ARMED, &ring->state); + + /* enable queue */ + wr32(hw, NGBE_PX_TR_CFG(reg_idx), txdctl); + + + /* poll to verify queue is enabled */ + do { + msleep(1); + txdctl = rd32(hw, NGBE_PX_TR_CFG(reg_idx)); + } while (--wait_loop && !(txdctl & NGBE_PX_TR_CFG_ENABLE)); + if (!wait_loop) + e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); +} + + + +/** + * ngbe_configure_tx - Configure Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. 
+ **/ +static void ngbe_configure_tx(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 i; + +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + if (adapter->num_tx_queues > 1) + adapter->netdev->features |= NETIF_F_MULTI_QUEUE; + else + adapter->netdev->features &= ~NETIF_F_MULTI_QUEUE; +#endif + + + /* TDM_CTL.TE must be before Tx queues are enabled */ + wr32m(hw, NGBE_TDM_CTL, + NGBE_TDM_CTL_TE, NGBE_TDM_CTL_TE); + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < adapter->num_tx_queues; i++) + ngbe_configure_tx_ring(adapter, adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + ngbe_configure_tx_ring(adapter, adapter->xdp_ring[i]); + + wr32m(hw, NGBE_TSEC_BUF_AE, 0x3FF, 0x10); + wr32m(hw, NGBE_TSEC_CTL, 0x2, 0); + + wr32m(hw, NGBE_TSEC_CTL, 0x1, 1); + + /* enable mac transmitter */ + wr32m(hw, NGBE_MAC_TX_CFG, + NGBE_MAC_TX_CFG_TE, NGBE_MAC_TX_CFG_TE); +} + +static void ngbe_enable_rx_drop(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + u16 reg_idx = ring->reg_idx; + + u32 srrctl = rd32(hw, NGBE_PX_RR_CFG(reg_idx)); + + srrctl |= NGBE_PX_RR_CFG_DROP_EN; + + wr32(hw, NGBE_PX_RR_CFG(reg_idx), srrctl); +} + +static void ngbe_disable_rx_drop(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + u16 reg_idx = ring->reg_idx; + + u32 srrctl = rd32(hw, NGBE_PX_RR_CFG(reg_idx)); + + srrctl &= ~NGBE_PX_RR_CFG_DROP_EN; + + wr32(hw, NGBE_PX_RR_CFG(reg_idx), srrctl); +} + +void ngbe_set_rx_drop_en(struct ngbe_adapter *adapter) +{ + int i; + + /* + * We should set the drop enable bit if: + * SR-IOV is enabled + * or + * Number of Rx queues > 1 and flow control is disabled + * + * This allows us to avoid head of line blocking for security + * and performance reasons. 
+ */ + if (adapter->num_vfs || (adapter->num_rx_queues > 1 && + !(adapter->hw.fc.current_mode & ngbe_fc_tx_pause))) { + for (i = 0; i < adapter->num_rx_queues; i++) + ngbe_enable_rx_drop(adapter, adapter->rx_ring[i]); + } else { + for (i = 0; i < adapter->num_rx_queues; i++) + ngbe_disable_rx_drop(adapter, adapter->rx_ring[i]); + } +} + +static void ngbe_configure_srrctl(struct ngbe_adapter *adapter, + struct ngbe_ring *rx_ring) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 srrctl; + u16 reg_idx = rx_ring->reg_idx; + + srrctl = rd32m(hw, NGBE_PX_RR_CFG(reg_idx), + ~(NGBE_PX_RR_CFG_RR_HDR_SZ | + NGBE_PX_RR_CFG_RR_BUF_SZ | + NGBE_PX_RR_CFG_SPLIT_MODE)); + + /* configure header buffer length, needed for RSC */ + srrctl |= NGBE_RX_HDR_SIZE << NGBE_PX_RR_CFG_BSIZEHDRSIZE_SHIFT; + + /* configure the packet buffer length */ +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> + NGBE_PX_RR_CFG_BSIZEPKT_SHIFT; +#else + srrctl |= ngbe_rx_bufsz(rx_ring) >> NGBE_PX_RR_CFG_BSIZEPKT_SHIFT; + if (ring_is_hs_enabled(rx_ring)) + srrctl |= NGBE_PX_RR_CFG_SPLIT_MODE; +#endif + + wr32(hw, NGBE_PX_RR_CFG(reg_idx), srrctl); +} + +/** + * Return a number of entries in the RSS indirection table + * + * @adapter: device handle + * + */ +u32 ngbe_rss_indir_tbl_entries(struct ngbe_adapter *adapter) +{ + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) + return 64; + else + return 128; +} + +/** + * Write the RETA table to HW + * + * @adapter: device handle + * + * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. 
+ */ +void ngbe_store_reta(struct ngbe_adapter *adapter) +{ + u32 i, reta_entries = ngbe_rss_indir_tbl_entries(adapter); + struct ngbe_hw *hw = &adapter->hw; + u32 reta = 0; + u8 *indir_tbl = adapter->rss_indir_tbl; + + /* Fill out the redirection table as follows: + * - 8 bit wide entries containing 4 bit RSS index + */ + + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + reta |= indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + wr32(hw, NGBE_RDB_RSSTBL(i >> 2), reta); + reta = 0; + } + } +} + +#if 0 +/** + * Write the RETA table to HW (for devices in SRIOV mode) + * + * @adapter: device handle + * + * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. + */ +static void ngbe_store_vfreta(struct ngbe_adapter *adapter) +{ + u32 i, reta_entries = ngbe_rss_indir_tbl_entries(adapter); + struct ngbe_hw *hw = &adapter->hw; + u32 vfreta = 0; + unsigned int pf_pool = adapter->num_vfs; + + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + wr32(hw, NGBE_RDB_VMRSSTBL(i >> 2, pf_pool), + vfreta); + vfreta = 0; + } + } +} +#endif +static void ngbe_setup_reta(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 i, j; + u32 reta_entries = ngbe_rss_indir_tbl_entries(adapter); + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + + /* + * Program table for at least 2 queues w/ SR-IOV so that VFs can + * make full use of any rings they may have. We will use the + * PSRTYPE register to control how many rings we use within the PF. 
+ */ + if ((adapter->flags & NGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2)) + rss_i = 1; + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + wr32(hw, NGBE_RDB_RSSRK(i), adapter->rss_key[i]); + + /* Fill out redirection table */ + memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); + + for (i = 0, j = 0; i < reta_entries; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + } + + ngbe_store_reta(adapter); +} +#if 0 +static void ngbe_setup_vfreta(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + unsigned int pf_pool = adapter->num_vfs; + int i, j; + +#if 0 + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + wr32(hw, NGBE_RDB_VMRSSRK(i, pf_pool), + adapter->rss_key[i]); +#endif + /* Fill out the redirection table */ + for (i = 0, j = 0; i < 64; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + } + + ngbe_store_vfreta(adapter); +} +#endif +static void ngbe_setup_mrqc(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 rss_field = 0; + + /* VT, and RSS do not coexist at the same time */ + if (adapter->flags & NGBE_FLAG_VMDQ_ENABLED) { +// printk("ngbe_setup_mrqc not process\n"); + return; + } + /* Disable indicating checksum in descriptor, enables RSS hash */ + wr32m(hw, NGBE_PSR_CTL, + NGBE_PSR_CTL_PCSD, NGBE_PSR_CTL_PCSD); + + /* Perform hash on these packet types */ + rss_field = NGBE_RDB_RA_CTL_RSS_IPV4 | + NGBE_RDB_RA_CTL_RSS_IPV4_TCP | + NGBE_RDB_RA_CTL_RSS_IPV6 | + NGBE_RDB_RA_CTL_RSS_IPV6_TCP; + + if (adapter->flags2 & NGBE_FLAG2_RSS_FIELD_IPV4_UDP) + rss_field |= NGBE_RDB_RA_CTL_RSS_IPV4_UDP; + if (adapter->flags2 & NGBE_FLAG2_RSS_FIELD_IPV6_UDP) + rss_field |= NGBE_RDB_RA_CTL_RSS_IPV6_UDP; + + netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); + + ngbe_setup_reta(adapter); +#if 0 + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) { + ngbe_setup_vfreta(adapter); 
+ } else { + ngbe_setup_reta(adapter); + } +#endif + + if (adapter->flags2 & NGBE_FLAG2_RSS_ENABLED) + rss_field |= NGBE_RDB_RA_CTL_RSS_EN; + wr32(hw, NGBE_RDB_RA_CTL, rss_field); +} + +static void ngbe_rx_desc_queue_enable(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + int wait_loop = NGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (NGBE_REMOVED(hw->hw_addr)) + return; + + do { + msleep(1); + rxdctl = rd32(hw, NGBE_PX_RR_CFG(reg_idx)); + } while (--wait_loop && !(rxdctl & NGBE_PX_RR_CFG_RR_EN)); + + if (!wait_loop) { + e_err(drv, "RXDCTL.ENABLE on Rx queue %d " + "not set within the polling period\n", reg_idx); + } +} +#if 0 +/* disable the specified tx ring/queue */ +void ngbe_disable_tx_queue(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + int wait_loop = NGBE_MAX_RX_DESC_POLL; + u32 rxdctl, reg_offset, enable_mask; + u8 reg_idx = ring->reg_idx; + + if (NGBE_REMOVED(hw->hw_addr)) + return; + + reg_offset = NGBE_PX_TR_CFG(reg_idx); + enable_mask = NGBE_PX_TR_CFG_ENABLE; + + /* write value back with TDCFG.ENABLE bit cleared */ + wr32m(hw, reg_offset, enable_mask, 0); + + /* the hardware may take up to 100us to really disable the tx queue */ + do { + udelay(10); + rxdctl = rd32(hw, reg_offset); + } while (--wait_loop && (rxdctl & enable_mask)); + + if (!wait_loop) { + e_err(drv, "TDCFG.ENABLE on Tx queue %d not cleared within " + "the polling period\n", reg_idx); + } +} +#endif +/* disable the specified rx ring/queue */ +void ngbe_disable_rx_queue(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + int wait_loop = NGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (NGBE_REMOVED(hw->hw_addr)) + return; + + /* write value back with RXDCTL.ENABLE bit cleared */ + wr32m(hw, NGBE_PX_RR_CFG(reg_idx), + NGBE_PX_RR_CFG_RR_EN, 0); + + /* hardware may take up to 100us to actually 
disable rx queue */ + do { + udelay(10); + rxdctl = rd32(hw, NGBE_PX_RR_CFG(reg_idx)); + } while (--wait_loop && (rxdctl & NGBE_PX_RR_CFG_RR_EN)); + + if (!wait_loop) { + e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within " + "the polling period\n", reg_idx); + } +} + +void ngbe_configure_rx_ring(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + u64 rdba = ring->dma; + u32 rxdctl; + u16 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + rxdctl = rd32(hw, NGBE_PX_RR_CFG(reg_idx)); + ngbe_disable_rx_queue(adapter, ring); +#ifdef HAVE_XDP_BUFF_RXQ + if(ring->q_vector) + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL)); +#endif + wr32(hw, NGBE_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32)); + wr32(hw, NGBE_PX_RR_BAH(reg_idx), rdba >> 32); + + if (ring->count == NGBE_MAX_RXD) + rxdctl |= 0 << NGBE_PX_RR_CFG_RR_SIZE_SHIFT; + else + rxdctl |= (ring->count / 128) << NGBE_PX_RR_CFG_RR_SIZE_SHIFT; + + rxdctl |= 0x1 << NGBE_PX_RR_CFG_RR_THER_SHIFT; + wr32(hw, NGBE_PX_RR_CFG(reg_idx), rxdctl); + + /* reset head and tail pointers */ + wr32(hw, NGBE_PX_RR_RP(reg_idx), 0); + wr32(hw, NGBE_PX_RR_WP(reg_idx), 0); + ring->tail = adapter->io_addr + NGBE_PX_RR_WP(reg_idx); + + /* reset ntu and ntc to place SW in sync with hardwdare */ + ring->next_to_clean = 0; + ring->next_to_use = 0; +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + ring->next_to_alloc = 0; +#endif + + ngbe_configure_srrctl(adapter, ring); + + /* enable receive descriptor ring */ + wr32m(hw, NGBE_PX_RR_CFG(reg_idx), + NGBE_PX_RR_CFG_RR_EN, NGBE_PX_RR_CFG_RR_EN); + + ngbe_rx_desc_queue_enable(adapter, ring); + ngbe_alloc_rx_buffers(ring, ngbe_desc_unused(ring)); +} + +static void ngbe_setup_psrtype(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int pool; + + /* PSRTYPE must be initialized in adapters */ + u32 psrtype = NGBE_RDB_PL_CFG_L4HDR | + NGBE_RDB_PL_CFG_L3HDR | + 
NGBE_RDB_PL_CFG_L2HDR | + NGBE_RDB_PL_CFG_TUN_OUTER_L2HDR | + NGBE_RDB_PL_CFG_TUN_TUNHDR; + + + for_each_set_bit(pool, &adapter->fwd_bitmask, NGBE_MAX_MACVLANS) { + wr32(hw, NGBE_RDB_PL_CFG(VMDQ_P(pool)), psrtype); + } +} + +/** + * ngbe_configure_bridge_mode - common settings for configuring bridge mode + * @adapter - the private structure + * + * This function's purpose is to remove code duplication and configure some + * settings require to switch bridge modes. + **/ +static void ngbe_configure_bridge_mode(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + + if (adapter->flags & NGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE) { + /* disable Tx loopback, rely on switch hairpin mode */ + wr32m(hw, NGBE_PSR_CTL, + NGBE_PSR_CTL_SW_EN, 0); + } else { + /* enable Tx loopback for internal VF/PF communication */ + wr32m(hw, NGBE_PSR_CTL, + NGBE_PSR_CTL_SW_EN, NGBE_PSR_CTL_SW_EN); + } +} + +static void ngbe_configure_virtualization(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 i; + u8 vfe = 0; + + if (!(adapter->flags & NGBE_FLAG_VMDQ_ENABLED)) + return; + + wr32m(hw, NGBE_PSR_VM_CTL, + NGBE_PSR_VM_CTL_POOL_MASK | + NGBE_PSR_VM_CTL_REPLEN, + VMDQ_P(0) << NGBE_PSR_VM_CTL_POOL_SHIFT | + NGBE_PSR_VM_CTL_REPLEN); + + for_each_set_bit(i, &adapter->fwd_bitmask, NGBE_MAX_MACVLANS) { + /* accept untagged packets until a vlan tag is + * specifically set for the VMDQ queue/pool + */ + wr32m(hw, NGBE_PSR_VM_L2CTL(i), + NGBE_PSR_VM_L2CTL_AUPE, NGBE_PSR_VM_L2CTL_AUPE); + } + + vfe = 1 << (VMDQ_P(0)); + /* Enable only the PF pools for Tx/Rx */ + wr32(hw, NGBE_RDM_POOL_RE, vfe); + wr32(hw, NGBE_TDM_POOL_TE, vfe); + + if (!(adapter->flags & NGBE_FLAG_SRIOV_ENABLED)) + return; + + /* configure default bridge settings */ + ngbe_configure_bridge_mode(adapter); + + /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be + * calling set_ethertype_anti_spoofing for each VF in loop below. 
+ */ + if (hw->mac.ops.set_ethertype_anti_spoofing) { + wr32(hw, + NGBE_PSR_ETYPE_SWC(NGBE_PSR_ETYPE_SWC_FILTER_LLDP), + (NGBE_PSR_ETYPE_SWC_FILTER_EN | /* enable filter */ + NGBE_PSR_ETYPE_SWC_TX_ANTISPOOF | + NGBE_ETH_P_LLDP)); /* LLDP eth procotol type */ + + wr32(hw, + NGBE_PSR_ETYPE_SWC(NGBE_PSR_ETYPE_SWC_FILTER_FC), + (NGBE_PSR_ETYPE_SWC_FILTER_EN | + NGBE_PSR_ETYPE_SWC_TX_ANTISPOOF | + ETH_P_PAUSE)); + } + + for (i = 0; i < adapter->num_vfs; i++) { +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + if (!adapter->vfinfo[i].spoofchk_enabled) + ngbe_ndo_set_vf_spoofchk(adapter->netdev, i, false); +#endif + /* enable ethertype anti spoofing if hw supports it */ + hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i); + } +} + +static void ngbe_set_rx_buffer_len(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + struct ngbe_ring *rx_ring; + int i; + u32 mhadd; +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + u16 rx_buf_len; +#endif + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN); + + mhadd = rd32(hw, NGBE_PSR_MAX_SZ); + if (max_frame != mhadd) { + wr32(hw, NGBE_PSR_MAX_SZ, max_frame); + } + +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + /* MHADD will allow an extra 4 bytes past for vlan tagged frames */ + max_frame += VLAN_HLEN; + + if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE) { + rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; + /* + * Make best use of allocation by using all but 1K of a + * power of 2 allocation that will be used for skb->head. 
+ */ + } else if (max_frame <= NGBE_RXBUFFER_3K) { + rx_buf_len = NGBE_RXBUFFER_3K; + } else if (max_frame <= NGBE_RXBUFFER_7K) { + rx_buf_len = NGBE_RXBUFFER_7K; + } else if (max_frame <= NGBE_RXBUFFER_15K) { + rx_buf_len = NGBE_RXBUFFER_15K; + } else { + rx_buf_len = NGBE_MAX_RXBUFFER; + } +#endif /* CONFIG_NGBE_DISABLE_PACKET_SPLIT */ + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_ring = adapter->rx_ring[i]; + + if (adapter->flags & NGBE_FLAG_RX_HS_ENABLED) { + rx_ring->rx_buf_len = NGBE_RX_HDR_SIZE; + set_ring_hs_enabled(rx_ring); + } else + clear_ring_hs_enabled(rx_ring); + +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + rx_ring->rx_buf_len = rx_buf_len; +#endif /* CONFIG_NGBE_DISABLE_PACKET_SPLIT */ + } +} + +/** + * ngbe_configure_rx - Configure Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
+ **/ +static void ngbe_configure_rx(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i; + u32 rxctrl; + + /* disable receives while setting up the descriptors */ + hw->mac.ops.disable_rx(hw); + + ngbe_setup_psrtype(adapter); + + /* enable hw crc stripping */ + wr32m(hw, NGBE_RSEC_CTL, + NGBE_RSEC_CTL_CRC_STRIP, NGBE_RSEC_CTL_CRC_STRIP); + + /* Program registers for the distribution of queues */ + ngbe_setup_mrqc(adapter); + + /* set_rx_buffer_len must be called before ring initialization */ + ngbe_set_rx_buffer_len(adapter); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + ngbe_configure_rx_ring(adapter, adapter->rx_ring[i]); + + rxctrl = rd32(hw, NGBE_RDB_PB_CTL); + + /* enable all receives */ + rxctrl |= NGBE_RDB_PB_CTL_PBEN; + hw->mac.ops.enable_rx_dma(hw, rxctrl); +} + +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#if defined(NETIF_F_HW_VLAN_CTAG_TX) || defined(NETIF_F_HW_VLAN_STAG_TX) +static int ngbe_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +#else +static int ngbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +#endif /* NETIF_F_HW_VLAN_CTAG_TX || NETIF_F_HW_VLAN_STAG_TX*/ +#else /* !HAVE_INT_NDO_VLAN_RX_ADD_VID */ +static void ngbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + int pool_ndx = VMDQ_P(0); + + /* add VID to filter table */ + if (hw->mac.ops.set_vfta) { +#ifndef HAVE_VLAN_RX_REGISTER + if (vid < VLAN_N_VID) + set_bit(vid, adapter->active_vlans); +#endif + hw->mac.ops.set_vfta(hw, vid, pool_ndx, true); + if (adapter->flags & NGBE_FLAG_VMDQ_ENABLED) { + int i; + /* enable vlan id for all pools */ + 
for_each_set_bit(i, &adapter->fwd_bitmask, + NGBE_MAX_MACVLANS) + hw->mac.ops.set_vfta(hw, vid, + VMDQ_P(i), true); + } + } +#ifndef HAVE_NETDEV_VLAN_FEATURES + + /* + * Copy feature flags from netdev to the vlan netdev for this vid. + * This allows things like TSO to bubble down to our vlan device. + * Some vlans, such as VLAN 0 for DCB will not have a v_netdev so + * we will not have a netdev that needs updating. + */ + if (adapter->vlgrp) { + struct vlan_group *vlgrp = adapter->vlgrp; + struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid); + if (v_netdev) { + v_netdev->features |= netdev->features; + vlan_group_set_device(vlgrp, vid, v_netdev); + } + } +#endif /* HAVE_NETDEV_VLAN_FEATURES */ +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; +#endif +} + +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#if (defined NETIF_F_HW_VLAN_CTAG_RX) || (defined NETIF_F_HW_VLAN_STAG_RX) +static int ngbe_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +#else /* !NETIF_F_HW_VLAN_CTAG_RX */ +static int ngbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +#endif /* NETIF_F_HW_VLAN_CTAG_RX */ +#else +static void ngbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +#endif +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + int pool_ndx = VMDQ_P(0); + + /* User is not allowed to remove vlan ID 0 */ + if (!vid) +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; +#else + return; +#endif + +#ifdef HAVE_VLAN_RX_REGISTER + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_irq_disable(adapter); + + vlan_group_set_device(adapter->vlgrp, vid, NULL); + + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_irq_enable(adapter, true, true); + +#endif /* HAVE_VLAN_RX_REGISTER */ + /* remove VID from filter table */ + if (hw->mac.ops.set_vfta) { + hw->mac.ops.set_vfta(hw, vid, pool_ndx, false); + if (adapter->flags & NGBE_FLAG_VMDQ_ENABLED) { + int i; + /* remove vlan id from all pools */ + 
for_each_set_bit(i, &adapter->fwd_bitmask, + NGBE_MAX_MACVLANS) + hw->mac.ops.set_vfta(hw, vid, + VMDQ_P(i), false); + } + } +#ifndef HAVE_VLAN_RX_REGISTER + + clear_bit(vid, adapter->active_vlans); +#endif +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; +#endif +} + +#ifdef HAVE_8021P_SUPPORT +/** + * ngbe_vlan_strip_disable - helper to disable vlan tag stripping + * @adapter: driver data + */ +void ngbe_vlan_strip_disable(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i, j; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ngbe_ring *ring = adapter->rx_ring[i]; + if (ring->accel) + continue; + j = ring->reg_idx; + wr32m(hw, NGBE_PX_RR_CFG(j), + NGBE_PX_RR_CFG_VLAN, 0); + } +} + +#endif +/** + * ngbe_vlan_strip_enable - helper to enable vlan tag stripping + * @adapter: driver data + */ +void ngbe_vlan_strip_enable(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i, j; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ngbe_ring *ring = adapter->rx_ring[i]; + if (ring->accel) + continue; + j = ring->reg_idx; + wr32m(hw, NGBE_PX_RR_CFG(j), + NGBE_PX_RR_CFG_VLAN, NGBE_PX_RR_CFG_VLAN); + } +} + +#ifdef HAVE_VLAN_RX_REGISTER +static void ngbe_vlan_mode(struct net_device *netdev, struct vlan_group *grp) +#else +void ngbe_vlan_mode(struct net_device *netdev, u32 features) +#endif +{ +#if defined(HAVE_VLAN_RX_REGISTER) || defined(HAVE_8021P_SUPPORT) + struct ngbe_adapter *adapter = netdev_priv(netdev); +#endif +#ifdef HAVE_8021P_SUPPORT + bool enable; +#endif + +#ifdef HAVE_VLAN_RX_REGISTER + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_irq_disable(adapter); + + adapter->vlgrp = grp; + + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_irq_enable(adapter, true, true); +#endif +#ifdef HAVE_8021P_SUPPORT +#ifdef HAVE_VLAN_RX_REGISTER + enable = grp; +#else +#if (defined NETIF_F_HW_VLAN_CTAG_RX) && (defined NETIF_F_HW_VLAN_STAG_RX) + enable = !!(features & (NETIF_F_HW_VLAN_CTAG_RX | + 
NETIF_F_HW_VLAN_STAG_RX)); +#elif (defined NETIF_F_HW_VLAN_CTAG_RX) + enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); +#elif (defined NETIF_F_HW_VLAN_STAG_RX) + enable = !!(features & NETIF_F_HW_VLAN_STAG_RX); +#else + enable = !!(features & NETIF_F_HW_VLAN_RX); +#endif /* NETIF_F_HW_VLAN_CTAG_RX */ +#endif /* HAVE_VLAN_RX_REGISTER */ + if (enable) + /* enable VLAN tag insert/strip */ + ngbe_vlan_strip_enable(adapter); + else + /* disable VLAN tag insert/strip */ + ngbe_vlan_strip_disable(adapter); + +#endif /* HAVE_8021P_SUPPORT */ +} + +static void ngbe_restore_vlan(struct ngbe_adapter *adapter) +{ +#ifdef HAVE_VLAN_RX_REGISTER + ngbe_vlan_mode(adapter->netdev, adapter->vlgrp); + + /* + * add vlan ID 0 and enable vlan tag stripping so we + * always accept priority-tagged traffic + */ +#if (defined NETIF_F_HW_VLAN_CTAG_RX) || (defined NETIF_F_HW_VLAN_STAG_RX) + ngbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); +#else + ngbe_vlan_rx_add_vid(adapter->netdev, 0); +#endif +#ifndef HAVE_8021P_SUPPORT + ngbe_vlan_strip_enable(adapter); +#endif + if (adapter->vlgrp) { + u16 vid; + for (vid = 0; vid < VLAN_N_VID; vid++) { + if (!vlan_group_get_device(adapter->vlgrp, vid)) + continue; +#if (defined NETIF_F_HW_VLAN_CTAG_RX) || (defined NETIF_F_HW_VLAN_STAG_RX) + ngbe_vlan_rx_add_vid(adapter->netdev, + htons(ETH_P_8021Q), vid); +#else + ngbe_vlan_rx_add_vid(adapter->netdev, vid); +#endif + } + } +#else + struct net_device *netdev = adapter->netdev; + u16 vid; + + ngbe_vlan_mode(netdev, netdev->features); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) +#if (defined NETIF_F_HW_VLAN_CTAG_RX) || (defined NETIF_F_HW_VLAN_STAG_RX) + ngbe_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); +#else + ngbe_vlan_rx_add_vid(netdev, vid); +#endif +#endif +} + +#endif +static u8 *ngbe_addr_list_itr(struct ngbe_hw __maybe_unused *hw, + u8 **mc_addr_ptr, u32 *vmdq) +{ +#ifdef NETDEV_HW_ADDR_T_MULTICAST + struct netdev_hw_addr *mc_ptr; +#else + struct dev_mc_list
*mc_ptr; +#endif +#ifdef CONFIG_PCI_IOV + struct ngbe_adapter *adapter = hw->back; +#endif /* CONFIG_PCI_IOV */ + u8 *addr = *mc_addr_ptr; + + /* VMDQ_P implicitely uses the adapter struct when CONFIG_PCI_IOV is + * defined, so we have to wrap the pointer above correctly to prevent + * a warning. + */ + *vmdq = VMDQ_P(0); + +#ifdef NETDEV_HW_ADDR_T_MULTICAST + mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]); + if (mc_ptr->list.next) { + struct netdev_hw_addr *ha; + + ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list); + *mc_addr_ptr = ha->addr; + } +#else + mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]); + if (mc_ptr->next) + *mc_addr_ptr = mc_ptr->next->dmi_addr; +#endif + else + *mc_addr_ptr = NULL; + + return addr; +} + +/** + * ngbe_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +int ngbe_write_mc_addr_list(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; +#ifdef NETDEV_HW_ADDR_T_MULTICAST + struct netdev_hw_addr *ha; +#endif + u8 *addr_list = NULL; + int addr_count = 0; + + if (!hw->mac.ops.update_mc_addr_list) + return -ENOMEM; + + if (!netif_running(netdev)) + return 0; + + + if (netdev_mc_empty(netdev)) { + hw->mac.ops.update_mc_addr_list(hw, NULL, 0, + ngbe_addr_list_itr, true); + } else { +#ifdef NETDEV_HW_ADDR_T_MULTICAST + ha = list_first_entry(&netdev->mc.list, + struct netdev_hw_addr, list); + addr_list = ha->addr; +#else + addr_list = netdev->mc_list->dmi_addr; +#endif + addr_count = netdev_mc_count(netdev); + + hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, + ngbe_addr_list_itr, true); + } + +#ifdef CONFIG_PCI_IOV + ngbe_restore_vf_multicasts(adapter); +#endif + return addr_count; +} + + +void 
ngbe_full_sync_mac_table(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i; + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & NGBE_MAC_STATE_IN_USE) { + hw->mac.ops.set_rar(hw, i, + adapter->mac_table[i].addr, + adapter->mac_table[i].pools, + NGBE_PSR_MAC_SWC_AD_H_AV); + } else { + hw->mac.ops.clear_rar(hw, i); + } + adapter->mac_table[i].state &= ~(NGBE_MAC_STATE_MODIFIED); + } +} + +static void ngbe_sync_mac_table(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i; + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & NGBE_MAC_STATE_MODIFIED) { + if (adapter->mac_table[i].state & + NGBE_MAC_STATE_IN_USE) { + hw->mac.ops.set_rar(hw, i, + adapter->mac_table[i].addr, + adapter->mac_table[i].pools, + NGBE_PSR_MAC_SWC_AD_H_AV); + } else { + hw->mac.ops.clear_rar(hw, i); + } + adapter->mac_table[i].state &= + ~(NGBE_MAC_STATE_MODIFIED); + } + } +} + +int ngbe_available_rars(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 i, count = 0; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state == 0) + count++; + } + return count; +} + +/* this function destroys the first RAR entry */ +static void ngbe_mac_set_default_filter(struct ngbe_adapter *adapter, + u8 *addr) +{ + struct ngbe_hw *hw = &adapter->hw; + + memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN); + adapter->mac_table[0].pools = 1ULL << VMDQ_P(0); + adapter->mac_table[0].state = (NGBE_MAC_STATE_DEFAULT | + NGBE_MAC_STATE_IN_USE); + hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr, + adapter->mac_table[0].pools, + NGBE_PSR_MAC_SWC_AD_H_AV); +} + +int ngbe_add_mac_filter(struct ngbe_adapter *adapter, u8 *addr, u16 pool) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & NGBE_MAC_STATE_IN_USE) { 
+ if (ether_addr_equal(addr, adapter->mac_table[i].addr)) { + if (adapter->mac_table[i].pools != (1ULL << pool)) { + adapter->mac_table[i].pools |= (1ULL << pool); + adapter->mac_table[i].state |= NGBE_MAC_STATE_MODIFIED; + ngbe_sync_mac_table(adapter); + return i; + } + } + } + } + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & NGBE_MAC_STATE_IN_USE) { + continue; + } + adapter->mac_table[i].state |= (NGBE_MAC_STATE_MODIFIED | + NGBE_MAC_STATE_IN_USE); + memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN); + adapter->mac_table[i].pools = (1ULL << pool); + ngbe_sync_mac_table(adapter); + return i; + } + return -ENOMEM; +} + +static void ngbe_flush_sw_mac_table(struct ngbe_adapter *adapter) +{ + u32 i; + struct ngbe_hw *hw = &adapter->hw; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + adapter->mac_table[i].state |= NGBE_MAC_STATE_MODIFIED; + adapter->mac_table[i].state &= ~NGBE_MAC_STATE_IN_USE; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + adapter->mac_table[i].pools = 0; + } + ngbe_sync_mac_table(adapter); +} + +int ngbe_del_mac_filter(struct ngbe_adapter *adapter, u8 *addr, u16 pool) +{ + /* search table for addr, if found, set to 0 and sync */ + u32 i; + struct ngbe_hw *hw = &adapter->hw; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (ether_addr_equal(addr, adapter->mac_table[i].addr)) { + if (adapter->mac_table[i].pools & (1ULL << pool)) { + adapter->mac_table[i].state |= NGBE_MAC_STATE_MODIFIED; + adapter->mac_table[i].pools &= ~(1ULL << pool); + if (!adapter->mac_table[i].pools) { + adapter->mac_table[i].state &= ~NGBE_MAC_STATE_IN_USE; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + } + ngbe_sync_mac_table(adapter); + return 0; + } + } + } + return -ENOMEM; +} + +#ifdef HAVE_SET_RX_MODE +/** + * ngbe_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list 
to the RAR table. + * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +int ngbe_write_uc_addr_list(struct net_device *netdev, int pool) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + int count = 0; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > ngbe_available_rars(adapter)) + return -ENOMEM; + + if (!netdev_uc_empty(netdev)) { +#ifdef NETDEV_HW_ADDR_T_UNICAST + struct netdev_hw_addr *ha; +#else + struct dev_mc_list *ha; +#endif + netdev_for_each_uc_addr(ha, netdev) { +#ifdef NETDEV_HW_ADDR_T_UNICAST + ngbe_del_mac_filter(adapter, ha->addr, pool); + ngbe_add_mac_filter(adapter, ha->addr, pool); +#else + ngbe_del_mac_filter(adapter, ha->da_addr, pool); + ngbe_add_mac_filter(adapter, ha->da_addr, pool); +#endif + count++; + } + } + return count; +} + +#endif + +/** + * ngbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_method entry point is called whenever the unicast/multicast + * address list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast and + * promiscuous mode. 
+ **/ +void ngbe_set_rx_mode(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 fctrl, vmolr, vlnctrl; + int count; + + /* Check for Promiscuous and All Multicast modes */ + fctrl = rd32m(hw, NGBE_PSR_CTL, + ~(NGBE_PSR_CTL_UPE | NGBE_PSR_CTL_MPE)); + vmolr = rd32m(hw, NGBE_PSR_VM_L2CTL(VMDQ_P(0)), + ~(NGBE_PSR_VM_L2CTL_UPE | + NGBE_PSR_VM_L2CTL_MPE | + NGBE_PSR_VM_L2CTL_ROPE | + NGBE_PSR_VM_L2CTL_ROMPE)); + vlnctrl = rd32m(hw, NGBE_PSR_VLAN_CTL, + ~(NGBE_PSR_VLAN_CTL_VFE | + NGBE_PSR_VLAN_CTL_CFIEN)); + + /* set all bits that we expect to always be set */ + fctrl |= NGBE_PSR_CTL_BAM | NGBE_PSR_CTL_MFE; + vmolr |= NGBE_PSR_VM_L2CTL_BAM | + NGBE_PSR_VM_L2CTL_AUPE | + NGBE_PSR_VM_L2CTL_VACC; +#if defined(NETIF_F_HW_VLAN_TX) || \ + defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) + vlnctrl |= NGBE_PSR_VLAN_CTL_VFE; +#endif + + hw->addr_ctrl.user_set_promisc = false; + if (netdev->flags & IFF_PROMISC) { + hw->addr_ctrl.user_set_promisc = true; + fctrl |= (NGBE_PSR_CTL_UPE | NGBE_PSR_CTL_MPE); + /* pf don't want packets routing to vf, so clear UPE */ + vmolr |= NGBE_PSR_VM_L2CTL_MPE; + vlnctrl &= ~NGBE_PSR_VLAN_CTL_VFE; + } + + if (netdev->flags & IFF_ALLMULTI) { + fctrl |= NGBE_PSR_CTL_MPE; + vmolr |= NGBE_PSR_VM_L2CTL_MPE; + } + + /* This is useful for sniffing bad packets. 
*/ + if (netdev->features & NETIF_F_RXALL) { + vmolr |= (NGBE_PSR_VM_L2CTL_UPE | NGBE_PSR_VM_L2CTL_MPE); + vlnctrl &= ~NGBE_PSR_VLAN_CTL_VFE; + /* receive bad packets */ + wr32m(hw, NGBE_RSEC_CTL, + NGBE_RSEC_CTL_SAVE_MAC_ERR, + NGBE_RSEC_CTL_SAVE_MAC_ERR); + } else { + vmolr |= NGBE_PSR_VM_L2CTL_ROPE | NGBE_PSR_VM_L2CTL_ROMPE; + } + + /* + * Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + count = ngbe_write_uc_addr_list(netdev, VMDQ_P(0)); + if (count < 0) { + vmolr &= ~NGBE_PSR_VM_L2CTL_ROPE; + vmolr |= NGBE_PSR_VM_L2CTL_UPE; + } + + /* + * Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = ngbe_write_mc_addr_list(netdev); + if (count < 0) { + vmolr &= ~NGBE_PSR_VM_L2CTL_ROMPE; + vmolr |= NGBE_PSR_VM_L2CTL_MPE; + } + + wr32(hw, NGBE_PSR_VLAN_CTL, vlnctrl); + wr32(hw, NGBE_PSR_CTL, fctrl); + wr32(hw, NGBE_PSR_VM_L2CTL(VMDQ_P(0)), vmolr); + +#if (defined NETIF_F_HW_VLAN_CTAG_RX) && (defined NETIF_F_HW_VLAN_STAG_RX) + if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + (netdev->features & NETIF_F_HW_VLAN_STAG_RX)) +#elif (defined NETIF_F_HW_VLAN_CTAG_RX) + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) +#elif (defined NETIF_F_HW_VLAN_STAG_RX) + if (netdev->features & NETIF_F_HW_VLAN_STAG_RX) +#else + if (netdev->features & NETIF_F_HW_VLAN_RX) +#endif + ngbe_vlan_strip_enable(adapter); + else + ngbe_vlan_strip_disable(adapter); +} + +static void ngbe_napi_enable_all(struct ngbe_adapter *adapter) +{ + struct ngbe_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; +#ifdef HAVE_NDO_BUSY_POLL + ngbe_qv_init_lock(adapter->q_vector[q_idx]); +#endif + napi_enable(&q_vector->napi); + } +} + +static void ngbe_napi_disable_all(struct ngbe_adapter *adapter) +{ + struct 
ngbe_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; + napi_disable(&q_vector->napi); +#ifdef HAVE_NDO_BUSY_POLL + while (!ngbe_qv_disable(adapter->q_vector[q_idx])) { + pr_info("QV %d locked\n", q_idx); + usleep_range(1000, 20000); + } +#endif + } +} + +#ifdef NETIF_F_GSO_PARTIAL +/* NETIF_F_GSO_IPXIP4/6 may not be defined in all distributions */ +#ifndef NETIF_F_GSO_IPXIP4 +#define NETIF_F_GSO_IPXIP4 0 +#endif +#ifndef NETIF_F_GSO_IPXIP6 +#define NETIF_F_GSO_IPXIP6 0 +#endif +#define NGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) +#endif /* NETIF_F_GSO_PARTIAL */ + +static inline unsigned long ngbe_tso_features(void) +{ + unsigned long features = 0; + +#ifdef NETIF_F_TSO + features |= NETIF_F_TSO; +#endif /* NETIF_F_TSO */ +#ifdef NETIF_F_TSO6 + features |= NETIF_F_TSO6; +#endif /* NETIF_F_TSO6 */ +#ifdef NETIF_F_GSO_PARTIAL + features |= NETIF_F_GSO_PARTIAL | NGBE_GSO_PARTIAL_FEATURES; +#endif + + return features; +} + +#ifndef NGBE_NO_LLI +static void ngbe_configure_lli(struct ngbe_adapter *adapter) +{ + /* lli should only be enabled with MSI-X and MSI */ + if (!(adapter->flags & NGBE_FLAG_MSI_ENABLED) && + !(adapter->flags & NGBE_FLAG_MSIX_ENABLED)) + return; + + if (adapter->lli_etype) { + wr32(&adapter->hw, NGBE_RDB_5T_CTL1(0), + (NGBE_RDB_5T_CTL1_LLI | + NGBE_RDB_5T_CTL1_SIZE_BP)); + wr32(&adapter->hw, NGBE_RDB_ETYPE_CLS(0), + NGBE_RDB_ETYPE_CLS_LLI); + wr32(&adapter->hw, NGBE_PSR_ETYPE_SWC(0), + (adapter->lli_etype | + NGBE_PSR_ETYPE_SWC_FILTER_EN)); + } + + if (adapter->lli_port) { + wr32(&adapter->hw, NGBE_RDB_5T_CTL1(0), + (NGBE_RDB_5T_CTL1_LLI | + NGBE_RDB_5T_CTL1_SIZE_BP)); + + wr32(&adapter->hw, NGBE_RDB_5T_CTL0(0), + (NGBE_RDB_5T_CTL0_POOL_MASK_EN | + (NGBE_RDB_5T_CTL0_PRIORITY_MASK << + NGBE_RDB_5T_CTL0_PRIORITY_SHIFT) | + 
(NGBE_RDB_5T_CTL0_DEST_PORT_MASK << + NGBE_RDB_5T_CTL0_5TUPLE_MASK_SHIFT))); + + wr32(&adapter->hw, NGBE_RDB_5T_SDP(0), + (adapter->lli_port << 16)); + } + + if (adapter->lli_size) { + wr32(&adapter->hw, NGBE_RDB_5T_CTL1(0), + NGBE_RDB_5T_CTL1_LLI); + wr32m(&adapter->hw, NGBE_RDB_LLI_THRE, + NGBE_RDB_LLI_THRE_SZ(~0), adapter->lli_size); + wr32(&adapter->hw, NGBE_RDB_5T_CTL0(0), + (NGBE_RDB_5T_CTL0_POOL_MASK_EN | + (NGBE_RDB_5T_CTL0_PRIORITY_MASK << + NGBE_RDB_5T_CTL0_PRIORITY_SHIFT) | + (NGBE_RDB_5T_CTL0_5TUPLE_MASK_MASK << + NGBE_RDB_5T_CTL0_5TUPLE_MASK_SHIFT))); + } + + if (adapter->lli_vlan_pri) { + wr32m(&adapter->hw, NGBE_RDB_LLI_THRE, + NGBE_RDB_LLI_THRE_PRIORITY_EN | + NGBE_RDB_LLI_THRE_UP(~0), + NGBE_RDB_LLI_THRE_PRIORITY_EN | + (adapter->lli_vlan_pri << NGBE_RDB_LLI_THRE_UP_SHIFT)); + } +} + +#endif /* NGBE_NO_LLI */ +/* Additional bittime to account for NGBE framing */ +#define NGBE_ETH_FRAMING 20 + + +/* + * ngbe_hpbthresh - calculate high water mark for flow control + * + * @adapter: board private structure to calculate for + * @pb - packet buffer to calculate + */ +static int ngbe_hpbthresh(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct net_device *dev = adapter->netdev; + int link, tc, kb, marker; + u32 dv_id, rx_pba; + + /* Calculate max LAN frame size */ + tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NGBE_ETH_FRAMING; + + /* Calculate delay value for device */ + dv_id = NGBE_DV(link, tc); + + /* Loopback switch introduces additional latency */ + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) + dv_id += NGBE_B2BT(tc); + + /* Delay value is calculated in bit times convert to KB */ + kb = NGBE_BT2KB(dv_id); + rx_pba = rd32(hw, NGBE_RDB_PB_SZ) + >> NGBE_RDB_PB_SZ_SHIFT; + + marker = rx_pba - kb; + + /* It is possible that the packet buffer is not large enough + * to provide required headroom. In this case throw an error + * to user and a do the best we can. 
+ */ + if (marker < 0) { + e_warn(drv, "Packet Buffer can not provide enough " + "headroom to support flow control. " + "Decrease MTU or number of traffic classes\n"); + marker = tc + 1; + } + + return marker; +} + +/* + * ngbe_lpbthresh - calculate low water mark for flow control + * + * @adapter: board private structure to calculate for + * @pb - packet buffer to calculate + */ +static int ngbe_lpbthresh(struct ngbe_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + int tc; + u32 dv_id; + + /* Calculate max LAN frame size */ + tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; + + /* Calculate delay value for device */ + dv_id = NGBE_LOW_DV(tc); + + /* Delay value is calculated in bit times convert to KB */ + return NGBE_BT2KB(dv_id); +} + + +/* + * ngbe_pbthresh_setup - calculate and setup high low water marks + */ + +static void ngbe_pbthresh_setup(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int num_tc = netdev_get_num_tc(adapter->netdev); + + if (!num_tc) + num_tc = 1; + + hw->fc.high_water = ngbe_hpbthresh(adapter); + hw->fc.low_water = ngbe_lpbthresh(adapter); + + /* Low water marks must not be larger than high water marks */ + if (hw->fc.low_water > hw->fc.high_water) + hw->fc.low_water = 0; +} + +static void ngbe_configure_pb(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int hdrm = 0; + int tc = netdev_get_num_tc(adapter->netdev); + + hw->mac.ops.setup_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL); + ngbe_pbthresh_setup(adapter); +} + +static void ngbe_configure_isb(struct ngbe_adapter *adapter) +{ + /* set ISB Address */ + struct ngbe_hw *hw = &adapter->hw; + + wr32(hw, NGBE_PX_ISB_ADDR_L, + adapter->isb_dma & DMA_BIT_MASK(32)); + wr32(hw, NGBE_PX_ISB_ADDR_H, adapter->isb_dma >> 32); +} + +static void ngbe_configure_port(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 value, i; + + value = (adapter->num_vfs == 0) ?
+ NGBE_CFG_PORT_CTL_NUM_VT_NONE : + NGBE_CFG_PORT_CTL_NUM_VT_8; + + /* enable double vlan and qinq, NONE VT at default */ + value |= NGBE_CFG_PORT_CTL_D_VLAN | + NGBE_CFG_PORT_CTL_QINQ; + wr32m(hw, NGBE_CFG_PORT_CTL, + NGBE_CFG_PORT_CTL_D_VLAN | + NGBE_CFG_PORT_CTL_QINQ | + NGBE_CFG_PORT_CTL_NUM_VT_MASK, + value); + + wr32(hw, NGBE_CFG_TAG_TPID(0), + ETH_P_8021Q | ETH_P_8021AD << 16); + adapter->hw.tpid[0] = ETH_P_8021Q; + adapter->hw.tpid[1] = ETH_P_8021AD; + for (i = 1; i < 4; i++) + wr32(hw, NGBE_CFG_TAG_TPID(i), + ETH_P_8021Q | ETH_P_8021Q << 16); + for (i = 2; i < 8; i++) + adapter->hw.tpid[i] = ETH_P_8021Q; +} + +#ifdef HAVE_VIRTUAL_STATION +static void ngbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool, + struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 vmolr; + + /* No unicast promiscuous support for VMDQ devices. */ + vmolr = rd32m(hw, NGBE_PSR_VM_L2CTL(pool), + ~(NGBE_PSR_VM_L2CTL_UPE | + NGBE_PSR_VM_L2CTL_MPE | + NGBE_PSR_VM_L2CTL_ROPE | + NGBE_PSR_VM_L2CTL_ROMPE)); + + /* set all bits that we expect to always be set */ + vmolr |= NGBE_PSR_VM_L2CTL_ROPE | + NGBE_PSR_VM_L2CTL_BAM | NGBE_PSR_VM_L2CTL_AUPE; + + if (dev->flags & IFF_ALLMULTI) { + vmolr |= NGBE_PSR_VM_L2CTL_MPE; + } else { + vmolr |= NGBE_PSR_VM_L2CTL_ROMPE; + ngbe_write_mc_addr_list(dev); + } + + ngbe_write_uc_addr_list(adapter->netdev, pool); + wr32(hw, NGBE_PSR_VM_L2CTL(pool), vmolr); +} + +static void ngbe_fwd_psrtype(struct ngbe_fwd_adapter *accel) +{ + struct ngbe_adapter *adapter = accel->adapter; + struct ngbe_hw *hw = &adapter->hw; + u32 psrtype = NGBE_RDB_PL_CFG_L4HDR | + NGBE_RDB_PL_CFG_L3HDR | + NGBE_RDB_PL_CFG_L2HDR | + NGBE_RDB_PL_CFG_TUN_OUTER_L2HDR | + NGBE_RDB_PL_CFG_TUN_TUNHDR; + + + wr32(hw, NGBE_RDB_PL_CFG(VMDQ_P(accel->index)), psrtype); +} + +static void ngbe_disable_fwd_ring(struct ngbe_fwd_adapter *accel, + struct ngbe_ring *rx_ring) +{ + struct ngbe_adapter *adapter = accel->adapter; + int index = rx_ring->queue_index + 
accel->rx_base_queue; + + /* shutdown specific queue receive and wait for dma to settle */ + ngbe_disable_rx_queue(adapter, rx_ring); + usleep_range(10000, 20000); + ngbe_intr_disable(&adapter->hw, NGBE_INTR_Q(index)); + ngbe_clean_rx_ring(rx_ring); + rx_ring->accel = NULL; +} + +static void ngbe_enable_fwd_ring(struct ngbe_fwd_adapter *accel, + struct ngbe_ring *rx_ring) +{ + struct ngbe_adapter *adapter = accel->adapter; + int index = rx_ring->queue_index + accel->rx_base_queue; + + ngbe_intr_enable(&adapter->hw, NGBE_INTR_Q(index)); +} + +static int ngbe_fwd_ring_down(struct net_device *vdev, + struct ngbe_fwd_adapter *accel) +{ + struct ngbe_adapter *adapter = accel->adapter; + unsigned int rxbase = accel->rx_base_queue; + unsigned int txbase = accel->tx_base_queue; + int i; + + netif_tx_stop_all_queues(vdev); + + for (i = 0; i < adapter->queues_per_pool; i++) { + ngbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); + adapter->rx_ring[rxbase + i]->netdev = adapter->netdev; + } + + for (i = 0; i < adapter->queues_per_pool; i++) { + adapter->tx_ring[txbase + i]->accel = NULL; + adapter->tx_ring[txbase + i]->netdev = adapter->netdev; + } + + return 0; +} + +static int ngbe_fwd_ring_up(struct net_device *vdev, + struct ngbe_fwd_adapter *accel) +{ + struct ngbe_adapter *adapter = accel->adapter; + unsigned int rxbase, txbase, queues; + int i, baseq, err = 0; + + if (!test_bit(accel->index, &adapter->fwd_bitmask)) + return 0; + + baseq = VMDQ_P(accel->index) * adapter->queues_per_pool; + netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", + accel->index, adapter->num_vmdqs, + baseq, baseq + adapter->queues_per_pool, + adapter->fwd_bitmask); + + accel->vdev = vdev; + accel->rx_base_queue = rxbase = baseq; + accel->tx_base_queue = txbase = baseq; + + for (i = 0; i < adapter->queues_per_pool; i++) + ngbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); + + for (i = 0; i < adapter->queues_per_pool; i++) { + adapter->rx_ring[rxbase + i]->netdev = 
vdev; + adapter->rx_ring[rxbase + i]->accel = accel; + ngbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]); + } + + for (i = 0; i < adapter->queues_per_pool; i++) { + adapter->tx_ring[txbase + i]->netdev = vdev; + adapter->tx_ring[txbase + i]->accel = accel; + } + + queues = min_t(unsigned int, + adapter->queues_per_pool, vdev->num_tx_queues); + err = netif_set_real_num_tx_queues(vdev, queues); + if (err) + goto fwd_queue_err; + + err = netif_set_real_num_rx_queues(vdev, queues); + if (err) + goto fwd_queue_err; + + if (is_valid_ether_addr(vdev->dev_addr)) + ngbe_add_mac_filter(adapter, vdev->dev_addr, + VMDQ_P(accel->index)); + + ngbe_fwd_psrtype(accel); + ngbe_macvlan_set_rx_mode(vdev, VMDQ_P(accel->index), adapter); + + for (i = 0; i < adapter->queues_per_pool; i++) + ngbe_enable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); + + return err; +fwd_queue_err: + ngbe_fwd_ring_down(vdev, accel); + return err; +} + +static void ngbe_configure_dfwd(struct ngbe_adapter *adapter) +{ + struct net_device *upper; + struct list_head *iter; + int err; + + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *dfwd = netdev_priv(upper); + struct ngbe_fwd_adapter *accel = dfwd->fwd_priv; + + if (accel) { + err = ngbe_fwd_ring_up(upper, accel); + if (err) + continue; + } + } + } +} +#endif /*HAVE_VIRTUAL_STATION*/ + +static void ngbe_configure(struct ngbe_adapter *adapter) +{ + ngbe_configure_pb(adapter); + + /* + * We must restore virtualization before VLANs or else + * the VLVF registers will not be populated + */ + ngbe_configure_virtualization(adapter); + /* configure Double Vlan */ + ngbe_configure_port(adapter); + + ngbe_set_rx_mode(adapter->netdev); +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) + ngbe_restore_vlan(adapter); +#endif + + ngbe_configure_tx(adapter); + ngbe_configure_rx(adapter); + ngbe_configure_isb(adapter); +#ifdef 
HAVE_VIRTUAL_STATION + ngbe_configure_dfwd(adapter); +#endif +} + + +/** + * ngbe_non_sfp_link_config - set up non-SFP+ link + * @hw: pointer to private hardware struct + * + * Returns 0 on success, negative on failure + **/ +static int ngbe_non_sfp_link_config(struct ngbe_hw *hw) +{ + u32 speed; + u32 ret = NGBE_ERR_LINK_SETUP; + struct ngbe_adapter *adapter = hw->back; + + if (hw->mac.autoneg) + speed = hw->phy.autoneg_advertised; + else + speed = hw->phy.force_speed; + + if (hw->ncsi_enabled || + (hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA || + adapter->eth_priv_flags & NGBE_ETH_PRIV_FLAG_LLDP) + return 0; + + if (hw->phy.type == ngbe_phy_internal || + hw->phy.type == ngbe_phy_internal_yt8521s_sfi) { + hw->phy.ops.phy_resume(hw); + hw->phy.ops.setup_once(hw); + } + + ret = hw->mac.ops.setup_link(hw, speed, false); + + return ret; +} + +#if 0 +/** + * ngbe_clear_vf_stats_counters - Clear out VF stats after reset + * @adapter: board private structure + * + * On a reset we need to clear out the VF stats or accounting gets + * messed up because they're not clear on read. 
+ **/ +static void ngbe_clear_vf_stats_counters(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < adapter->num_vfs; i++) { + adapter->vfinfo->last_vfstats.gprc = + rd32(hw, NGBE_VX_GPRC); + adapter->vfinfo->saved_rst_vfstats.gprc += + adapter->vfinfo->vfstats.gprc; + adapter->vfinfo->vfstats.gprc = 0; + adapter->vfinfo->last_vfstats.gptc = + rd32(hw, NGBE_VX_GPTC); + adapter->vfinfo->saved_rst_vfstats.gptc += + adapter->vfinfo->vfstats.gptc; + adapter->vfinfo->vfstats.gptc = 0; + adapter->vfinfo->last_vfstats.gorc = + rd32(hw, NGBE_VX_GORC_LSB); + adapter->vfinfo->saved_rst_vfstats.gorc += + adapter->vfinfo->vfstats.gorc; + adapter->vfinfo->vfstats.gorc = 0; + adapter->vfinfo->last_vfstats.gotc = + rd32(hw, NGBE_VX_GOTC_LSB); + adapter->vfinfo->saved_rst_vfstats.gotc += + adapter->vfinfo->vfstats.gotc; + adapter->vfinfo->vfstats.gotc = 0; + adapter->vfinfo->last_vfstats.mprc = + rd32(hw, NGBE_VX_MPRC); + adapter->vfinfo->saved_rst_vfstats.mprc += + adapter->vfinfo->vfstats.mprc; + adapter->vfinfo->vfstats.mprc = 0; +} +#endif + +static void ngbe_setup_gpie(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 gpie = 0; + + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) { + gpie = NGBE_PX_GPIE_MODEL; + /* + * use EIAM to auto-mask when MSI-X interrupt is asserted + * this saves a register write for every interrupt + */ + } else { + /* legacy interrupts, use EIAM to auto-mask when reading EICR, + * specifically only auto mask tx and rx interrupts */ + } + + wr32(hw, NGBE_PX_GPIE, gpie); +} + +static void ngbe_up_complete(struct ngbe_adapter *adapter) +{ + + struct ngbe_hw *hw = &adapter->hw; + int err; + + ngbe_get_hw_control(adapter); + ngbe_setup_gpie(adapter); + + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) + ngbe_configure_msix(adapter); + else + ngbe_configure_msi_and_legacy(adapter); + + smp_mb__before_atomic(); + clear_bit(__NGBE_DOWN, &adapter->state); + ngbe_napi_enable_all(adapter); 
+#ifndef NGBE_NO_LLI + ngbe_configure_lli(adapter); +#endif + + err = ngbe_non_sfp_link_config(hw); + if (err) + e_err(probe, "link_config FAILED %d\n", err); + + /* select GMII */ + wr32(hw, NGBE_MAC_TX_CFG, + (rd32(hw, NGBE_MAC_TX_CFG) & ~NGBE_MAC_TX_CFG_SPEED_MASK) | + NGBE_MAC_TX_CFG_SPEED_1G); + + /* clear any pending interrupts, may auto mask */ + rd32(hw, NGBE_PX_IC); + rd32(hw, NGBE_PX_MISC_IC); + ngbe_irq_enable(adapter, true, true); + + if (hw->gpio_ctl == 1) + /* gpio0 is used to power on/off control*/ + wr32(hw, NGBE_GPIO_DR, 0); + + /* enable transmits */ +// if (!((adapter->num_rx_queues == 0) && (adapter->num_tx_queues == 0))) + netif_tx_start_all_queues(adapter->netdev); + + /* bring the link up in the watchdog, this could race with our first + * link up interrupt but shouldn't be a problem */ + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + if (NGBE_POLL_LINK_STATUS == 1) + mod_timer(&adapter->link_check_timer, jiffies); + mod_timer(&adapter->service_timer, jiffies); + /* ngbe_clear_vf_stats_counters(adapter); */ + + if (hw->bus.lan_id == 0) { + wr32m(hw, NGBE_MIS_PRB_CTL, + NGBE_MIS_PRB_CTL_LAN0_UP, NGBE_MIS_PRB_CTL_LAN0_UP); + } + else if (hw->bus.lan_id == 1) { + wr32m(hw, NGBE_MIS_PRB_CTL, + NGBE_MIS_PRB_CTL_LAN1_UP, NGBE_MIS_PRB_CTL_LAN1_UP); + } + else if (hw->bus.lan_id == 2) { + wr32m(hw, NGBE_MIS_PRB_CTL, + NGBE_MIS_PRB_CTL_LAN2_UP, NGBE_MIS_PRB_CTL_LAN2_UP); + } + else if (hw->bus.lan_id == 3) { + wr32m(hw, NGBE_MIS_PRB_CTL, + NGBE_MIS_PRB_CTL_LAN3_UP, NGBE_MIS_PRB_CTL_LAN3_UP); + } + else + e_err(probe, "ngbe_up_complete:invalid bus lan id %d\n", hw->bus.lan_id); + + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + wr32m(hw, NGBE_CFG_PORT_CTL, + NGBE_CFG_PORT_CTL_PFRSTD, NGBE_CFG_PORT_CTL_PFRSTD); + + /* clear ecc reset flag if set */ + if (adapter->flags2 & NGBE_FLAG2_ECC_ERR_RESET) { + adapter->flags2 &= ~NGBE_FLAG2_ECC_ERR_RESET; + } +} + +void ngbe_reinit_locked(struct ngbe_adapter 
*adapter) +{ + WARN_ON(in_interrupt()); + /* put off any impending NetWatchDogTimeout */ +#ifdef HAVE_NETIF_TRANS_UPDATE + netif_trans_update(adapter->netdev); +#else + adapter->netdev->trans_start = jiffies; +#endif + + while (test_and_set_bit(__NGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + ngbe_down(adapter); + /* + * If SR-IOV enabled then wait a bit before bringing the adapter + * back up to give the VFs time to respond to the reset. The + * two second wait is based upon the watchdog timer cycle in + * the VF driver. + */ + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) + msleep(2000); + ngbe_up(adapter); + clear_bit(__NGBE_RESETTING, &adapter->state); +} + +void ngbe_up(struct ngbe_adapter *adapter) +{ + /* hardware has been reset, we need to reload some things */ + ngbe_configure(adapter); + + ngbe_up_complete(adapter); +} + +void ngbe_reset(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int err; + u8 old_addr[ETH_ALEN]; + + if (NGBE_REMOVED(hw->hw_addr)) + return; + + err = hw->mac.ops.init_hw(hw); + switch (err) { + case 0: + break; + case NGBE_ERR_MASTER_REQUESTS_PENDING: + e_dev_err("master disable timed out\n"); + break; + case NGBE_ERR_EEPROM_VERSION: + /* We are running on a pre-production device, log a warning */ + e_dev_warn("This device is a pre-production adapter/LOM. " + "Please be aware there may be issues associated " + "with your hardware. 
If you are experiencing " + "problems please contact your hardware " + "representative who provided you with this " + "hardware.\n"); + break; + default: + e_dev_err("Hardware Error: %d\n", err); + } + + /* do not flush user set addresses */ + memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len); + ngbe_flush_sw_mac_table(adapter); + ngbe_mac_set_default_filter(adapter, old_addr); + + /* update SAN MAC vmdq pool selection */ + hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); + + /* Clear saved DMA coalescing values except for watchdog_timer */ + hw->mac.dmac_config.fcoe_en = false; + hw->mac.dmac_config.link_speed = 0; + hw->mac.dmac_config.fcoe_tc = 0; + hw->mac.dmac_config.num_tcs = 0; + +#ifdef HAVE_PTP_1588_CLOCK + if (test_bit(__NGBE_PTP_RUNNING, &adapter->state)) + ngbe_ptp_reset(adapter); +#endif +} + +/** + * ngbe_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void ngbe_clean_rx_ring(struct ngbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_buffer_info) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct ngbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; + if (rx_buffer->dma) { + dma_unmap_single(dev, + rx_buffer->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_buffer->dma = 0; + } + + if (rx_buffer->skb) { + struct sk_buff *skb = rx_buffer->skb; +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + if (NGBE_CB(skb)->dma_released) { + dma_unmap_single(dev, + NGBE_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + NGBE_CB(skb)->dma = 0; + NGBE_CB(skb)->dma_released = false; + } + + if (NGBE_CB(skb)->page_released) + dma_unmap_page(dev, + NGBE_CB(skb)->dma, + ngbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); +#else + /* We need to clean up RSC frag lists */ + skb = ngbe_merge_active_tail(skb); + if (ngbe_close_active_frag_list(skb)) + 
dma_unmap_single(dev, + NGBE_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + NGBE_CB(skb)->dma = 0; +#endif /* CONFIG_NGBE_DISABLE_PACKET_SPLIT */ + dev_kfree_skb(skb); + rx_buffer->skb = NULL; + } + +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + if (!rx_buffer->page) + continue; + + dma_unmap_page(dev, rx_buffer->page_dma, + ngbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + + __free_pages(rx_buffer->page, + ngbe_rx_pg_order(rx_ring)); + rx_buffer->page = NULL; +#endif + } + + size = sizeof(struct ngbe_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +#endif +} + +/** + * ngbe_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void ngbe_clean_tx_ring(struct ngbe_ring *tx_ring) +{ + struct ngbe_tx_buffer *tx_buffer_info; + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_buffer_info) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + ngbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); + } + if (!ring_is_xdp(tx_ring)) + netdev_tx_reset_queue(txring_txq(tx_ring)); + + size = sizeof(struct ngbe_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); +} + +/** + * ngbe_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void ngbe_clean_all_rx_rings(struct ngbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + ngbe_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * ngbe_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void 
ngbe_clean_all_tx_rings(struct ngbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + ngbe_clean_tx_ring(adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + ngbe_clean_tx_ring(adapter->xdp_ring[i]); +} + +static void ngbe_disable_device(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ngbe_hw *hw = &adapter->hw; +#ifdef HAVE_VIRTUAL_STATION + struct net_device *upper; + struct list_head *iter; +#endif + u32 i; + + /* signal that we are down to the interrupt handler */ + if (test_and_set_bit(__NGBE_DOWN, &adapter->state)) + return; /* do nothing if already down */ + + if (adapter->num_vfs) { + /* Clear EITR Select mapping */ + wr32(&adapter->hw, NGBE_PX_ITRSEL, 0); + + /* Mark all the VFs as inactive */ + for (i = 0 ; i < adapter->num_vfs; i++) + adapter->vfinfo[i].clear_to_send = 0; + + /* ping all the active vfs to let them know we are going down */ + ngbe_ping_all_vfs_with_link_status(adapter, false); + + /* Disable all VFTE/VFRE TX/RX */ + ngbe_disable_tx_rx(adapter); + } + + if (!(adapter->flags2 & NGBE_FLAG2_ECC_ERR_RESET)) + ngbe_disable_pcie_master(hw); + + /* disable receives */ + hw->mac.ops.disable_rx(hw); + + /* disable all enabled rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) + /* this call also flushes the previous write */ + ngbe_disable_rx_queue(adapter, adapter->rx_ring[i]); + + netif_tx_stop_all_queues(netdev); + + /* call carrier off first to avoid false dev_watchdog timeouts */ + netif_carrier_off(netdev); + netif_tx_disable(netdev); + +#ifdef HAVE_VIRTUAL_STATION + /* disable any upper devices */ + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *vlan = netdev_priv(upper); + + if (vlan->fwd_priv) { + netif_tx_stop_all_queues(upper); + netif_carrier_off(upper); + netif_tx_disable(upper); + } + } + } +#endif + + if (hw->gpio_ctl == 1) + /* gpio0 is used to power on/off 
control*/ + wr32(hw, NGBE_GPIO_DR, NGBE_GPIO_DR_0); + + /* synchronize_rcu() needed for pending XDP buffers to drain */ + if (adapter->xdp_ring[0]) + synchronize_rcu(); + + ngbe_irq_disable(adapter); + + ngbe_napi_disable_all(adapter); + + adapter->flags2 &= ~(NGBE_FLAG2_PF_RESET_REQUESTED | + NGBE_FLAG2_DEV_RESET_REQUESTED | + NGBE_FLAG2_GLOBAL_RESET_REQUESTED); + adapter->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE; + + del_timer_sync(&adapter->service_timer); + if (NGBE_POLL_LINK_STATUS == 1) + del_timer_sync(&adapter->link_check_timer); + + if (hw->bus.lan_id == 0) + wr32m(hw, NGBE_MIS_PRB_CTL, + NGBE_MIS_PRB_CTL_LAN0_UP, 0); + else if (hw->bus.lan_id == 1) + wr32m(hw, NGBE_MIS_PRB_CTL, + NGBE_MIS_PRB_CTL_LAN1_UP, 0); + else if (hw->bus.lan_id == 2) + wr32m(hw, NGBE_MIS_PRB_CTL, + NGBE_MIS_PRB_CTL_LAN2_UP, 0); + else if (hw->bus.lan_id == 3) + wr32m(hw, NGBE_MIS_PRB_CTL, + NGBE_MIS_PRB_CTL_LAN3_UP, 0); + else + e_dev_err("ngbe_disable_device:invalid bus lan id %d\n", hw->bus.lan_id); + + /*OCP NCSI need it*/ + if (!((hw->subsystem_device_id & WOL_SUP_MASK) == WOL_SUP || + hw->ncsi_enabled || + adapter->eth_priv_flags & NGBE_ETH_PRIV_FLAG_LLDP)) + /* disable mac transmitter */ + wr32m(hw, NGBE_MAC_TX_CFG, NGBE_MAC_TX_CFG_TE, 0); + + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < adapter->num_tx_queues; i++) { + u8 reg_idx = adapter->tx_ring[i]->reg_idx; + wr32(hw, NGBE_PX_TR_CFG(reg_idx), + NGBE_PX_TR_CFG_SWFLSH); + } + + for (i = 0; i < adapter->num_xdp_queues; i++) { + u8 reg_idx = adapter->xdp_ring[i]->reg_idx; + wr32(hw, NGBE_PX_TR_CFG(reg_idx), + NGBE_PX_TR_CFG_SWFLSH); + } + /* Disable the Tx DMA engine */ + wr32m(hw, NGBE_TDM_CTL, NGBE_TDM_CTL_TE, 0); +} + + +void ngbe_down(struct ngbe_adapter *adapter) +{ + ngbe_disable_device(adapter); + +#ifdef HAVE_PCI_ERS + if (!pci_channel_offline(adapter->pdev)) +#endif + ngbe_reset(adapter); + + ngbe_clean_all_tx_rings(adapter); + ngbe_clean_all_rx_rings(adapter); +} + +/** + * 
ngbe_init_shared_code - Initialize the shared code + * @hw: pointer to hardware structure + * + * This will assign function pointers and assign the MAC type and PHY code. + * Does not touch the hardware. This function must be called prior to any + * other function in the shared code. The ngbe_hw structure should be + * memset to 0 prior to calling this function. The following fields in + * hw structure should be filled in prior to calling this function: + * hw_addr, back, device_id, vendor_id, subsystem_device_id, + * subsystem_vendor_id, and revision_id + **/ +static int ngbe_init_shared_code(struct ngbe_hw *hw) +{ + int wol_mask = 0, ncsi_mask = 0; + u16 type_mask = 0, val; + u32 lan_en; + + lan_en = rd32(hw, NGBE_MIS_PWR); + if (!(lan_en & BIT(hw->bus.lan_id + 28))) + return -EIO; + type_mask = (u16)(hw->subsystem_device_id & OEM_MASK); + ncsi_mask = hw->subsystem_device_id & NCSI_SUP_MASK; + wol_mask = hw->subsystem_device_id & WOL_SUP_MASK; + + val = rd32(hw, NGBE_CFG_PORT_ST); + hw->mac_type = (val & BIT(7)) >> 7 ? + em_mac_type_rgmii : + em_mac_type_mdi; + + hw->wol_enabled = (wol_mask == WOL_SUP) ? 1 : 0; + hw->ncsi_enabled = (ncsi_mask == NCSI_SUP || + type_mask == OCP_CARD) ? 
1 : 0; + + switch (type_mask) { + case LY_YT8521S_SFP: + case LY_M88E1512_SFP: + case YT8521S_SFP_GPIO: + case INTERNAL_YT8521S_SFP_GPIO: + hw->gpio_ctl = 1; + break; + default: + hw->gpio_ctl = 0; + break; + } + + switch (type_mask) { + case M88E1512_SFP: + case LY_M88E1512_SFP: + hw->phy.type = ngbe_phy_m88e1512_sfi; + break; + case M88E1512_RJ45: + hw->phy.type = ngbe_phy_m88e1512; + break; + case M88E1512_MIX: + hw->phy.type = ngbe_phy_m88e1512_unknown; + break; + case YT8521S_SFP: + case YT8521S_SFP_GPIO: + case LY_YT8521S_SFP: + hw->phy.type = ngbe_phy_yt8521s_sfi; + break; + case INTERNAL_YT8521S_SFP: + case INTERNAL_YT8521S_SFP_GPIO: + hw->phy.type = ngbe_phy_internal_yt8521s_sfi; + break; + default: + hw->phy.type = ngbe_phy_internal; + break; + } + + /* select clause 22 */ + wr32(hw, NGBE_MDIO_CLAUSE_SELECT, 0xF); + ngbe_init_ops(hw); + + return 0; +} + +/** + * ngbe_sw_init - Initialize general software structures (struct ngbe_adapter) + * @adapter: board private structure to initialize + * + * ngbe_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
+ **/ +static const u32 def_rss_key[10] = { + 0xE291D73D, 0x1805EC6C, 0x2A94B30D, + 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, + 0x6A3E67EA, 0x14364D17, 0x3BED200D +}; + +static int __devinit ngbe_sw_init(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int err; + u32 ssid = 0; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + if (hw->revision_id == NGBE_FAILED_READ_CFG_BYTE && + ngbe_check_cfg_remove(hw, pdev)) { + e_err(probe, "read of revision id failed\n"); + err = -ENODEV; + goto out; + } + + hw->oem_svid = pdev->subsystem_vendor; + hw->oem_ssid = pdev->subsystem_device; + if (pdev->subsystem_vendor == 0x8088) { + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + } else { + err = ngbe_flash_read_dword(hw, 0xfffdc, &ssid); + if (err) { + e_err(probe, "read of internel subsystem device id failed\n"); + err = -ENODEV; + goto out; + } + hw->subsystem_device_id = (u16)ssid; + hw->subsystem_device_id = hw->subsystem_device_id >> 8 | + hw->subsystem_device_id << 8; + } + + /* phy type, phy ops, mac ops */ + err = ngbe_init_shared_code(hw); + if (err) { + e_err(probe, "init_shared_code failed: %d\n", err); + goto out; + } + + adapter->mac_table = kzalloc(sizeof(struct ngbe_mac_addr) * + hw->mac.num_rar_entries, + GFP_ATOMIC); + if (!adapter->mac_table) { + err = NGBE_ERR_OUT_OF_MEM; + e_err(probe, "mac_table allocation failed: %d\n", err); + goto out; + } + + memcpy(adapter->rss_key, def_rss_key, sizeof(def_rss_key)); + + /* Set common capability flags and settings */ + adapter->max_q_vectors = NGBE_MAX_MSIX_Q_VECTORS_EMERALD; + + /* Set MAC specific capability flags and exceptions */ + adapter->flags |= NGBE_FLAGS_SP_INIT; + adapter->flags2 |= NGBE_FLAG2_TEMP_SENSOR_CAPABLE; + adapter->flags2 |= NGBE_FLAG2_EEE_CAPABLE; + + /* init 
mailbox params */ + hw->mbx.ops.init_params(hw); + + /* default flow control settings */ + hw->fc.requested_mode = ngbe_fc_full; + hw->fc.current_mode = ngbe_fc_full; /* init for ethtool output */ + + adapter->last_lfc_mode = hw->fc.current_mode; + hw->fc.pause_time = NGBE_DEFAULT_FCPAUSE; + hw->fc.send_xon = true; + hw->fc.disable_fc_autoneg = false; + + /* set default ring sizes */ + adapter->tx_ring_count = NGBE_DEFAULT_TXD; + adapter->rx_ring_count = NGBE_DEFAULT_RXD; + + /* set default work limits */ + adapter->tx_work_limit = NGBE_DEFAULT_TX_WORK; + adapter->rx_work_limit = NGBE_DEFAULT_RX_WORK; + + adapter->tx_timeout_recovery_level = 0; + + /* PF holds first pool slot */ + adapter->num_vmdqs = 1; + set_bit(0, &adapter->fwd_bitmask); + set_bit(__NGBE_DOWN, &adapter->state); + if (NGBE_LINK_RETRY == 1) + hw->restart_an = 0; + +out: + return err; +} + +/** + * ngbe_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int ngbe_setup_tx_resources(struct ngbe_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = -1; + int size; + + size = sizeof(struct ngbe_tx_buffer) * tx_ring->count; + + if (tx_ring->q_vector) + numa_node = tx_ring->q_vector->numa_node; + + tx_ring->tx_buffer_info = vzalloc_node(size, numa_node); + if (!tx_ring->tx_buffer_info) + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union ngbe_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + set_dev_node(dev, numa_node); + tx_ring->desc = dma_alloc_coherent(dev, + tx_ring->size, + &tx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!tx_ring->desc) + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + return 0; + +err: 
+ vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * ngbe_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int ngbe_setup_all_tx_resources(struct ngbe_adapter *adapter) +{ + int i = 0, j = 0, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = ngbe_setup_tx_resources(adapter->tx_ring[i]); + if (!err) + continue; + + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + goto err_setup_tx; + } + + for (j = 0; j < adapter->num_xdp_queues; j++) { + err = ngbe_setup_tx_resources(adapter->xdp_ring[j]); + if (!err) + continue; + + e_err(probe, "Allocation for Tx(XDP) Queue %u failed\n", j); + goto err_setup_tx; + } + + return 0; +err_setup_tx: + /* rewind the index freeing the rings as we go */ + while (j--) + ngbe_free_tx_resources(adapter->xdp_ring[j]); + while (i--) + ngbe_free_tx_resources(adapter->tx_ring[i]); + return err; +} +#ifdef HAVE_XDP_BUFF_RXQ +static int ngbe_rx_napi_id(struct ngbe_ring *rx_ring) +{ + struct ngbe_q_vector *q_vector = rx_ring->q_vector; + + return q_vector ? 
q_vector->napi.napi_id : 0; +} +#endif +/** + * ngbe_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int ngbe_setup_rx_resources(struct ngbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = -1; + int size; + + size = sizeof(struct ngbe_rx_buffer) * rx_ring->count; + + if (rx_ring->q_vector) + numa_node = rx_ring->q_vector->numa_node; + + rx_ring->rx_buffer_info = vzalloc_node(size, numa_node); + if (!rx_ring->rx_buffer_info) + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union ngbe_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + set_dev_node(dev, numa_node); + rx_ring->desc = dma_alloc_coherent(dev, + rx_ring->size, + &rx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!rx_ring->desc) + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) + goto err; + + if (!rx_ring->q_vector) + return 0; +#ifdef HAVE_XDP_BUFF_RXQ + /* XDP RX-queue info */ + if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, + rx_ring->queue_index, + ngbe_rx_napi_id(rx_ring)) < 0) + goto err; +#endif + rx_ring->xdp_prog = rx_ring->q_vector->adapter->xdp_prog; + + return 0; +err: + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * ngbe_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
+ * + * Return 0 on success, negative on failure + **/ +static int ngbe_setup_all_rx_resources(struct ngbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = ngbe_setup_rx_resources(adapter->rx_ring[i]); + if (!err) { + continue; + } + + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + goto err_setup_rx; + } + + return 0; +err_setup_rx: + /* rewind the index freeing the rings as we go */ + while (i--) + ngbe_free_rx_resources(adapter->rx_ring[i]); + return err; +} + +/** + * ngbe_setup_isb_resources - allocate interrupt status resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int ngbe_setup_isb_resources(struct ngbe_adapter *adapter) +{ + struct device *dev = pci_dev_to_dev(adapter->pdev); + + adapter->isb_mem = dma_alloc_coherent(dev, + sizeof(u32) * NGBE_ISB_MAX, + &adapter->isb_dma, + GFP_KERNEL); + if (!adapter->isb_mem) { + e_err(probe, "ngbe_setup_isb_resources: alloc isb_mem failed\n"); + return -ENOMEM; + } + memset(adapter->isb_mem, 0, sizeof(u32) * NGBE_ISB_MAX); + return 0; +} + +/** + * ngbe_free_isb_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static void ngbe_free_isb_resources(struct ngbe_adapter *adapter) +{ + struct device *dev = pci_dev_to_dev(adapter->pdev); + + dma_free_coherent(dev, sizeof(u32) * NGBE_ISB_MAX, + adapter->isb_mem, adapter->isb_dma); + adapter->isb_mem = NULL; +} + +/** + * ngbe_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void ngbe_free_tx_resources(struct ngbe_ring *tx_ring) +{ + ngbe_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, 
tx_ring->dma); + tx_ring->desc = NULL; +} + +/** + * ngbe_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void ngbe_free_all_tx_resources(struct ngbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + ngbe_free_tx_resources(adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + ngbe_free_tx_resources(adapter->xdp_ring[i]); +} + +/** + * ngbe_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void ngbe_free_rx_resources(struct ngbe_ring *rx_ring) +{ + ngbe_clean_rx_ring(rx_ring); + rx_ring->xdp_prog = NULL; + +#ifdef HAVE_XDP_BUFF_RXQ + if(rx_ring->q_vector) + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); +#endif + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * ngbe_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void ngbe_free_all_rx_resources(struct ngbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + ngbe_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * ngbe_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int ngbe_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); +#ifndef HAVE_NETDEVICE_MIN_MAX_MTU + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; +#endif + +#ifndef HAVE_NETDEVICE_MIN_MAX_MTU + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < 
68) || (max_frame > (NGBE_MAX_JUMBO_FRAME_SIZE))) + return -EINVAL; +#else + if ((new_mtu < 68) || (new_mtu > 9414)) + return -EINVAL; + +#endif + + /* + * we cannot allow legacy VFs to enable their receive + * paths when MTU greater than 1500 is configured. So display a + * warning that legacy VFs will be disabled. + */ + if ((adapter->flags & NGBE_FLAG_SRIOV_ENABLED) && +#ifndef HAVE_NETDEVICE_MIN_MAX_MTU + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) +#else + (new_mtu > ETH_DATA_LEN)) +#endif + e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n"); + + e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + + /* must set new MTU before calling down or up */ + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + ngbe_reinit_locked(adapter); + + return 0; +} + +/** + * ngbe_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
+ **/ +int ngbe_open(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + int err; + + /* disallow open during test */ + if (test_bit(__NGBE_TESTING, &adapter->state)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = ngbe_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = ngbe_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + err = ngbe_setup_isb_resources(adapter); + if (err) + goto err_req_isb; + + ngbe_configure(adapter); + + err = ngbe_request_irq(adapter); + if (err) + goto err_req_irq; + + if (adapter->num_tx_queues) { + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_vmdqs > 1 + ? adapter->queues_per_pool + : adapter->num_tx_queues); + if (err) + goto err_set_queues; + } + + if (adapter->num_rx_queues) { + err = netif_set_real_num_rx_queues(netdev, adapter->num_vmdqs > 1 + ? adapter->queues_per_pool + : adapter->num_rx_queues); + if (err) + goto err_set_queues; + } + +#ifdef HAVE_PTP_1588_CLOCK + ngbe_ptp_init(adapter); +#endif + + ngbe_up_complete(adapter); + + return 0; + +err_set_queues: + ngbe_free_irq(adapter); +err_req_irq: + ngbe_free_isb_resources(adapter); +err_req_isb: + ngbe_free_all_rx_resources(adapter); + +err_setup_rx: + ngbe_free_all_tx_resources(adapter); +err_setup_tx: + ngbe_reset(adapter); + return err; +} + +/** + * ngbe_close_suspend - actions necessary to both suspend and close flows + * @adapter: the private adapter struct + * + * This function should contain the necessary work common to both suspending + * and closing of the device. 
+ */ +static void ngbe_close_suspend(struct ngbe_adapter *adapter) +{ +#ifdef HAVE_PTP_1588_CLOCK + ngbe_ptp_suspend(adapter); +#endif + ngbe_disable_device(adapter); + + ngbe_clean_all_tx_rings(adapter); + ngbe_clean_all_rx_rings(adapter); + + ngbe_free_irq(adapter); + + ngbe_free_isb_resources(adapter); + ngbe_free_all_rx_resources(adapter); + ngbe_free_all_tx_resources(adapter); +} + +/** + * ngbe_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +int ngbe_close(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + +#ifdef HAVE_PTP_1588_CLOCK + ngbe_ptp_stop(adapter); +#endif + + ngbe_down(adapter); + ngbe_free_irq(adapter); + + ngbe_free_isb_resources(adapter); + ngbe_free_all_rx_resources(adapter); + ngbe_free_all_tx_resources(adapter); + + ngbe_release_hw_control(adapter); + + return 0; +} + +#ifdef CONFIG_PM +#ifndef USE_LEGACY_PM_SUPPORT +static int ngbe_resume(struct device *dev) +#else +static int ngbe_resume(struct pci_dev *pdev) +#endif /* USE_LEGACY_PM_SUPPORT */ +{ + struct ngbe_adapter *adapter; + struct net_device *netdev; + u32 err; +#ifndef USE_LEGACY_PM_SUPPORT + struct pci_dev *pdev = to_pci_dev(dev); +#endif + + adapter = pci_get_drvdata(pdev); + netdev = adapter->netdev; + adapter->hw.hw_addr = adapter->io_addr; + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* + * pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. 
+ */ + pci_save_state(pdev); + wr32(&adapter->hw, NGBE_PSR_WKUP_CTL, adapter->wol); + + err = pci_enable_device_mem(pdev); + if (err) { + e_dev_err("Cannot enable PCI device from suspend\n"); + return err; + } + smp_mb__before_atomic(); + clear_bit(__NGBE_DISABLED, &adapter->state); + pci_set_master(pdev); + + pci_wake_from_d3(pdev, false); + + ngbe_reset(adapter); + + rtnl_lock(); + + err = ngbe_init_interrupt_scheme(adapter); + if (!err && netif_running(netdev)) + err = ngbe_open(netdev); + + rtnl_unlock(); + + if (err) + return err; + + netif_device_attach(netdev); + + return 0; +} + +#ifndef USE_LEGACY_PM_SUPPORT +/** + * ngbe_freeze - quiesce the device (no IRQ's or DMA) + * @dev: The port's netdev + */ +static int ngbe_freeze(struct device *dev) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev)); + struct net_device *netdev = adapter->netdev; + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + ngbe_down(adapter); + ngbe_free_irq(adapter); + } + + ngbe_reset_interrupt_capability(adapter); + + return 0; +} + +/** + * ngbe_thaw - un-quiesce the device + * @dev: The port's netdev + */ +static int ngbe_thaw(struct device *dev) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev)); + struct net_device *netdev = adapter->netdev; + + ngbe_set_interrupt_capability(adapter); + + if (netif_running(netdev)) { + u32 err = ngbe_request_irq(adapter); + if (err) + return err; + + ngbe_up(adapter); + } + + netif_device_attach(netdev); + + return 0; +} +#endif /* USE_LEGACY_PM_SUPPORT */ +#endif /* CONFIG_PM */ + +/* + * __ngbe_shutdown is not used when power manangbeent + * is disabled on older kernels (<2.6.12). causes a compile + * warning/error, because it is defined and not used. 
+ */ +#if defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) +static int __ngbe_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + struct ngbe_hw *hw = &adapter->hw; + u32 wufc = adapter->wol; +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + ngbe_mac_set_default_filter(adapter, hw->mac.perm_addr); + + rtnl_lock(); + if (netif_running(netdev)) + ngbe_close_suspend(adapter); + rtnl_unlock(); + + ngbe_clear_interrupt_scheme(adapter); + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + /* this won't stop link of managebility or WoL is enabled */ + ngbe_stop_mac_link_on_d3(hw); + + if (wufc) { + ngbe_set_rx_mode(netdev); + ngbe_configure_rx(adapter); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & NGBE_PSR_WKUP_CTL_MC) { + wr32m(hw, NGBE_PSR_CTL, + NGBE_PSR_CTL_MPE, NGBE_PSR_CTL_MPE); + } + + pci_clear_master(adapter->pdev); + wr32(hw, NGBE_PSR_WKUP_CTL, wufc); + } else { + wr32(hw, NGBE_PSR_WKUP_CTL, 0); + } + + pci_wake_from_d3(pdev, !!wufc); + + *enable_wake = !!wufc; + ngbe_release_hw_control(adapter); + + if (!test_and_set_bit(__NGBE_DISABLED, &adapter->state)) + pci_disable_device(pdev); + + return 0; +} +#endif /* defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) */ + +#ifdef CONFIG_PM +#ifndef USE_LEGACY_PM_SUPPORT +static int ngbe_suspend(struct device *dev) +#else +static int ngbe_suspend(struct pci_dev *pdev, + pm_message_t __always_unused state) +#endif /* USE_LEGACY_PM_SUPPORT */ +{ + int retval; + bool wake; +#ifndef USE_LEGACY_PM_SUPPORT + struct pci_dev *pdev = to_pci_dev(dev); +#endif + + retval = __ngbe_shutdown(pdev, &wake); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} +#endif /* CONFIG_PM */ + +#ifndef 
USE_REBOOT_NOTIFIER +static void ngbe_shutdown(struct pci_dev *pdev) +{ + bool wake = 0; + + __ngbe_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#endif +#ifdef HAVE_NDO_GET_STATS64 +/** + * ngbe_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: storage space for 64bit statistics + * + * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This + * function replaces ngbe_get_stats for kernels which support it. + */ +#ifdef HAVE_VOID_NDO_GET_STATS64 +static void ngbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#else +static struct rtnl_link_stats64 *ngbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#endif +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + int i; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ngbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, + start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ngbe_ring *ring = READ_ONCE(adapter->tx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, + start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct ngbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + 
packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, + start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + rcu_read_unlock(); + /* following stats updated by ngbe_watchdog_task() */ + stats->multicast = netdev->stats.multicast; + stats->rx_errors = netdev->stats.rx_errors; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_missed_errors = netdev->stats.rx_missed_errors; +#ifndef HAVE_VOID_NDO_GET_STATS64 + return stats; +#endif +} +#else +/** + * ngbe_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. + **/ +static struct net_device_stats *ngbe_get_stats(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + /* update the stats data */ + ngbe_update_stats(adapter); + +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + /* only return the current stats */ + return &netdev->stats; +#else + /* only return the current stats */ + return &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ +} +#endif + +/** + * ngbe_update_stats - Update the board statistics counters. 
+ * @adapter: board private structure + **/ +void ngbe_update_stats(struct ngbe_adapter *adapter) +{ + +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *net_stats = &adapter->netdev->stats; +#else + struct net_device_stats *net_stats = &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_hw_stats *hwstats = &adapter->stats; + u64 total_mpc = 0; + u32 i, bprc, lxon, lxoff; + u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; + u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; + u64 bytes = 0, packets = 0, hw_csum_rx_error = 0; + u64 hw_csum_rx_good = 0; +#ifndef NGBE_NO_LRO + u32 flushed = 0, coal = 0; +#endif + + + if (test_bit(__NGBE_DOWN, &adapter->state) || + test_bit(__NGBE_RESETTING, &adapter->state)) + return; + +#ifndef NGBE_NO_LRO + for (i = 0; i < adapter->num_q_vectors; i++) { + struct ngbe_q_vector *q_vector = adapter->q_vector[i]; + if (!q_vector) + continue; + flushed += q_vector->lrolist.stats.flushed; + coal += q_vector->lrolist.stats.coal; + } + adapter->lro_stats.flushed = flushed; + adapter->lro_stats.coal = coal; + + +#endif + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ngbe_ring *rx_ring = adapter->rx_ring[i]; + non_eop_descs += rx_ring->rx_stats.non_eop_descs; + alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; + alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; + hw_csum_rx_error += rx_ring->rx_stats.csum_err; + hw_csum_rx_good += rx_ring->rx_stats.csum_good_cnt; + bytes += rx_ring->stats.bytes; + packets += rx_ring->stats.packets; + + } + + adapter->non_eop_descs = non_eop_descs; + adapter->alloc_rx_page_failed = alloc_rx_page_failed; + adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; + adapter->hw_csum_rx_error = hw_csum_rx_error; + adapter->hw_csum_rx_good = hw_csum_rx_good; + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + bytes = 0; + packets = 0; + /* gather some stats to the adapter struct 
that are per queue */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ngbe_ring *tx_ring = adapter->tx_ring[i]; + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + bytes += tx_ring->stats.bytes; + packets += tx_ring->stats.packets; + } + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct ngbe_ring *xdp_ring = adapter->xdp_ring[i]; + + restart_queue += xdp_ring->tx_stats.restart_queue; + tx_busy += xdp_ring->tx_stats.tx_busy; + bytes += xdp_ring->stats.bytes; + packets += xdp_ring->stats.packets; + } + adapter->restart_queue = restart_queue; + adapter->tx_busy = tx_busy; + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + + hwstats->crcerrs += rd32(hw, NGBE_RX_CRC_ERROR_FRAMES_LOW); + + hwstats->gprc += rd32(hw, NGBE_PX_GPRC); + + ngbe_update_xoff_rx_lfc(adapter); + + hwstats->o2bgptc += rd32(hw, NGBE_TDM_OS2BMC_CNT); + if (ngbe_check_mng_access(&adapter->hw)) { + hwstats->o2bspc += rd32(hw, NGBE_MNG_OS2BMC_CNT); + hwstats->b2ospc += rd32(hw, NGBE_MNG_BMC2OS_CNT); + } + hwstats->b2ogprc += rd32(hw, NGBE_RDM_BMC2OS_CNT); + hwstats->gorc += rd32(hw, NGBE_PX_GORC_LSB); + hwstats->gorc += (u64)rd32(hw, NGBE_PX_GORC_MSB) << 32; + + hwstats->gotc += rd32(hw, NGBE_PX_GOTC_LSB); + hwstats->gotc += (u64)rd32(hw, NGBE_PX_GOTC_MSB) << 32; + + + adapter->hw_rx_no_dma_resources += + rd32(hw, NGBE_RDM_DRP_PKT); + bprc = rd32(hw, NGBE_RX_BC_FRAMES_GOOD_LOW); + hwstats->bprc += bprc; + hwstats->mprc = 0; + + for (i = 0; i < 8; i++) + hwstats->mprc += rd32(hw, NGBE_PX_MPRC(i)); + + hwstats->roc += rd32(hw, NGBE_RX_OVERSIZE_FRAMES_GOOD); + hwstats->rlec += rd32(hw, NGBE_RX_LEN_ERROR_FRAMES_LOW); + lxon = rd32(hw, NGBE_RDB_LXONTXC); + hwstats->lxontxc += lxon; + lxoff = rd32(hw, NGBE_RDB_LXOFFTXC); + hwstats->lxofftxc += lxoff; + + hwstats->gptc += rd32(hw, NGBE_PX_GPTC); + hwstats->mptc += rd32(hw, NGBE_TX_MC_FRAMES_GOOD_LOW); + hwstats->ruc += rd32(hw, NGBE_RX_UNDERSIZE_FRAMES_GOOD); + hwstats->tpr += rd32(hw, 
NGBE_RX_FRAME_CNT_GOOD_BAD_LOW); + hwstats->bptc += rd32(hw, NGBE_TX_BC_FRAMES_GOOD_LOW); + /* Fill out the OS statistics structure */ + net_stats->multicast = hwstats->mprc; + + /* Rx Errors */ + net_stats->rx_errors = hwstats->crcerrs + + hwstats->rlec; + net_stats->rx_dropped = 0; + net_stats->rx_length_errors = hwstats->rlec; + net_stats->rx_crc_errors = hwstats->crcerrs; + total_mpc = rd32(hw, NGBE_RDB_MPCNT); + net_stats->rx_missed_errors += total_mpc; + + /* + * VF Stats Collection - skip while resetting because these + * are not clear on read and otherwise you'll sometimes get + * crazy values. + */ + if (!test_bit(__NGBE_RESETTING, &adapter->state)) { + for (i = 0; i < adapter->num_vfs; i++) { + UPDATE_VF_COUNTER_32bit(NGBE_VX_GPRC, \ + adapter->vfinfo->last_vfstats.gprc, \ + adapter->vfinfo->vfstats.gprc); + UPDATE_VF_COUNTER_32bit(NGBE_VX_GPTC, \ + adapter->vfinfo->last_vfstats.gptc, \ + adapter->vfinfo->vfstats.gptc); + UPDATE_VF_COUNTER_36bit(NGBE_VX_GORC_LSB, \ + NGBE_VX_GORC_MSB, \ + adapter->vfinfo->last_vfstats.gorc, \ + adapter->vfinfo->vfstats.gorc); + UPDATE_VF_COUNTER_36bit(NGBE_VX_GOTC_LSB, \ + NGBE_VX_GOTC_MSB, \ + adapter->vfinfo->last_vfstats.gotc, \ + adapter->vfinfo->vfstats.gotc); + UPDATE_VF_COUNTER_32bit(NGBE_VX_MPRC, \ + adapter->vfinfo->last_vfstats.mprc, \ + adapter->vfinfo->vfstats.mprc); + } + } + +} + +/** + * ngbe_check_hang_subtask - check for hung queues and dropped interrupts + * @adapter - pointer to the device adapter structure + * + * This function serves two purposes. First it strobes the interrupt lines + * in order to make certain interrupts are occurring. Secondly it sets the + * bits needed to check for TX hangs. As a result we should immediately + * determine if a hang has occurred. 
+ */ +static void ngbe_check_hang_subtask(struct ngbe_adapter *adapter) +{ + int i; + + /* If we're down or resetting, just bail */ + if (test_bit(__NGBE_DOWN, &adapter->state) || + test_bit(__NGBE_REMOVING, &adapter->state) || + test_bit(__NGBE_RESETTING, &adapter->state)) + return; + + /* Force detection of hung controller */ + if (netif_carrier_ok(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + set_check_for_tx_hang(adapter->tx_ring[i]); + } + if (netif_carrier_ok(adapter->netdev)) { + for (i = 0; i < adapter->num_xdp_queues; i++) + set_check_for_tx_hang(adapter->xdp_ring[i]); + } +} + +static void ngbe_watchdog_an_complete(struct ngbe_adapter *adapter) +{ + u32 link_speed = 0; + u32 lan_speed = 0; + bool link_up = true; + struct ngbe_hw *hw = &adapter->hw; + + if (!(adapter->flags & NGBE_FLAG_NEED_ANC_CHECK)) + return; + + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + + adapter->link_speed = link_speed; + switch (link_speed) { + case NGBE_LINK_SPEED_100_FULL: + lan_speed = 1; + break; + case NGBE_LINK_SPEED_1GB_FULL: + lan_speed = 2; + break; + case NGBE_LINK_SPEED_10_FULL: + lan_speed = 0; + break; + default: + break; + } + wr32m(hw, NGBE_CFG_LAN_SPEED, + 0x3, lan_speed); + + if (link_speed & (NGBE_LINK_SPEED_1GB_FULL | + NGBE_LINK_SPEED_100_FULL | NGBE_LINK_SPEED_10_FULL)) { + wr32(hw, NGBE_MAC_TX_CFG, + (rd32(hw, NGBE_MAC_TX_CFG) & + ~NGBE_MAC_TX_CFG_SPEED_MASK) | NGBE_MAC_TX_CFG_TE | + NGBE_MAC_TX_CFG_SPEED_1G); + } + + + + adapter->flags &= ~NGBE_FLAG_NEED_ANC_CHECK; + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; + + return; +} + +/** + * ngbe_watchdog_update_link - update the link status + * @adapter - pointer to the device adapter structure + * @link_speed - pointer to a u32 to store the link_speed + **/ +static void ngbe_watchdog_update_link_status(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool link_up = adapter->link_up; + u32 lan_speed = 0; + u32 
reg; + + if (NGBE_POLL_LINK_STATUS != 1) { + if (!(adapter->flags & NGBE_FLAG_NEED_LINK_UPDATE)) + return; + } + + link_speed = NGBE_LINK_SPEED_1GB_FULL; + link_up = true; + + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + + if (NGBE_POLL_LINK_STATUS != 1) { + if (link_up || time_after(jiffies, (adapter->link_check_timeout + + NGBE_TRY_LINK_TIMEOUT))) { + adapter->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE; + } + } else { + if (adapter->link_up == link_up && adapter->link_speed == link_speed) + return; + } + + adapter->link_speed = link_speed; + switch (link_speed) { + case NGBE_LINK_SPEED_100_FULL: + lan_speed = 1; + break; + case NGBE_LINK_SPEED_1GB_FULL: + lan_speed = 2; + break; + case NGBE_LINK_SPEED_10_FULL: + lan_speed = 0; + break; + default: + break; + } + wr32m(hw, NGBE_CFG_LAN_SPEED, + 0x3, lan_speed); + + if (link_up) { + hw->mac.ops.fc_enable(hw); + ngbe_set_rx_drop_en(adapter); + } + + if (link_up) { + +#ifdef HAVE_PTP_1588_CLOCK + adapter->last_rx_ptp_check = jiffies; + + if (test_bit(__NGBE_PTP_RUNNING, &adapter->state)) + ngbe_ptp_start_cyclecounter(adapter); + +#endif + if (link_speed & (NGBE_LINK_SPEED_1GB_FULL | + NGBE_LINK_SPEED_100_FULL | NGBE_LINK_SPEED_10_FULL)) { + wr32(hw, NGBE_MAC_TX_CFG, + (rd32(hw, NGBE_MAC_TX_CFG) & + ~NGBE_MAC_TX_CFG_SPEED_MASK) | NGBE_MAC_TX_CFG_TE | + NGBE_MAC_TX_CFG_SPEED_1G); + } + + /* Re configure MAC RX */ + reg = rd32(hw, NGBE_MAC_RX_CFG); + wr32(hw, NGBE_MAC_RX_CFG, reg); + wr32(hw, NGBE_MAC_PKT_FLT, NGBE_MAC_PKT_FLT_PR); + reg = rd32(hw, NGBE_MAC_WDG_TIMEOUT); + wr32(hw, NGBE_MAC_WDG_TIMEOUT, reg); + } + + adapter->link_up = link_up; + /* hw->mac.ops.dmac_config is null*/ + if (hw->mac.ops.dmac_config && hw->mac.dmac_config.watchdog_timer) { + u8 num_tcs = netdev_get_num_tc(adapter->netdev); + + if (hw->mac.dmac_config.link_speed != link_speed || + hw->mac.dmac_config.num_tcs != num_tcs) { + hw->mac.dmac_config.link_speed = link_speed; + hw->mac.dmac_config.num_tcs = num_tcs; + 
hw->mac.ops.dmac_config(hw); + } + } + return; +} + +static void ngbe_update_default_up(struct ngbe_adapter *adapter) +{ + u8 up = 0; + adapter->default_up = up; +} + +/** + * ngbe_watchdog_link_is_up - update netif_carrier status and + * print link up message + * @adapter - pointer to the device adapter structure + **/ +static void ngbe_watchdog_link_is_up(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ngbe_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool flow_rx, flow_tx; +#ifdef HAVE_VIRTUAL_STATION + struct net_device *upper; + struct list_head *iter; +#endif + + /* only continue if link was previously down */ + if (netif_carrier_ok(netdev)) + return; + + adapter->flags2 &= ~NGBE_FLAG2_SEARCH_FOR_SFP; + + /* flow_rx, flow_tx report link flow control status */ + flow_rx = (rd32(hw, NGBE_MAC_RX_FLOW_CTRL) & 0x101) == 0x1; + flow_tx = !!(NGBE_RDB_RFCC_RFCE_802_3X & + rd32(hw, NGBE_RDB_RFCC)); + + e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", + (link_speed == NGBE_LINK_SPEED_1GB_FULL ? + "1 Gbps" : + (link_speed == NGBE_LINK_SPEED_100_FULL ? + "100 Mbps" : + (link_speed == NGBE_LINK_SPEED_10_FULL ? + "10 Mbps" : + "unknown speed"))), + ((flow_rx && flow_tx) ? "RX/TX" : + (flow_rx ? "RX" : + (flow_tx ? 
"TX" : "None")))); + + netif_carrier_on(netdev); + + + + netif_tx_wake_all_queues(netdev); +#ifdef HAVE_VIRTUAL_STATION + /* enable any upper devices */ + rtnl_lock(); + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *vlan = netdev_priv(upper); + + if (vlan->fwd_priv) + netif_tx_wake_all_queues(upper); + } + } + rtnl_unlock(); +#endif + /* update the default user priority for VFs */ + ngbe_update_default_up(adapter); + + /* ping all the active vfs to let them know link has changed */ + ngbe_ping_all_vfs_with_link_status(adapter, adapter->link_up); +} + +/** + * ngbe_watchdog_link_is_down - update netif_carrier status and + * print link down message + * @adapter - pointer to the adapter structure + **/ +static void ngbe_watchdog_link_is_down(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + adapter->link_up = false; + adapter->link_speed = 0; + + /* only continue if link was up previously */ + if (!netif_carrier_ok(netdev)) + return; + + +#ifdef HAVE_PTP_1588_CLOCK + if (test_bit(__NGBE_PTP_RUNNING, &adapter->state)) + ngbe_ptp_start_cyclecounter(adapter); + +#endif + e_info(drv, "NIC Link is Down\n"); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + /* ping all the active vfs to let them know link has changed */ + ngbe_ping_all_vfs_with_link_status(adapter, adapter->link_up); +} + +static bool ngbe_ring_tx_pending(struct ngbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ngbe_ring *tx_ring = adapter->tx_ring[i]; + + if (tx_ring->next_to_use != tx_ring->next_to_clean) + return true; + } + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct ngbe_ring *xdp_ring = adapter->xdp_ring[i]; + + if (xdp_ring->next_to_use != xdp_ring->next_to_clean) + return true; + } + + return false; +} + +static bool ngbe_vf_tx_pending(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 
q_per_pool = 1; + + u32 i, j; + + if (!adapter->num_vfs) + return false; + + for (i = 0; i < adapter->num_vfs; i++) { + for (j = 0; j < q_per_pool; j++) { + u32 h, t; + + h = rd32(hw, + NGBE_PX_TR_RPn(q_per_pool, i, j)); + t = rd32(hw, + NGBE_PX_TR_WPn(q_per_pool, i, j)); + + if (h != t) + return true; + } + } + + return false; +} + +/** + * ngbe_watchdog_flush_tx - flush queues on link down + * @adapter - pointer to the device adapter structure + **/ +static void ngbe_watchdog_flush_tx(struct ngbe_adapter *adapter) +{ + if (!netif_carrier_ok(adapter->netdev)) { + if (ngbe_ring_tx_pending(adapter) || + ngbe_vf_tx_pending(adapter)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + e_warn(drv, "initiating reset due to lost link with " + "pending Tx work\n"); + adapter->flags2 |= NGBE_FLAG2_PF_RESET_REQUESTED; + } + } +} + +#ifdef CONFIG_PCI_IOV +static inline void ngbe_issue_vf_flr(struct ngbe_adapter *adapter, + struct pci_dev *vfdev) +{ + int pos, i; + u16 status; + + /* wait for pending transactions on the bus */ + for (i = 0; i < 4; i++) { + if (i) + msleep((1 << (i - 1)) * 100); + + pcie_capability_read_word(vfdev, PCI_EXP_DEVSTA, &status); + if (!(status & PCI_EXP_DEVSTA_TRPND)) + goto clear; + } + + e_dev_warn("Issuing VFLR with pending transactions\n"); + +clear: + pos = pci_find_capability(vfdev, PCI_CAP_ID_EXP); + if (!pos) + return; + + e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev)); + pci_write_config_word(vfdev, pos + PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_BCR_FLR); + msleep(100); +} + + +static void ngbe_spoof_check(struct ngbe_adapter *adapter) +{ + u32 ssvpc; + + /* Do not perform spoof check if in non-IOV mode */ + if (adapter->num_vfs == 0) + return; + ssvpc = rd32(&adapter->hw, NGBE_TDM_SEC_DRP); + + /* + * ssvpc register is cleared on read, if zero then no + * spoofed packets in the last 
interval. + */ + if (!ssvpc) + return; + + e_warn(drv, "%d Spoofed packets detected\n", ssvpc); +} + +#endif /* CONFIG_PCI_IOV */ + +/** + * ngbe_watchdog_subtask - check and bring link up + * @adapter - pointer to the device adapter structure + **/ +static void ngbe_watchdog_subtask(struct ngbe_adapter *adapter) +{ + + /* if interface is down do nothing */ + if (test_bit(__NGBE_DOWN, &adapter->state) || + test_bit(__NGBE_REMOVING, &adapter->state) || + test_bit(__NGBE_RESETTING, &adapter->state)) + return; + + ngbe_watchdog_an_complete(adapter); + + if (NGBE_POLL_LINK_STATUS != 1) { + ngbe_watchdog_update_link_status(adapter); + + if (adapter->link_up) + ngbe_watchdog_link_is_up(adapter); + else + ngbe_watchdog_link_is_down(adapter); + } +#ifdef CONFIG_PCI_IOV + ngbe_spoof_check(adapter); +#endif /* CONFIG_PCI_IOV */ + + ngbe_update_stats(adapter); + + ngbe_watchdog_flush_tx(adapter); +} + +/** + * ngbe_service_timer - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void ngbe_service_timer(struct timer_list *t) +{ + struct ngbe_adapter *adapter = from_timer(adapter, t, service_timer); + unsigned long next_event_offset; + struct ngbe_hw *hw = &adapter->hw; + u32 val = 0; + + /* poll faster when waiting for link */ + if ((adapter->flags & NGBE_FLAG_NEED_LINK_UPDATE) || + (adapter->flags & NGBE_FLAG_NEED_ANC_CHECK)) + next_event_offset = HZ / 10; + else + next_event_offset = HZ * 2; + + /* flags to records which func to handle pcie recovery */ + if (rd32(&adapter->hw, NGBE_MIS_PF_SM) == 1) { + val = rd32m(&adapter->hw, NGBE_MIS_PRB_CTL, NGBE_MIS_PRB_CTL_LAN0_UP | + NGBE_MIS_PRB_CTL_LAN1_UP | + NGBE_MIS_PRB_CTL_LAN2_UP | + NGBE_MIS_PRB_CTL_LAN3_UP); + if (val & NGBE_MIS_PRB_CTL_LAN0_UP) { + if (hw->bus.lan_id == 0) { + adapter->flags2 |= NGBE_FLAG2_PCIE_NEED_RECOVER; + e_info(probe, "ngbe_service_timer: set recover on Lan0\n"); + } + } else if (val & NGBE_MIS_PRB_CTL_LAN1_UP) { + if (hw->bus.lan_id == 1) { + adapter->flags2 |= 
NGBE_FLAG2_PCIE_NEED_RECOVER; + e_info(probe, "ngbe_service_timer: set recover on Lan1\n"); + } + } else if (val & NGBE_MIS_PRB_CTL_LAN2_UP) { + if (hw->bus.lan_id == 2) { + adapter->flags2 |= NGBE_FLAG2_PCIE_NEED_RECOVER; + e_info(probe, "ngbe_service_timer: set recover on Lan2\n"); + } + } else if (val & NGBE_MIS_PRB_CTL_LAN3_UP) { + if (hw->bus.lan_id == 3) { + adapter->flags2 |= NGBE_FLAG2_PCIE_NEED_RECOVER; + e_info(probe, "ngbe_service_timer: set recover on Lan3\n"); + } + } + } + + /* Reset the timer */ + mod_timer(&adapter->service_timer, next_event_offset + jiffies); + + ngbe_service_event_schedule(adapter); +} + +static void ngbe_link_check_timer(struct timer_list *t) +{ + struct ngbe_adapter *adapter = from_timer(adapter, t, link_check_timer); + unsigned long next_event_offset = HZ / 1000; + if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi) + next_event_offset = HZ / 10; + + mod_timer(&adapter->link_check_timer, next_event_offset + jiffies); + /* if interface is down do nothing */ + if (test_bit(__NGBE_DOWN, &adapter->state) || + test_bit(__NGBE_REMOVING, &adapter->state) || + test_bit(__NGBE_RESETTING, &adapter->state)) + return; + + ngbe_watchdog_update_link_status(adapter); + + if (adapter->link_up) + ngbe_watchdog_link_is_up(adapter); + else + ngbe_watchdog_link_is_down(adapter); +} + +static void ngbe_reset_subtask(struct ngbe_adapter *adapter) +{ + u32 reset_flag = 0; + u32 value = 0; + + if (!(adapter->flags2 & (NGBE_FLAG2_PF_RESET_REQUESTED | + NGBE_FLAG2_DEV_RESET_REQUESTED | + NGBE_FLAG2_GLOBAL_RESET_REQUESTED | + NGBE_FLAG2_RESET_INTR_RECEIVED))) + return; + + /* If we're already down, just bail */ + if (test_bit(__NGBE_DOWN, &adapter->state) || + test_bit(__NGBE_REMOVING, &adapter->state)) + return; + + netdev_err(adapter->netdev, "Reset adapter\n"); + adapter->tx_timeout_count++; + + rtnl_lock(); + if (adapter->flags2 & NGBE_FLAG2_GLOBAL_RESET_REQUESTED) { + reset_flag |= NGBE_FLAG2_GLOBAL_RESET_REQUESTED; + adapter->flags2 &= 
~NGBE_FLAG2_GLOBAL_RESET_REQUESTED; + } + if (adapter->flags2 & NGBE_FLAG2_DEV_RESET_REQUESTED) { + reset_flag |= NGBE_FLAG2_DEV_RESET_REQUESTED; + adapter->flags2 &= ~NGBE_FLAG2_DEV_RESET_REQUESTED; + } + if (adapter->flags2 & NGBE_FLAG2_PF_RESET_REQUESTED) { + reset_flag |= NGBE_FLAG2_PF_RESET_REQUESTED; + adapter->flags2 &= ~NGBE_FLAG2_PF_RESET_REQUESTED; + } + + if (adapter->flags2 & NGBE_FLAG2_RESET_INTR_RECEIVED) { + /* If there's a recovery already waiting, it takes + * precedence before starting a new reset sequence. + */ + adapter->flags2 &= ~NGBE_FLAG2_RESET_INTR_RECEIVED; + value = rd32m(&adapter->hw, NGBE_MIS_RST_ST, + NGBE_MIS_RST_ST_DEV_RST_TYPE_MASK) >> + NGBE_MIS_RST_ST_DEV_RST_TYPE_SHIFT; + if (value == NGBE_MIS_RST_ST_DEV_RST_TYPE_SW_RST) { + adapter->hw.reset_type = NGBE_SW_RESET; + + } else if (value == NGBE_MIS_RST_ST_DEV_RST_TYPE_GLOBAL_RST) + adapter->hw.reset_type = NGBE_GLOBAL_RESET; + adapter->hw.force_full_reset = TRUE; + ngbe_reinit_locked(adapter); + adapter->hw.force_full_reset = FALSE; + goto unlock; + } + + if (reset_flag & NGBE_FLAG2_DEV_RESET_REQUESTED) { + /* Request a Device Reset + * + * This will start the chip's countdown to the actual full + * chip reset event, and a warning interrupt to be sent + * to all PFs, including the requestor. Our handler + * for the warning interrupt will deal with the shutdown + * and recovery of the switch setup. + */ + /*debug to open*/ + /*ngbe_dump(adapter);*/ + + wr32m(&adapter->hw, NGBE_MIS_RST, + NGBE_MIS_RST_SW_RST, NGBE_MIS_RST_SW_RST); + e_info(drv, "ngbe_reset_subtask: sw reset\n"); + + } else if (reset_flag & NGBE_FLAG2_PF_RESET_REQUESTED) { + /*debug to open*/ + /*ngbe_dump(adapter);*/ + ngbe_reinit_locked(adapter); + } else if (reset_flag & NGBE_FLAG2_GLOBAL_RESET_REQUESTED) { + /* Request a Global Reset + * + * This will start the chip's countdown to the actual full + * chip reset event, and a warning interrupt to be sent + * to all PFs, including the requestor. 
Our handler + * for the warning interrupt will deal with the shutdown + * and recovery of the switch setup. + */ + /*debug to open*/ + /*ngbe_dump(adapter);*/ + pci_save_state(adapter->pdev); + if (ngbe_mng_present(&adapter->hw)) { + ngbe_reset_hostif(&adapter->hw); + e_info(drv, "ngbe_reset_subtask: lan reset\n"); + + } else { + wr32m(&adapter->hw, NGBE_MIS_RST, + NGBE_MIS_RST_GLOBAL_RST, + NGBE_MIS_RST_GLOBAL_RST); + e_info(drv, "ngbe_reset_subtask: global reset\n"); + } + } + +unlock: + rtnl_unlock(); +} + +static void ngbe_check_pcie_subtask(struct ngbe_adapter *adapter) +{ + if (!(adapter->flags2 & NGBE_FLAG2_PCIE_NEED_RECOVER)) + return; + + ngbe_print_tx_hang_status(adapter); + + wr32m(&adapter->hw, NGBE_MIS_PF_SM, NGBE_MIS_PF_SM_SM, 0); + if ((NGBE_PCIE_RECOVER == 1) && !(adapter->flags & NGBE_FLAG_SRIOV_ENABLED)) { + e_info(probe, "do recovery\n"); + ngbe_pcie_do_recovery(adapter->pdev); + } + adapter->flags2 &= ~NGBE_FLAG2_PCIE_NEED_RECOVER; +} + +/** + * ngbe_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +static void ngbe_service_task(struct work_struct *work) +{ + struct ngbe_adapter *adapter = container_of(work, + struct ngbe_adapter, + service_task); + if (NGBE_REMOVED(adapter->hw.hw_addr)) { + if (!test_bit(__NGBE_DOWN, &adapter->state)) { + rtnl_lock(); + ngbe_down(adapter); + rtnl_unlock(); + } + ngbe_service_event_complete(adapter); + return; + } + + ngbe_check_pcie_subtask(adapter); + ngbe_reset_subtask(adapter); + ngbe_check_overtemp_subtask(adapter); + ngbe_watchdog_subtask(adapter); + ngbe_check_hang_subtask(adapter); +#ifdef HAVE_PTP_1588_CLOCK + if (test_bit(__NGBE_PTP_RUNNING, &adapter->state)) { + ngbe_ptp_overflow_check(adapter); + if (unlikely(adapter->flags & + NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)) + ngbe_ptp_rx_hang(adapter); + } +#endif /* HAVE_PTP_1588_CLOCK */ + + ngbe_service_event_complete(adapter); +} + +static u8 get_ipv6_proto(struct sk_buff *skb, int offset) +{ + struct 
ipv6hdr *hdr = (struct ipv6hdr *)(skb->data + offset); + u8 nexthdr = hdr->nexthdr; + + offset += sizeof(struct ipv6hdr); + + while (ipv6_ext_hdr(nexthdr)) { + struct ipv6_opt_hdr _hdr, *hp; + + if (nexthdr == NEXTHDR_NONE) + break; + + hp = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr); + if (!hp) + break; + + if (nexthdr == NEXTHDR_FRAGMENT) { + break; + } else if (nexthdr == NEXTHDR_AUTH) { + offset += ipv6_authlen(hp); + } else { + offset += ipv6_optlen(hp); + } + + nexthdr = hp->nexthdr; + } + + return nexthdr; +} + +union network_header { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + void *raw; +}; + +static ngbe_dptype encode_tx_desc_ptype(const struct ngbe_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; +#ifdef HAVE_ENCAP_TSO_OFFLOAD + u8 tun_prot = 0; +#endif + u8 l4_prot = 0; + u8 ptype = 0; + +#ifdef HAVE_ENCAP_TSO_OFFLOAD + if (skb->encapsulation) { + union network_header hdr; + + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) + goto encap_frag; + ptype = NGBE_PTYPE_TUN_IPV4; + break; + case __constant_htons(ETH_P_IPV6): + tun_prot = get_ipv6_proto(skb, skb_network_offset(skb)); + if (tun_prot == NEXTHDR_FRAGMENT) + goto encap_frag; + ptype = NGBE_PTYPE_TUN_IPV6; + break; + default: + goto exit; + } + + if (tun_prot == IPPROTO_IPIP) { + hdr.raw = (void *)inner_ip_hdr(skb); + ptype |= NGBE_PTYPE_PKT_IPIP; + } else if (tun_prot == IPPROTO_UDP) { + hdr.raw = (void *)inner_ip_hdr(skb); + } else { + goto exit; + } + + switch (hdr.ipv4->version) { + case IPVERSION: + l4_prot = hdr.ipv4->protocol; + if (hdr.ipv4->frag_off & htons(IP_MF | IP_OFFSET)) { + ptype |= NGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; + case 6: + l4_prot = get_ipv6_proto(skb, + skb_inner_network_offset(skb)); + ptype |= NGBE_PTYPE_PKT_IPV6; + if (l4_prot == NEXTHDR_FRAGMENT) { + ptype |= NGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; + default: + goto exit; + 
} + } else { +encap_frag: +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + l4_prot = ip_hdr(skb)->protocol; + ptype = NGBE_PTYPE_PKT_IP; + if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { + ptype |= NGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; +#ifdef NETIF_F_IPV6_CSUM + case __constant_htons(ETH_P_IPV6): + l4_prot = get_ipv6_proto(skb, skb_network_offset(skb)); + ptype = NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6; + if (l4_prot == NEXTHDR_FRAGMENT) { + ptype |= NGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; +#endif /* NETIF_F_IPV6_CSUM */ + case __constant_htons(ETH_P_1588): + ptype = NGBE_PTYPE_L2_TS; + goto exit; + case __constant_htons(ETH_P_FIP): + ptype = NGBE_PTYPE_L2_FIP; + goto exit; + case __constant_htons(NGBE_ETH_P_LLDP): + ptype = NGBE_PTYPE_L2_LLDP; + goto exit; + case __constant_htons(NGBE_ETH_P_CNM): + ptype = NGBE_PTYPE_L2_CNM; + goto exit; + case __constant_htons(ETH_P_PAE): + ptype = NGBE_PTYPE_L2_EAPOL; + goto exit; + case __constant_htons(ETH_P_ARP): + ptype = NGBE_PTYPE_L2_ARP; + goto exit; + default: + ptype = NGBE_PTYPE_L2_MAC; + goto exit; + } +#ifdef HAVE_ENCAP_TSO_OFFLOAD + } +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + + switch (l4_prot) { + case IPPROTO_TCP: + ptype |= NGBE_PTYPE_TYP_TCP; + break; + case IPPROTO_UDP: + ptype |= NGBE_PTYPE_TYP_UDP; + break; +#ifdef HAVE_SCTP + case IPPROTO_SCTP: + ptype |= NGBE_PTYPE_TYP_SCTP; + break; +#endif /* HAVE_SCTP */ + default: + ptype |= NGBE_PTYPE_TYP_IP; + break; + } + +exit: + return ngbe_decode_ptype(ptype); +} + +static int ngbe_tso(struct ngbe_ring *tx_ring, + struct ngbe_tx_buffer *first, + u8 *hdr_len, ngbe_dptype dptype) +{ +#ifndef NETIF_F_TSO + return 0; +#else + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens, type_tucmd; + u32 mss_l4len_idx, l4len; + struct tcphdr *tcph; + struct iphdr *iph; + u32 tunhdr_eiplen_tunlen = 0; +#ifdef HAVE_ENCAP_TSO_OFFLOAD + u8 tun_prot = 0; + bool enc = skb->encapsulation; +#endif /* 
HAVE_ENCAP_TSO_OFFLOAD */ +#ifdef NETIF_F_TSO6 + struct ipv6hdr *ipv6h; +#endif + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + if (skb_header_cloned(skb)) { + int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) + return err; + } + +#ifdef HAVE_ENCAP_TSO_OFFLOAD + iph = enc ? inner_ip_hdr(skb) : ip_hdr(skb); +#else + iph = ip_hdr(skb); +#endif + if (iph->version == 4) { +#ifdef HAVE_ENCAP_TSO_OFFLOAD + tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb); +#else + tcph = tcp_hdr(skb); +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + iph->tot_len = 0; + iph->check = 0; + tcph->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + first->tx_flags |= NGBE_TX_FLAGS_TSO | + NGBE_TX_FLAGS_CSUM | + NGBE_TX_FLAGS_IPV4 | + NGBE_TX_FLAGS_CC; + +#ifdef NETIF_F_TSO6 + } else if (iph->version == 6 && skb_is_gso_v6(skb)) { +#ifdef HAVE_ENCAP_TSO_OFFLOAD + ipv6h = enc ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); + tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb); +#else + ipv6h = ipv6_hdr(skb); + tcph = tcp_hdr(skb); +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + ipv6h->payload_len = 0; + tcph->check = + ~csum_ipv6_magic(&ipv6h->saddr, + &ipv6h->daddr, + 0, IPPROTO_TCP, 0); + first->tx_flags |= NGBE_TX_FLAGS_TSO | + NGBE_TX_FLAGS_CSUM | + NGBE_TX_FLAGS_CC; +#endif /* NETIF_F_TSO6 */ + } + + /* compute header lengths */ +#ifdef HAVE_ENCAP_TSO_OFFLOAD + l4len = enc ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); + *hdr_len = enc ? 
(skb_inner_transport_header(skb) - skb->data) + : skb_transport_offset(skb); + *hdr_len += l4len; +#else + l4len = tcp_hdrlen(skb); + *hdr_len = skb_transport_offset(skb) + l4len; +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* mss_l4len_id: use 0 as index for TSO */ + mss_l4len_idx = l4len << NGBE_TXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << NGBE_TXD_MSS_SHIFT; + + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ +#ifdef HAVE_ENCAP_TSO_OFFLOAD + if (enc) { + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + first->tx_flags |= NGBE_TX_FLAGS_OUTER_IPV4; + break; + case __constant_htons(ETH_P_IPV6): + tun_prot = ipv6_hdr(skb)->nexthdr; + break; + default: + break; + } + switch (tun_prot) { + case IPPROTO_UDP: + tunhdr_eiplen_tunlen = NGBE_TXD_TUNNEL_UDP; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + NGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_GRE: + tunhdr_eiplen_tunlen = NGBE_TXD_TUNNEL_GRE; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + NGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_IPIP: + tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb)- + (char *)ip_hdr(skb)) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT; + break; + default: + break; + } + + vlan_macip_lens = skb_inner_network_header_len(skb) >> 1; + } else + vlan_macip_lens = skb_network_header_len(skb) >> 1; +#else + vlan_macip_lens = skb_network_header_len(skb) >> 1; +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + vlan_macip_lens |= skb_network_offset(skb) << NGBE_TXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & 
NGBE_TX_FLAGS_VLAN_MASK; + + type_tucmd = dptype.ptype << 24; +#ifdef NETIF_F_HW_VLAN_STAG_TX + if (skb->vlan_proto == htons(ETH_P_8021AD)) + type_tucmd |= NGBE_SET_FLAG(first->tx_flags, + NGBE_TX_FLAGS_HW_VLAN, + 0x1 << NGBE_TXD_TAG_TPID_SEL_SHIFT); +#endif + ngbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, + type_tucmd, mss_l4len_idx); + + return 1; +#endif /* !NETIF_F_TSO */ +} + +static void ngbe_tx_csum(struct ngbe_ring *tx_ring, + struct ngbe_tx_buffer *first, ngbe_dptype dptype) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 mss_l4len_idx = 0; + u32 tunhdr_eiplen_tunlen = 0; +#ifdef HAVE_ENCAP_TSO_OFFLOAD + u8 tun_prot = 0; +#endif + u32 type_tucmd; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { + if (!(first->tx_flags & NGBE_TX_FLAGS_HW_VLAN) && + !(first->tx_flags & NGBE_TX_FLAGS_CC)) + return; + vlan_macip_lens = skb_network_offset(skb) << + NGBE_TXD_MACLEN_SHIFT; + } else { + u8 l4_prot = 0; +#ifdef HAVE_ENCAP_TSO_OFFLOAD + union { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + u8 *raw; + } network_hdr; + union { + struct tcphdr *tcphdr; + u8 *raw; + } transport_hdr; + + if (skb->encapsulation) { + network_hdr.raw = skb_inner_network_header(skb); + transport_hdr.raw = skb_inner_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << + NGBE_TXD_MACLEN_SHIFT; + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + break; + case __constant_htons(ETH_P_IPV6): + tun_prot = ipv6_hdr(skb)->nexthdr; + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but version=%d\n", + network_hdr.ipv4->version); + } + return; + } + switch (tun_prot) { + case IPPROTO_UDP: + tunhdr_eiplen_tunlen = NGBE_TXD_TUNNEL_UDP; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + NGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case 
IPPROTO_GRE: + tunhdr_eiplen_tunlen = NGBE_TXD_TUNNEL_GRE; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + NGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_IPIP: + tunhdr_eiplen_tunlen = + (((char *)inner_ip_hdr(skb)- + (char *)ip_hdr(skb)) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT; + break; + default: + break; + } + + } else { + network_hdr.raw = skb_network_header(skb); + transport_hdr.raw = skb_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << + NGBE_TXD_MACLEN_SHIFT; + } + + switch (network_hdr.ipv4->version) { + case IPVERSION: + vlan_macip_lens |= + (transport_hdr.raw - network_hdr.raw) >> 1; + l4_prot = network_hdr.ipv4->protocol; + break; + case 6: + vlan_macip_lens |= + (transport_hdr.raw - network_hdr.raw) >> 1; + l4_prot = network_hdr.ipv6->nexthdr; + break; + default: + break; + } + +#else /* HAVE_ENCAP_TSO_OFFLOAD */ + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + vlan_macip_lens |= skb_network_header_len(skb) >> 1; + l4_prot = ip_hdr(skb)->protocol; + break; +#ifdef NETIF_F_IPV6_CSUM + case __constant_htons(ETH_P_IPV6): + vlan_macip_lens |= skb_network_header_len(skb) >> 1; + l4_prot = ipv6_hdr(skb)->nexthdr; + break; +#endif /* NETIF_F_IPV6_CSUM */ + default: + break; + } +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + + switch (l4_prot) { + case IPPROTO_TCP: +#ifdef HAVE_ENCAP_TSO_OFFLOAD + mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) << + NGBE_TXD_L4LEN_SHIFT; +#else + mss_l4len_idx = tcp_hdrlen(skb) << + NGBE_TXD_L4LEN_SHIFT; +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + break; +#ifdef HAVE_SCTP + case IPPROTO_SCTP: + mss_l4len_idx = sizeof(struct sctphdr) << + NGBE_TXD_L4LEN_SHIFT; + break; +#endif /* HAVE_SCTP */ + case IPPROTO_UDP: + mss_l4len_idx = sizeof(struct udphdr) << + NGBE_TXD_L4LEN_SHIFT; + break; + default: + break; + } + + /* update TX checksum flag */ + first->tx_flags |= 
NGBE_TX_FLAGS_CSUM; + } + first->tx_flags |= NGBE_TX_FLAGS_CC; + /* vlan_macip_lens: MACLEN, VLAN tag */ +#ifndef HAVE_ENCAP_TSO_OFFLOAD + vlan_macip_lens |= skb_network_offset(skb) << NGBE_TXD_MACLEN_SHIFT; +#endif /* !HAVE_ENCAP_TSO_OFFLOAD */ + vlan_macip_lens |= first->tx_flags & NGBE_TX_FLAGS_VLAN_MASK; + + type_tucmd = dptype.ptype << 24; +#ifdef NETIF_F_HW_VLAN_STAG_TX + if (skb->vlan_proto == htons(ETH_P_8021AD)) + type_tucmd |= NGBE_SET_FLAG(first->tx_flags, + NGBE_TX_FLAGS_HW_VLAN, + 0x1 << NGBE_TXD_TAG_TPID_SEL_SHIFT); +#endif + ngbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, + type_tucmd, mss_l4len_idx); +} + +static u32 ngbe_tx_cmd_type(u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = NGBE_TXD_DTYP_DATA | + NGBE_TXD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= NGBE_SET_FLAG(tx_flags, NGBE_TX_FLAGS_HW_VLAN, + NGBE_TXD_VLE); + + /* set segmentation enable bits for TSO/FSO */ + cmd_type |= NGBE_SET_FLAG(tx_flags, NGBE_TX_FLAGS_TSO, + NGBE_TXD_TSE); + + /* set timestamp bit if present */ + cmd_type |= NGBE_SET_FLAG(tx_flags, NGBE_TX_FLAGS_TSTAMP, + NGBE_TXD_MAC_TSTAMP); + + cmd_type |= NGBE_SET_FLAG(tx_flags, NGBE_TX_FLAGS_LINKSEC, + NGBE_TXD_LINKSEC); + + return cmd_type; +} + +static void ngbe_tx_olinfo_status(union ngbe_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << NGBE_TXD_PAYLEN_SHIFT; + + /* enable L4 checksum for TSO and TX checksum offload */ + olinfo_status |= NGBE_SET_FLAG(tx_flags, + NGBE_TX_FLAGS_CSUM, + NGBE_TXD_L4CS); + + /* enble IPv4 checksum for TSO */ + olinfo_status |= NGBE_SET_FLAG(tx_flags, + NGBE_TX_FLAGS_IPV4, + NGBE_TXD_IIPCS); + /* enable outer IPv4 checksum for TSO */ + olinfo_status |= NGBE_SET_FLAG(tx_flags, + NGBE_TX_FLAGS_OUTER_IPV4, + NGBE_TXD_EIPCS); + /* + * Check Context must be set if Tx switch is enabled, which it + * always is for case where virtual functions are running + */ + 
olinfo_status |= NGBE_SET_FLAG(tx_flags, + NGBE_TX_FLAGS_CC, + NGBE_TXD_CC); + + olinfo_status |= NGBE_SET_FLAG(tx_flags, + NGBE_TX_FLAGS_IPSEC, + NGBE_TXD_IPSEC); + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int __ngbe_maybe_stop_tx(struct ngbe_ring *tx_ring, u16 size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(ngbe_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +static inline int ngbe_maybe_stop_tx(struct ngbe_ring *tx_ring, u16 size) +{ + if (likely(ngbe_desc_unused(tx_ring) >= size)) + return 0; + + return __ngbe_maybe_stop_tx(tx_ring, size); +} + + +static int ngbe_tx_map(struct ngbe_ring *tx_ring, + struct ngbe_tx_buffer *first, + const u8 hdr_len) +{ + struct sk_buff *skb = first->skb; + struct ngbe_tx_buffer *tx_buffer; + union ngbe_tx_desc *tx_desc; + skb_frag_t *frag; + dma_addr_t dma; + unsigned int data_len, size; + u32 tx_flags = first->tx_flags; + u32 cmd_type = ngbe_tx_cmd_type(tx_flags); + u16 i = tx_ring->next_to_use; + + tx_desc = NGBE_TX_DESC(tx_ring, i); + + ngbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + 
while (unlikely(size > NGBE_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ NGBE_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = NGBE_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += NGBE_MAX_DATA_PER_TXD; + size -= NGBE_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = NGBE_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | NGBE_TXD_CMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + +#ifndef HAVE_TRANS_START_IN_QUEUE + netdev_ring(tx_ring)->trans_start = first->time_stamp; +#endif + /* + * Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. 
+ */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + ngbe_maybe_stop_tx(tx_ring, DESC_NEEDED); + + skb_tx_timestamp(skb); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { + writel(i, tx_ring->tail); +#ifndef SPIN_UNLOCK_IMPLIES_MMIOWB + + /* The following mmiowb() is required on certain + * architechtures (IA64/Altix in particular) in order to + * synchronize the I/O calls with respect to a spin lock. This + * is because the wmb() on those architectures does not + * guarantee anything for posted I/O writes. + * + * Note that the associated spin_unlock() is not within the + * driver code, but in the networking core stack. + */ + mmiowb(); +#endif /* SPIN_UNLOCK_IMPLIES_MMIOWB */ + } + + return 0; +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_buffer_info map */ + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[i]; + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + if (tx_buffer == first) + break; + if (i == 0) + i += tx_ring->count; + i--; + } + + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + tx_ring->next_to_use = i; + + return -1; +} + + +#ifdef HAVE_NETDEV_SELECT_QUEUE +#if IS_ENABLED(CONFIG_FCOE) + +#if defined(HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED) +static u16 ngbe_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +#elif defined(HAVE_NDO_SELECT_QUEUE_SB_DEV) +static u16 ngbe_select_queue(struct net_device *dev, struct sk_buff *skb, + __always_unused struct net_device *sb_dev, + select_queue_fallback_t fallback) +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) +static u16 ngbe_select_queue(struct net_device *dev, struct sk_buff *skb, + __always_unused 
void *accel, + select_queue_fallback_t fallback) +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL) +static u16 ngbe_select_queue(struct net_device *dev, struct sk_buff *skb, + __always_unused void *accel) +#else +static u16 ngbe_select_queue(struct net_device *dev, struct sk_buff *skb) +#endif /* HAVE_NDO_SELECT_QUEUE_ACCEL */ +{ + int txq; + + /* + * only execute the code below if protocol is FCoE + * or FIP and we have FCoE enabled on the adapter + */ + switch (vlan_get_protocol(skb)) { + case __constant_htons(ETH_P_FIP): + fallthrough; + default: +#if defined(HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED) + return netdev_pick_tx(dev, skb, sb_dev); +#elif defined(HAVE_NDO_SELECT_QUEUE_SB_DEV) + return fallback(dev, skb, sb_dev); +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) + return fallback(dev, skb); +#else + return __netdev_pick_tx(dev, skb); +#endif + } + + txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : + smp_processor_id(); + + + return txq; +} +#endif /* CONFIG_FCOE */ +#endif /* HAVE_NETDEV_SELECT_QUEUE */ + +netdev_tx_t ngbe_xmit_frame_ring(struct sk_buff *skb, + struct ngbe_adapter __maybe_unused *adapter, + struct ngbe_ring *tx_ring) +{ + struct ngbe_tx_buffer *first; + int tso; + u32 tx_flags = 0; + unsigned short f; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = skb->protocol; + u8 hdr_len = 0; + ngbe_dptype dptype; + + /* + * need: 1 descriptor per page * PAGE_SIZE/NGBE_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/NGBE_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)-> + frags[f])); + + if (ngbe_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->skb = 
skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + /* if we have a HW VLAN tag being added default to the HW one */ + if (skb_vlan_tag_present(skb)) { + tx_flags |= skb_vlan_tag_get(skb) << NGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= NGBE_TX_FLAGS_HW_VLAN; + } + + if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) { + struct vlan_hdr *vhdr, _vhdr; + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) + goto out_drop; + + protocol = vhdr->h_vlan_encapsulated_proto; + tx_flags |= NGBE_TX_FLAGS_SW_VLAN; + } + +#ifdef HAVE_PTP_1588_CLOCK +#ifdef SKB_SHARED_TX_IS_UNION + if (unlikely(skb_tx(skb)->hardware) && + adapter->ptp_clock) { + if(!test_and_set_bit_lock(__NGBE_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_tx(skb)->in_progress = 1; +#else + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + adapter->ptp_clock) { + if (!test_and_set_bit_lock(__NGBE_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; +#endif + tx_flags |= NGBE_TX_FLAGS_TSTAMP; + + /* schedule check for Tx timestamp */ + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + schedule_work(&adapter->ptp_tx_work); + } else { + adapter->tx_hwtstamp_skipped++; + } + } + +#endif +#ifdef CONFIG_PCI_IOV + /* + * Use the l2switch_enable flag - would be false if the DMA + * Tx switch had been disabled. 
+ */ + if (adapter->flags & NGBE_FLAG_SRIOV_L2SWITCH_ENABLE) + tx_flags |= NGBE_TX_FLAGS_CC; + +#endif + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + + dptype = encode_tx_desc_ptype(first); + + tso = ngbe_tso(tx_ring, first, &hdr_len, dptype); + if (tso < 0) + goto out_drop; + else if (!tso) + ngbe_tx_csum(tx_ring, first, dptype); + + ngbe_tx_map(tx_ring, first, hdr_len); + +#ifndef HAVE_TRANS_START_IN_QUEUE + tx_ring->netdev->trans_start = jiffies; +#endif + +#ifndef HAVE_SKB_XMIT_MORE + ngbe_maybe_stop_tx(tx_ring, DESC_NEEDED); +#endif + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + e_dev_err("ngbe_xmit_frame_ring drop \n"); + + return NETDEV_TX_OK; + + +} + +static netdev_tx_t ngbe_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_ring *tx_ring; +#ifdef HAVE_TX_MQ + unsigned int r_idx = skb->queue_mapping; +#endif + + if (!netif_carrier_ok(netdev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* + * The minimum packet size for olinfo paylen is 17 so pad the skb + * in order to meet this minimum size requirement. 
+ */ + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; + +#ifdef HAVE_TX_MQ + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + tx_ring = adapter->tx_ring[r_idx]; +#else + tx_ring = adapter->tx_ring[0]; +#endif + + return ngbe_xmit_frame_ring(skb, adapter, tx_ring); +} + +/** + * ngbe_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int ngbe_set_mac(struct net_device *netdev, void *p) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + ngbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + ngbe_mac_set_default_filter(adapter, hw->mac.addr); + e_info(drv, "The mac has been set to %02X:%02X:%02X:%02X:%02X:%02X\n", + hw->mac.addr[0], hw->mac.addr[1], hw->mac.addr[2], + hw->mac.addr[3], hw->mac.addr[4], hw->mac.addr[5]); + + return 0; +} + +static int ngbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ +#ifdef HAVE_PTP_1588_CLOCK + struct ngbe_adapter *adapter = netdev_priv(netdev); + +#endif + switch (cmd) { +#ifdef HAVE_PTP_1588_CLOCK +#ifdef SIOCGHWTSTAMP + case SIOCGHWTSTAMP: + return ngbe_ptp_get_ts_config(adapter, ifr); +#endif + case SIOCSHWTSTAMP: + return ngbe_ptp_set_ts_config(adapter, ifr); +#endif +#ifdef ETHTOOL_OPS_COMPAT + case SIOCETHTOOL: + return ethtool_ioctl(ifr); +#endif + case SIOCGMIIREG: + case SIOCSMIIREG: + default: + return -EOPNOTSUPP; + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
+ */ +static void ngbe_netpoll(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + /* if interface is down do nothing */ + if (test_bit(__NGBE_DOWN, &adapter->state)) + return; + + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) { + int i; + for (i = 0; i < adapter->num_q_vectors; i++) { + adapter->q_vector[i]->netpoll_rx = true; + ngbe_msix_clean_rings(0, adapter->q_vector[i]); + adapter->q_vector[i]->netpoll_rx = false; + } + } else { + ngbe_intr(0, adapter); + } +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +void ngbe_save_ring_stats(struct ngbe_adapter *adapter) +{ + struct ngbe_ring *ring; + int i = 0; + + memset(adapter->old_tx_qstats, 0, sizeof(struct ngbe_queue_stats)*MAX_TX_QUEUES); + memset(adapter->old_tx_stats, 0, sizeof(struct ngbe_tx_queue_stats) * MAX_TX_QUEUES); + memset(adapter->old_rx_qstats, 0, sizeof(struct ngbe_queue_stats)*MAX_RX_QUEUES); + memset(adapter->old_rx_stats, 0, sizeof(struct ngbe_rx_queue_stats)*MAX_RX_QUEUES); + + for (;i < adapter->num_q_vectors;i++) { + struct ngbe_q_vector *q_vector = adapter->q_vector[i]; + ngbe_for_each_ring(ring, q_vector->tx) { + + adapter->old_tx_qstats[i].packets += ring->stats.packets; + adapter->old_tx_qstats[i].bytes += ring->stats.bytes; +#ifdef BP_EXTENDED_STATS + adapter->old_tx_qstats[i].yields += ring->stats.yields; + adapter->old_tx_qstats[i].misses += ring->stats.misses; + adapter->old_tx_qstats[i].cleaned += ring->stats.cleaned; +#endif + adapter->old_tx_stats[i].restart_queue += ring->tx_stats.restart_queue; + adapter->old_tx_stats[i].tx_busy += ring->tx_stats.tx_busy; + adapter->old_tx_stats[i].tx_done_old += ring->tx_stats.tx_done_old; + } + ngbe_for_each_ring(ring, q_vector->rx) + { + adapter->old_rx_qstats[i] = ring->stats; + adapter->old_rx_stats[i] = ring->rx_stats; + } + } +} + +void ngbe_set_ring_stats(struct ngbe_adapter *adapter) +{ + struct ngbe_ring *ring; + int i = 0; + + for (;i < adapter->num_q_vectors;i++) { + ring = 
adapter->q_vector[i]->tx.ring; + ring->stats = adapter->old_tx_qstats[i]; + ring->tx_stats = adapter->old_tx_stats[i]; + ring = adapter->q_vector[i]->rx.ring; + ring->stats = adapter->old_rx_qstats[i]; + ring->rx_stats = adapter->old_rx_stats[i]; + } +} + +/** + * ngbe_setup_tc - routine to configure net_device for multiple traffic + * classes. + * + * @netdev: net device to configure + * @tc: number of traffic classes to enable + */ +int ngbe_setup_tc(struct net_device *dev, u8 tc, bool save_stats) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + + if (save_stats) + ngbe_save_ring_stats(adapter); + /* Hardware has to reinitialize queues and interrupts to + * match packet buffer alignment. Unfortunately, the + * hardware is not flexible enough to do this dynamically. + */ + if (adapter->xdp_prog) { + if (adapter->num_rx_queues > MAX_RX_QUEUES / 2) { + adapter->old_rss_limit = adapter->ring_feature[RING_F_RSS].limit; + adapter->ring_feature[RING_F_RSS].limit = MAX_RX_QUEUES / 2; + e_dev_info("limit tx rx ring to 4 " + "because xdpring take up half of the txring"); + } + else { + adapter->old_rss_limit = 0; + } + } + + if (netif_running(dev)) + ngbe_close(dev); + else + ngbe_reset(adapter); + + ngbe_clear_interrupt_scheme(adapter); + + if (tc) { + netdev_set_num_tc(dev, tc); + } else { + netdev_reset_tc(dev); + } + ngbe_init_interrupt_scheme(adapter); + if (save_stats) + ngbe_set_ring_stats(adapter); + if (netif_running(dev)) + ngbe_open(dev); + + return 0; +} + +#ifdef CONFIG_PCI_IOV +void ngbe_sriov_reinit(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + rtnl_lock(); + ngbe_setup_tc(netdev, netdev_get_num_tc(netdev), 0); + rtnl_unlock(); +} +#endif + +void ngbe_do_reset(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + ngbe_reinit_locked(adapter); + else + ngbe_reset(adapter); +} + +#ifdef HAVE_NDO_SET_FEATURES +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +static 
u32 ngbe_fix_features(struct net_device *netdev, u32 features) +#else +static netdev_features_t ngbe_fix_features(struct net_device *netdev, + netdev_features_t features) +#endif +{ + /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + +#ifdef NGBE_NO_LRO + /* Turn off LRO if not RSC capable */ + features &= ~NETIF_F_LRO; +#endif + +#if (defined NETIF_F_HW_VLAN_CTAG_RX) && (defined NETIF_F_HW_VLAN_STAG_RX) + if (!(features & NETIF_F_HW_VLAN_CTAG_RX)) + features &= ~NETIF_F_HW_VLAN_STAG_RX; + else + features |= NETIF_F_HW_VLAN_STAG_RX; + if (!(features & NETIF_F_HW_VLAN_CTAG_TX)) + features &= ~NETIF_F_HW_VLAN_STAG_TX; + else + features |= NETIF_F_HW_VLAN_STAG_TX; +#endif + return features; +} + +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +static int ngbe_set_features(struct net_device *netdev, u32 features) +#else +static int ngbe_set_features(struct net_device *netdev, + netdev_features_t features) +#endif +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + bool need_reset = false; + +#if (defined NETIF_F_HW_VLAN_CTAG_RX) && (defined NETIF_F_HW_VLAN_STAG_RX) + if ((features & NETIF_F_HW_VLAN_CTAG_RX) && + (features & NETIF_F_HW_VLAN_STAG_RX)) +#elif (defined NETIF_F_HW_VLAN_CTAG_RX) + if (features & NETIF_F_HW_VLAN_CTAG_RX) +#elif (defined NETIF_F_HW_VLAN_STAG_RX) + if (features & NETIF_F_HW_VLAN_STAG_RX) +#else + if (features & NETIF_F_HW_VLAN_RX) +#endif + ngbe_vlan_strip_enable(adapter); + else + ngbe_vlan_strip_disable(adapter); + + if (features & NETIF_F_RXHASH) { + if (!(adapter->flags2 & NGBE_FLAG2_RSS_ENABLED)) { + wr32m(&adapter->hw, NGBE_RDB_RA_CTL, + NGBE_RDB_RA_CTL_RSS_EN, NGBE_RDB_RA_CTL_RSS_EN); + adapter->flags2 |= NGBE_FLAG2_RSS_ENABLED; + } + } else { + if (adapter->flags2 & NGBE_FLAG2_RSS_ENABLED) { + wr32m(&adapter->hw, NGBE_RDB_RA_CTL, + NGBE_RDB_RA_CTL_RSS_EN, ~NGBE_RDB_RA_CTL_RSS_EN); + adapter->flags2 &= ~NGBE_FLAG2_RSS_ENABLED; + } + } + + + if 
(need_reset) + ngbe_do_reset(netdev); + + return 0; + +} +#endif /* HAVE_NDO_SET_FEATURES */ + + +#ifdef HAVE_NDO_GSO_CHECK +static bool +ngbe_gso_check(struct sk_buff *skb, __always_unused struct net_device *dev) +{ + return vxlan_gso_check(skb); +} +#endif /* HAVE_NDO_GSO_CHECK */ + +#ifdef HAVE_FDB_OPS +#ifdef USE_CONST_DEV_UC_CHAR +static int ngbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, +#ifdef HAVE_NDO_FDB_ADD_VID + u16 vid, +#endif +#ifdef HAVE_NDO_FDB_ADD_EXTACK + u16 flags, + struct netlink_ext_ack __always_unused *extack) +#else + u16 flags) +#endif +#else +static int ngbe_ndo_fdb_add(struct ndmsg *ndm, + struct net_device *dev, + unsigned char *addr, + u16 flags) +#endif /* USE_CONST_DEV_UC_CHAR */ +{ + /* guarantee we can provide a unique filter for the unicast address */ + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { + if (NGBE_MAX_PF_MACVLANS <= netdev_uc_count(dev)) + return -ENOMEM; + } + +#ifdef USE_CONST_DEV_UC_CHAR +#ifdef HAVE_NDO_FDB_ADD_VID + return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); +#else + return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags); +#endif /* HAVE_NDO_FDB_ADD_VID */ +#else + return ndo_dflt_fdb_add(ndm, dev, addr, flags); +#endif /* USE_CONST_DEV_UC_CHAR */ +} + +#ifdef HAVE_BRIDGE_ATTRIBS +#ifdef HAVE_NDO_BRIDGE_SETLINK_EXTACK +static int ngbe_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh, + __always_unused u16 flags, + struct netlink_ext_ack __always_unused *ext) +#elif defined(HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS) +static int ngbe_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh, + __always_unused u16 flags) +#else +static int ngbe_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh) +#endif /* HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS */ +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + struct nlattr *attr, *br_spec; + int rem; + + if (!(adapter->flags & 
NGBE_FLAG_SRIOV_ENABLED)) + return -EOPNOTSUPP; + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + mode = nla_get_u16(attr); + if (mode == BRIDGE_MODE_VEPA) { + adapter->flags |= NGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } else if (mode == BRIDGE_MODE_VEB) { + adapter->flags &= ~NGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } else { + return -EINVAL; + } + + adapter->bridge_mode = mode; + + /* re-configure settings related to bridge mode */ + ngbe_configure_bridge_mode(adapter); + + e_info(drv, "enabling bridge mode: %s\n", + mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); + } + + return 0; +} + +#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +static int ngbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __maybe_unused filter_mask, + int nlflags) +#elif defined(HAVE_BRIDGE_FILTER) +static int ngbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __always_unused filter_mask) +#else +static int ngbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev) +#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */ +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + u16 mode; + + if (!(adapter->flags & NGBE_FLAG_SRIOV_ENABLED)) + return 0; + + mode = adapter->bridge_mode; +#ifdef HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags, + filter_mask, NULL); +#elif defined(HAVE_NDO_BRIDGE_GETLINK_NLFLAGS) + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags); +#elif defined(HAVE_NDO_FDB_ADD_VID) || \ + defined (NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS) + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0); +#else + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode); +#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */ +} +#endif /* HAVE_BRIDGE_ATTRIBS */ +#endif /* 
HAVE_FDB_OPS */ + +#ifdef HAVE_NDO_FEATURES_CHECK +#define NGBE_MAX_TUNNEL_HDR_LEN 80 +static netdev_features_t ngbe_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + u32 vlan_num = 0; + u16 vlan_depth = skb->mac_len; + __be16 type = skb->protocol; + struct vlan_hdr *vh; + + if (skb_vlan_tag_present(skb)) { + vlan_num++; + } + + if (vlan_depth) { + vlan_depth -= VLAN_HLEN; + } else { + vlan_depth = ETH_HLEN; + } + + while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { + vlan_num++; + vh = (struct vlan_hdr *)(skb->data + vlan_depth); + type = vh->h_vlan_encapsulated_proto; + vlan_depth += VLAN_HLEN; + + } + + if (vlan_num > 2) + features &= ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX); + + if (skb->encapsulation) { + if (unlikely(skb_inner_mac_header(skb) - + skb_transport_header(skb) > + NGBE_MAX_TUNNEL_HDR_LEN)) + return features & ~NETIF_F_CSUM_MASK; + } + return features; +} +#endif /* HAVE_NDO_FEATURES_CHECK */ + + +#ifdef HAVE_XDP_SUPPORT +static int ngbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) +{ + int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + struct ngbe_adapter *adapter = netdev_priv(dev); + struct bpf_prog *old_prog; + bool need_reset; + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) + return -EINVAL; + + if (adapter->flags & NGBE_FLAG_DCB_ENABLED) + return -EINVAL; + + if (adapter->xdp_prog && prog) { + e_dev_err("XDP can't be active at the same time"); + return -EBUSY; + } + + /* verify ngbe ring attributes are sufficient for XDP */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ngbe_ring *ring = adapter->rx_ring[i]; + + if (frame_size > ngbe_rx_bufsz(ring)) + return -EINVAL; + } + + old_prog = xchg(&adapter->xdp_prog, prog); + need_reset = (!!prog != !!old_prog); + + /* If transitioning XDP modes reconfigure rings */ + if (need_reset) { + int err = 0; + if (!adapter->xdp_prog && adapter->old_rss_limit) + 
adapter->ring_feature[RING_F_RSS].limit = adapter->old_rss_limit; + + err = ngbe_setup_tc(dev, netdev_get_num_tc(dev), 1); + + if (err) { + rcu_assign_pointer(adapter->xdp_prog, old_prog); + return -EINVAL; + } + } else { + for (i = 0; i < adapter->num_rx_queues; i++) + xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog); + } + if (adapter->xdp_prog) + e_dev_info("xdp program is setup"); + else + e_dev_info("xdp program not load"); + if (old_prog) + bpf_prog_put(old_prog); + + return 0; +} + + +#ifdef HAVE_NDO_BPF +static int ngbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) +#else +static int ngbe_xdp(struct net_device *dev, struct netdev_xdp *xdp) +#endif +{ +#ifdef HAVE_XDP_QUERY_PROG + struct ngbe_adapter *adapter = netdev_priv(dev); +#endif + + switch (xdp->command) { + case XDP_SETUP_PROG: + return ngbe_xdp_setup(dev, xdp->prog); +#ifdef HAVE_XDP_QUERY_PROG + case XDP_QUERY_PROG: +#ifndef NO_NETDEV_BPF_PROG_ATTACHED + xdp->prog_attached = !!(adapter->xdp_prog); +#endif /* !NO_NETDEV_BPF_PROG_ATTACHED */ + xdp->prog_id = adapter->xdp_prog ? + adapter->xdp_prog->aux->id : 0; + return 0; +#endif + default: + return -EINVAL; + } +} +#ifdef HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS +static int ngbe_xdp_xmit(struct net_device *dev, int n, + struct xdp_frame **frames, u32 flags) +#else +static int ngbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) +#endif +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + struct ngbe_ring *ring; +#ifdef HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS + int drops = 0; + int i; +#else + int err; +#endif + if (unlikely(test_bit(__NGBE_DOWN, &adapter->state))) + return -ENETDOWN; + +#ifdef HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; +#endif + /* During program transitions its possible adapter->xdp_prog is assigned + * but ring has not been configured yet. In this case simply abort xmit. + */ + ring = adapter->xdp_prog ? 
adapter->xdp_ring[smp_processor_id() % adapter->num_xdp_queues] : NULL; + if (unlikely(!ring)) + return -ENXIO; +#ifdef HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS + if (static_branch_unlikely(&ngbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + int err; + + err = ngbe_xmit_xdp_ring(ring, xdpf); + if (err != NGBE_XDP_TX) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + + if (unlikely(flags & XDP_XMIT_FLUSH)){ + wmb(); + writel(ring->next_to_use, ring->tail); + } + if (static_branch_unlikely(&ngbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); + return n - drops; +#else /* HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS */ + + if (static_branch_unlikely(&ngbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + err = ngbe_xmit_xdp_ring(ring, xdp); + if (static_branch_unlikely(&ngbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); + if (err != NGBE_XDP_TX) + return -ENOSPC; + + return 0; +#endif +} + +#ifndef NO_NDO_XDP_FLUSH +static void ngbe_xdp_flush(struct net_device *dev) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + struct ngbe_ring *ring; + + /* Its possible the device went down between xdp xmit and flush so + * we need to ensure device is still up. + */ + if (unlikely(test_bit(__NGBE_DOWN, &adapter->state))) + return; + + ring = adapter->xdp_prog ? 
adapter->xdp_ring[smp_processor_id() % adapter->num_xdp_queues] : NULL; + if (unlikely(!ring)) + return; + + wmb(); + writel(ring->next_to_use, ring->tail); + + return; +} +#endif /* !NO_NDO_XDP_FLUSH */ + +#endif /* HAVE_XDP_SUPPORT */ + +#ifdef HAVE_VIRTUAL_STATION +static inline int ngbe_inc_vmdqs(struct ngbe_fwd_adapter *accel) +{ + struct ngbe_adapter *adapter = accel->adapter; + + if (++adapter->num_vmdqs > 1 || adapter->num_vfs > 0) + adapter->flags |= NGBE_FLAG_VMDQ_ENABLED | + NGBE_FLAG_SRIOV_ENABLED; + accel->index = find_first_zero_bit(&adapter->fwd_bitmask, + NGBE_MAX_MACVLANS); + set_bit(accel->index, &adapter->fwd_bitmask); + + return 1 + find_last_bit(&adapter->fwd_bitmask, NGBE_MAX_MACVLANS); +} + +static inline int ngbe_dec_vmdqs(struct ngbe_fwd_adapter *accel) +{ + struct ngbe_adapter *adapter = accel->adapter; + + if (--adapter->num_vmdqs == 1 && adapter->num_vfs == 0) + adapter->flags &= ~(NGBE_FLAG_VMDQ_ENABLED | + NGBE_FLAG_SRIOV_ENABLED); + clear_bit(accel->index, &adapter->fwd_bitmask); + + return 1 + find_last_bit(&adapter->fwd_bitmask, NGBE_MAX_MACVLANS); +} + +static void *ngbe_fwd_add(struct net_device *pdev, struct net_device *vdev) +{ + struct ngbe_fwd_adapter *accel = NULL; + struct ngbe_adapter *adapter = netdev_priv(pdev); + int used_pools = adapter->num_vfs + adapter->num_vmdqs; + int err; + + if (test_bit(__NGBE_DOWN, &adapter->state)) + return ERR_PTR(-EPERM); + + /* Hardware has a limited number of available pools. Each VF, and the + * PF require a pool. Check to ensure we don't attempt to use more + * than the available number of pools. 
+ */ + if (used_pools >= NGBE_MAX_VF_FUNCTIONS) + return ERR_PTR(-EINVAL); + +#ifdef CONFIG_RPS + if (vdev->num_rx_queues != vdev->num_tx_queues) { + netdev_info(pdev, "%s: Only supports a single queue count for " + "TX and RX\n", + vdev->name); + return ERR_PTR(-EINVAL); + } +#endif + /* Check for hardware restriction on number of rx/tx queues */ + if (vdev->num_tx_queues != 2 && vdev->num_tx_queues != 4) { + netdev_info(pdev, + "%s: Supports RX/TX Queue counts 2, and 4\n", + pdev->name); + return ERR_PTR(-EINVAL); + } + + accel = kzalloc(sizeof(*accel), GFP_KERNEL); + if (!accel) + return ERR_PTR(-ENOMEM); + accel->adapter = adapter; + + /* Enable VMDq flag so device will be set in VM mode */ + adapter->ring_feature[RING_F_VMDQ].limit = ngbe_inc_vmdqs(accel); + adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues; + + /* Force reinit of ring allocation with VMDQ enabled */ + err = ngbe_setup_tc(pdev, netdev_get_num_tc(pdev), 0); + if (err) + goto fwd_add_err; + + err = ngbe_fwd_ring_up(vdev, accel); + if (err) + goto fwd_add_err; + + netif_tx_start_all_queues(vdev); + return accel; +fwd_add_err: + /* unwind counter and free adapter struct */ + netdev_info(pdev, + "%s: dfwd hardware acceleration failed\n", vdev->name); + ngbe_dec_vmdqs(accel); + kfree(accel); + return ERR_PTR(err); +} + +static void ngbe_fwd_del(struct net_device *pdev, void *fwd_priv) +{ + struct ngbe_fwd_adapter *accel = fwd_priv; + struct ngbe_adapter *adapter = accel->adapter; + + if (!accel || adapter->num_vmdqs <= 1) + return; + + adapter->ring_feature[RING_F_VMDQ].limit = ngbe_dec_vmdqs(accel); + ngbe_fwd_ring_down(accel->vdev, accel); + ngbe_setup_tc(pdev, netdev_get_num_tc(pdev), 0); + netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", + accel->index, adapter->num_vmdqs, + accel->rx_base_queue, + accel->rx_base_queue + adapter->queues_per_pool, + adapter->fwd_bitmask); + kfree(accel); +} +#endif /*HAVE_VIRTUAL_STATION*/ + + +#ifdef HAVE_NET_DEVICE_OPS +static const 
struct net_device_ops ngbe_netdev_ops = { + .ndo_open = ngbe_open, + .ndo_stop = ngbe_close, + .ndo_start_xmit = ngbe_xmit_frame, +#if IS_ENABLED(CONFIG_FCOE) + .ndo_select_queue = ngbe_select_queue, +#else +#ifndef HAVE_MQPRIO + .ndo_select_queue = __netdev_pick_tx, +#endif +#endif + .ndo_set_rx_mode = ngbe_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ngbe_set_mac, +#ifdef CENTOS_MTU_PORT_UPDATE + .ndo_change_mtu_rh74 = ngbe_change_mtu, +#else + .ndo_change_mtu = ngbe_change_mtu, +#endif + .ndo_tx_timeout = ngbe_tx_timeout, +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) + .ndo_vlan_rx_add_vid = ngbe_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ngbe_vlan_rx_kill_vid, +#endif + .ndo_do_ioctl = ngbe_ioctl, +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + /* RHEL7 requires this to be defined to enable extended ops. RHEL7 uses the + * function get_ndo_ext to retrieve offsets for extended fields from with the + * net_device_ops struct and ndo_size is checked to determine whether or not + * the offset is valid. 
+ */ + .ndo_size = sizeof(const struct net_device_ops), +#endif +#ifdef IFLA_VF_MAX + .ndo_set_vf_mac = ngbe_ndo_set_vf_mac, +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN + .extended.ndo_set_vf_vlan = ngbe_ndo_set_vf_vlan, +#else + .ndo_set_vf_vlan = ngbe_ndo_set_vf_vlan, +#endif + +/* not support by emerald */ +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + .ndo_set_vf_rate = ngbe_ndo_set_vf_bw, +#else + .ndo_set_vf_tx_rate = ngbe_ndo_set_vf_bw, +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + .ndo_set_vf_spoofchk = ngbe_ndo_set_vf_spoofchk, +#endif +#ifdef HAVE_NDO_SET_VF_TRUST +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + .extended.ndo_set_vf_trust = ngbe_ndo_set_vf_trust, +#else + .ndo_set_vf_trust = ngbe_ndo_set_vf_trust, +#endif /* HAVE_RHEL7_NET_DEVICE_OPS_EXT */ +#endif /* HAVE_NDO_SET_VF_TRUST */ + + .ndo_get_vf_config = ngbe_ndo_get_vf_config, +#endif +#ifdef HAVE_NDO_GET_STATS64 + .ndo_get_stats64 = ngbe_get_stats64, +#else + .ndo_get_stats = ngbe_get_stats, +#endif /* HAVE_NDO_GET_STATS64 */ +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ngbe_netpoll, +#endif +#ifndef HAVE_RHEL6_NET_DEVICE_EXTENDED +#ifdef HAVE_NDO_BUSY_POLL + .ndo_busy_poll = ngbe_busy_poll_recv, +#endif /* HAVE_NDO_BUSY_POLL */ +#endif /* !HAVE_RHEL6_NET_DEVICE_EXTENDED */ +#ifdef HAVE_VLAN_RX_REGISTER + .ndo_vlan_rx_register = &ngbe_vlan_mode, +#endif +#ifdef HAVE_FDB_OPS + .ndo_fdb_add = ngbe_ndo_fdb_add, +#ifndef USE_DEFAULT_FDB_DEL_DUMP + .ndo_fdb_del = ndo_dflt_fdb_del, + .ndo_fdb_dump = ndo_dflt_fdb_dump, +#endif +#ifdef HAVE_BRIDGE_ATTRIBS + .ndo_bridge_setlink = ngbe_ndo_bridge_setlink, + .ndo_bridge_getlink = ngbe_ndo_bridge_getlink, +#endif /* HAVE_BRIDGE_ATTRIBS */ +#endif +#ifdef HAVE_VIRTUAL_STATION + .ndo_dfwd_add_station = ngbe_fwd_add, + .ndo_dfwd_del_station = ngbe_fwd_del, +#endif +#ifdef HAVE_NDO_GSO_CHECK + .ndo_gso_check = ngbe_gso_check, +#endif /* HAVE_NDO_GSO_CHECK */ +#ifdef HAVE_NDO_FEATURES_CHECK + .ndo_features_check = 
ngbe_features_check, +#endif /* HAVE_NDO_FEATURES_CHECK */ + +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_NDO_BPF + .ndo_bpf = ngbe_xdp, +#else + .ndo_xdp = ngbe_xdp, +#endif + .ndo_xdp_xmit = ngbe_xdp_xmit, +#ifndef NO_NDO_XDP_FLUSH + .ndo_xdp_flush = ngbe_xdp_flush, +#endif /* !NO_NDO_XDP_FLUSH */ +#endif +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +}; + +/* RHEL6 keeps these operations in a separate structure */ +static const struct net_device_ops_ext ngbe_netdev_ops_ext = { + .size = sizeof(struct net_device_ops_ext), +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ +#ifdef HAVE_NDO_SET_FEATURES + .ndo_set_features = ngbe_set_features, + .ndo_fix_features = ngbe_fix_features, +#endif /* HAVE_NDO_SET_FEATURES */ +}; +#endif /* HAVE_NET_DEVICE_OPS */ + +void ngbe_assign_netdev_ops(struct net_device *dev) +{ +#ifdef HAVE_NET_DEVICE_OPS + dev->netdev_ops = &ngbe_netdev_ops; +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + set_netdev_ops_ext(dev, &ngbe_netdev_ops_ext); +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ +#else /* HAVE_NET_DEVICE_OPS */ + dev->open = &ngbe_open; + dev->stop = &ngbe_close; + dev->hard_start_xmit = &ngbe_xmit_frame; + dev->get_stats = &ngbe_get_stats; +#ifdef HAVE_SET_RX_MODE + dev->set_rx_mode = &ngbe_set_rx_mode; +#endif + dev->set_multicast_list = &ngbe_set_rx_mode; + dev->set_mac_address = &ngbe_set_mac; + dev->change_mtu = &ngbe_change_mtu; + dev->do_ioctl = &ngbe_ioctl; +#ifdef HAVE_TX_TIMEOUT + dev->tx_timeout = &ngbe_tx_timeout; +#endif +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) + dev->vlan_rx_register = &ngbe_vlan_mode; + dev->vlan_rx_add_vid = &ngbe_vlan_rx_add_vid; + dev->vlan_rx_kill_vid = &ngbe_vlan_rx_kill_vid; +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = &ngbe_netpoll; +#endif +#ifdef HAVE_NETDEV_SELECT_QUEUE +#if IS_ENABLED(CONFIG_FCOE) + dev->select_queue = &ngbe_select_queue; +#else + dev->select_queue = &__netdev_pick_tx; +#endif +#endif /* 
HAVE_NETDEV_SELECT_QUEUE */ +#endif /* HAVE_NET_DEVICE_OPS */ + +#ifdef HAVE_RHEL6_NET_DEVICE_EXTENDED +#ifdef HAVE_NDO_BUSY_POLL + netdev_extended(dev)->ndo_busy_poll = ngbe_busy_poll_recv; +#endif /* HAVE_NDO_BUSY_POLL */ +#endif /* HAVE_RHEL6_NET_DEVICE_EXTENDED */ + + ngbe_set_ethtool_ops(dev); + dev->watchdog_timeo = 5 * HZ; +} + +/** + * ngbe_wol_supported - Check whether device supports WoL + * @adapter: the adapter private structure + * @device_id: the device ID + * @subdev_id: the subsystem device ID + * + * This function is used by probe and ethtool to determine + * which devices have WoL support + * + **/ +int ngbe_wol_supported(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + + /* check eeprom to see if WOL is enabled */ + if ((hw->bus.func == 0) || + (hw->bus.func == 1) || + (hw->bus.func == 2) || + (hw->bus.func == 3) ) + return true; + else + return false; +} + + +/** + * ngbe_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in ngbe_pci_tbl + * + * Returns 0 on success, negative on failure + * + * ngbe_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
 **/
static int __devinit ngbe_probe(struct pci_dev *pdev,
				const struct pci_device_id __always_unused *ent)
{
	struct net_device *netdev;
	struct ngbe_adapter *adapter = NULL;
	struct ngbe_hw *hw = NULL;
	static int cards_found;
	int err, pci_using_dac, expected_gts;
	u32 eeprom_verl = 0;
	u32 etrack_id = 0;
	char *info_string, *i_s_var;
	u32 eeprom_cksum_devcap = 0;
	u32 saved_version = 0;
	u32 devcap;
	u32 led_conf = 0;

	bool disable_dev = false;
#ifdef HAVE_NDO_SET_FEATURES
#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT
	netdev_features_t hw_features;
#else /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
	u32 hw_features;
#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
#endif /* HAVE_NDO_SET_FEATURES */
	u16 pvalue = 0;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* prefer 64-bit DMA; fall back to 32-bit if the platform refuses */
	if (!dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(pci_dev_to_dev(pdev),
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(pci_dev_to_dev(pdev), "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_selected_regions(pdev,
					   pci_select_bars(pdev, IORESOURCE_MEM),
					   ngbe_driver_name);
	if (err) {
		dev_err(pci_dev_to_dev(pdev),
			"pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	/* errata 16 */
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
					   PCI_EXP_DEVCTL_READRQ,
					   0x1000);

#ifdef HAVE_TX_MQ
	netdev = alloc_etherdev_mq(sizeof(struct ngbe_adapter),
				   NGBE_MAX_TX_QUEUES);
#else /* HAVE_TX_MQ */
	netdev = alloc_etherdev(sizeof(struct ngbe_adapter));
#endif /* HAVE_TX_MQ */
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, pci_dev_to_dev(pdev));

	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	/* map BAR0 registers; io_addr is kept for unmapping on teardown */
	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));

	adapter->io_addr = hw->hw_addr;
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	/* default config: 10/100/1000M autoneg on */
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = NGBE_LINK_SPEED_AUTONEG;
	hw->phy.force_speed = NGBE_LINK_SPEED_UNKNOWN;
	/* assign netdev ops and ethtool ops */
	ngbe_assign_netdev_ops(netdev);

	/* NOTE(review): relies on the freshly allocated netdev being zeroed
	 * for NUL termination - strncpy alone does not guarantee it
	 */
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* setup the private structure */
	err = ngbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/*
	 * check_options must be called before setup_link to set up
	 * hw->fc completely
	 */
	ngbe_check_options(adapter);

	hw->mac.ops.set_lan_id(hw);

	/* check if flash load is done after hw power up */
	err = ngbe_check_flash_load(hw, NGBE_SPI_ILDR_STATUS_PERST);
	if (err)
		goto err_sw_init;
	err = ngbe_check_flash_load(hw, NGBE_SPI_ILDR_STATUS_PWRRST);
	if (err)
		goto err_sw_init;

	if (ngbe_is_lldp(hw))
		e_dev_err("Can not get lldp flags from flash\n");
	/* reset_hw fills in the perm_addr as well */

	hw->phy.reset_if_overtemp = true;
	err = hw->mac.ops.reset_hw(hw);
	hw->phy.reset_if_overtemp = false;
	if (err) {
		e_dev_err("HW reset failed: %d\n", err);
		goto err_sw_init;
	}

	err = hw->phy.ops.init(hw);
	if (err)
		goto err_sw_init;

	/* build the feature set; kcompat ifdefs track what the running
	 * kernel's netdev feature flags support
	 */
	netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

#ifdef NETIF_F_IPV6_CSUM
	netdev->features |= NETIF_F_IPV6_CSUM;
#endif

#ifdef NETIF_F_HW_VLAN_CTAG_TX
	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_CTAG_RX;
#endif

#ifdef NETIF_F_HW_VLAN_STAG_TX
	netdev->features |= NETIF_F_HW_VLAN_STAG_TX |
			    NETIF_F_HW_VLAN_STAG_RX;
#endif

#ifdef NETIF_F_HW_VLAN_TX
	netdev->features |= NETIF_F_HW_VLAN_TX |
			    NETIF_F_HW_VLAN_RX;
#endif
	netdev->features |= ngbe_tso_features();
#ifdef NETIF_F_RXHASH
	if (adapter->flags2 & NGBE_FLAG2_RSS_ENABLED)
		netdev->features |= NETIF_F_RXHASH;
#endif
	netdev->features |= NETIF_F_RXCSUM;
#ifdef HAVE_VIRTUAL_STATION
	netdev->features |= NETIF_F_HW_L2FW_DOFFLOAD;
#endif
#ifdef HAVE_NDO_SET_FEATURES
	/* copy netdev features into list of user selectable features */
#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT
	hw_features = netdev->hw_features;
#else
	hw_features = get_netdev_hw_features(netdev);
#endif
	hw_features |= netdev->features;

#else /* !HAVE_NDO_SET_FEATURES */

#ifdef NETIF_F_GRO
	/* this is only needed on kernels prior to 2.6.39 */
	netdev->features |= NETIF_F_GRO;
#endif
#endif /* HAVE_NDO_SET_FEATURES */

#ifdef NETIF_F_HW_VLAN_CTAG_TX
	/* set this bit last since it cannot be part of hw_features */
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
#endif
#ifdef NETIF_F_HW_VLAN_STAG_TX
	netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
#endif
#ifdef NETIF_F_HW_VLAN_TX
	/* set this bit last since it cannot be part of hw_features */
	netdev->features |= NETIF_F_HW_VLAN_FILTER;
#endif
	netdev->features |= NETIF_F_SCTP_CSUM;
	netdev->features |= NETIF_F_NTUPLE;
#ifdef HAVE_NDO_SET_FEATURES
	hw_features |= NETIF_F_SCTP_CSUM | NETIF_F_NTUPLE;
#endif

#ifdef HAVE_NDO_SET_FEATURES
#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
	set_netdev_hw_features(netdev, hw_features);
#else
	netdev->hw_features = hw_features;
#endif
#endif

#ifdef HAVE_NETDEV_VLAN_FEATURES
	netdev->vlan_features |= NETIF_F_SG |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_TSO |
				 NETIF_F_TSO6;

#endif /* HAVE_NETDEV_VLAN_FEATURES */

#ifdef IFF_UNICAST_FLT
	netdev->priv_flags |= IFF_UNICAST_FLT;
#endif
#ifdef IFF_SUPP_NOFCS
	netdev->priv_flags |= IFF_SUPP_NOFCS;
#endif

#ifdef HAVE_NETDEVICE_MIN_MAX_MTU
	/* MTU range: 68 - 9414 */
#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU
	netdev->extended->min_mtu = ETH_MIN_MTU;
	netdev->extended->max_mtu = NGBE_MAX_JUMBO_FRAME_SIZE -
				    (ETH_HLEN + ETH_FCS_LEN);
#else
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = NGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
#endif
#endif /* HAVE_NETDEVICE_MIN_MAX_MTU */

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
#ifdef HAVE_NETDEV_VLAN_FEATURES
		netdev->vlan_features |= NETIF_F_HIGHDMA;
#endif /* HAVE_NETDEV_VLAN_FEATURES */
	}

	/* LAN0 clears the scratch registers; other functions read back
	 * what LAN0 stored so the EEPROM is only validated once per card
	 */
	if (hw->bus.lan_id == 0) {
		wr32(hw, NGBE_CALSUM_CAP_STATUS, 0x0);
		wr32(hw, NGBE_EEPROM_VERSION_STORE_REG, 0x0);
	} else {
		eeprom_cksum_devcap = rd32(hw, NGBE_CALSUM_CAP_STATUS);
		saved_version = rd32(hw, NGBE_EEPROM_VERSION_STORE_REG);
	}

	hw->eeprom.ops.init_params(hw);
	hw->mac.ops.release_swfw_sync(hw, NGBE_MNG_SWFW_SYNC_SW_MB);
	if (hw->bus.lan_id == 0 || eeprom_cksum_devcap == 0) {
		/* make sure the EEPROM is good */
		if (hw->eeprom.ops.eeprom_chksum_cap_st(hw, NGBE_CALSUM_COMMAND,
							&devcap)) {
			e_dev_err("The EEPROM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_sw_init;
		}
	}

	if (hw->eeprom.ops.phy_led_oem_chk(hw, &led_conf)) {
		/* -1 means "no OEM LED override found" */
		adapter->led_conf = -1;
	} else {
		adapter->led_conf = led_conf;
	}

	eth_hw_addr_set(netdev, hw->mac.perm_addr);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		e_dev_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	ngbe_mac_set_default_filter(adapter, hw->mac.perm_addr);

	timer_setup(&adapter->service_timer, ngbe_service_timer, 0);
	if (NGBE_POLL_LINK_STATUS == 1)
		timer_setup(&adapter->link_check_timer,
			    ngbe_link_check_timer, 0);

	if (NGBE_REMOVED(hw->hw_addr)) {
		err = -EIO;
		goto err_sw_init;
	}
	INIT_WORK(&adapter->service_task, ngbe_service_task);
	set_bit(__NGBE_SERVICE_INITED, &adapter->state);
	clear_bit(__NGBE_SERVICE_SCHED, &adapter->state);

	err = ngbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

#ifdef CONFIG_PCI_IOV
#ifdef HAVE_SRIOV_CONFIGURE
	if (adapter->num_vfs > 0) {
		e_dev_warn("Enabling SR-IOV VFs using the max_vfs module "
			   "parameter is deprecated.\n");
		e_dev_warn("Please use the pci sysfs interface instead. Ex:\n");
		e_dev_warn("echo '%d' > /sys/bus/pci/devices/%04x:%02x:%02x.%1x"
			   "/sriov_numvfs\n",
			   adapter->num_vfs,
			   pci_domain_nr(pdev->bus),
			   pdev->bus->number,
			   PCI_SLOT(pdev->devfn),
			   PCI_FUNC(pdev->devfn));
	}

#endif
	if (adapter->flags & NGBE_FLAG_SRIOV_CAPABLE) {
		pci_sriov_set_totalvfs(pdev, NGBE_MAX_VFS_DRV_LIMIT);
		ngbe_enable_sriov(adapter);
	}
#endif /* CONFIG_PCI_IOV */

	/* WOL not supported for all devices */
	adapter->wol = 0;
	if (hw->bus.lan_id == 0 || eeprom_cksum_devcap == 0) {
		hw->eeprom.ops.read(hw,
				    hw->eeprom.sw_region_offset +
				    NGBE_DEVICE_CAPS,
				    &adapter->eeprom_cap);
		/*only support in LAN0*/
		adapter->eeprom_cap = NGBE_DEVICE_CAPS_WOL_PORT0;
	} else {
		adapter->eeprom_cap = eeprom_cksum_devcap & 0xffff;
	}
	if (ngbe_wol_supported(adapter))
		adapter->wol = NGBE_PSR_WKUP_CTL_MAG;
	if ((hw->subsystem_device_id & WOL_SUP_MASK) == WOL_SUP) {
		/*enable wol first in shadow ram*/
		ngbe_write_ee_hostif(hw, 0x7FE, 0xa50F);
		ngbe_write_ee_hostif(hw, 0x7FF, 0x5a5a);
	}
	hw->wol_enabled = !!(adapter->wol);
	wr32(hw, NGBE_PSR_WKUP_CTL, adapter->wol);

	device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol);

	/*
	 * Save off EEPROM version number and Option Rom version which
	 * together make a unique identify for the eeprom
	 */
	if (hw->bus.lan_id == 0 || saved_version == 0) {
		hw->eeprom.ops.read32(hw,
				      hw->eeprom.sw_region_offset +
				      NGBE_EEPROM_VERSION_L,
				      &eeprom_verl);
		etrack_id = eeprom_verl;
		wr32(hw, NGBE_EEPROM_VERSION_STORE_REG, etrack_id);
		wr32(hw, NGBE_CALSUM_CAP_STATUS,
		     0x10000 | (u32)adapter->eeprom_cap);
	} else if (eeprom_cksum_devcap) {
		etrack_id = saved_version;
	} else {
		hw->eeprom.ops.read32(hw,
				      hw->eeprom.sw_region_offset +
				      NGBE_EEPROM_VERSION_L,
				      &eeprom_verl);
		etrack_id = eeprom_verl;
	}

	/* Make sure offset to SCSI block is valid */
	snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
		 "0x%08x", etrack_id);

	/* reset the hardware with the new settings */
	err = hw->mac.ops.start_hw(hw);
	if (err == NGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
		e_dev_warn("This device is a pre-production adapter/LOM. "
			   "Please be aware there may be issues associated "
			   "with your hardware. If you are experiencing "
			   "problems please contact your hardware "
			   "representative who provided you with this "
			   "hardware.\n");
	} else if (err) {
		e_dev_err("HW init failed, err = %d\n", err);
		goto err_register;
	}

	/* pick up the PCI bus settings for reporting later */
	hw->mac.ops.get_bus_info(hw);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	pci_set_drvdata(pdev, adapter);
	adapter->netdev_registered = true;
#ifdef HAVE_PCI_ERS
	/*
	 * call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);
#endif

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);
	/* keep stopping all the transmit queues for older kernels */
	netif_tx_stop_all_queues(netdev);

	/* print all messages at the end so that we use our eth%d name */

	/* calculate the expected PCIe bandwidth required for optimal
	 * performance. Note that some older parts will never have enough
	 * bandwidth due to being older generation PCIe parts. We clamp these
	 * parts to ensure that no warning is displayed, as this could confuse
	 * users otherwise.
	 */
	expected_gts = ngbe_enumerate_functions(adapter) * 10;

	/* don't check link if we failed to enumerate functions */
	if (expected_gts > 0)
		ngbe_check_minimum_link(adapter, expected_gts);

	hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF);

	if (hw->ncsi_enabled)
		e_info(probe, "NCSI : support");
	else
		e_info(probe, "NCSI : unsupported");

	e_info(probe, "PHY: %s, PBA No: Wang Xun GbE Family Controller\n",
	       hw->phy.type == ngbe_phy_internal ? "Internal" : "External");

	e_info(probe, "%02x:%02x:%02x:%02x:%02x:%02x\n",
	       netdev->dev_addr[0], netdev->dev_addr[1],
	       netdev->dev_addr[2], netdev->dev_addr[3],
	       netdev->dev_addr[4], netdev->dev_addr[5]);

#define INFO_STRING_LEN 255
	info_string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
	if (!info_string) {
		e_err(probe, "allocation for info string failed\n");
		goto no_info_string;
	}
	i_s_var = info_string;
	i_s_var += sprintf(info_string, "Enabled Features: ");
	i_s_var += sprintf(i_s_var, "RxQ: %d TxQ: %d ",
			   adapter->num_rx_queues, adapter->num_tx_queues);
	if (adapter->flags & NGBE_FLAG_TPH_ENABLED)
		i_s_var += sprintf(i_s_var, "TPH ");
#ifndef NGBE_NO_LRO
	else if (netdev->features & NETIF_F_LRO)
		i_s_var += sprintf(i_s_var, "LRO ");
#endif

	BUG_ON(i_s_var > (info_string + INFO_STRING_LEN));
	/* end features printing */
	e_info(probe, "%s\n", info_string);
	kfree(info_string);
no_info_string:

#ifdef CONFIG_PCI_IOV
	if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) {
		int i;

		for (i = 0; i < adapter->num_vfs; i++)
			ngbe_vf_configuration(pdev, (i | 0x10000000));
	}
#endif

	e_info(probe, "WangXun(R) Gigabit Network Connection\n");
	cards_found++;

#ifdef NGBE_SYSFS
	if (ngbe_sysfs_init(adapter))
		e_err(probe, "failed to allocate sysfs resources\n");
#else
#ifdef NGBE_PROCFS
	if (ngbe_procfs_init(adapter))
		e_err(probe, "failed to allocate procfs resources\n");
#endif /* NGBE_PROCFS */
#endif /* NGBE_SYSFS */

#ifdef HAVE_NGBE_DEBUG_FS
	ngbe_dbg_adapter_init(adapter);
#endif /* HAVE_NGBE_DEBUG_FS */

	/* optionally disable PCIe completion timeouts (DEVCTL2 bit 4) */
	if (NGBE_DIS_COMP_TIMEOUT == 1) {
		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &pvalue);
		pvalue = pvalue | 0x10;
		pcie_capability_write_word(pdev, PCI_EXP_DEVCTL2, pvalue);
	}

	return 0;

err_register:
	ngbe_clear_interrupt_scheme(adapter);
	ngbe_release_hw_control(adapter);
err_sw_init:
#ifdef CONFIG_PCI_IOV
	ngbe_disable_sriov(adapter);
#endif /* CONFIG_PCI_IOV */
	adapter->flags2 &= ~NGBE_FLAG2_SEARCH_FOR_SFP;
	kfree(adapter->mac_table);
	iounmap(adapter->io_addr);
err_ioremap:
	disable_dev = !test_and_set_bit(__NGBE_DISABLED, &adapter->state);
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	if (!adapter || disable_dev)
		pci_disable_device(pdev);

	return err;
}

/**
 * ngbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ngbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ngbe_remove(struct pci_dev *pdev)
{
	struct ngbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev;
	struct ngbe_hw *hw;
	bool disable_dev;

	/* if !adapter then we already cleaned up in probe */
	if (!adapter)
		return;

	hw = &adapter->hw;
	ngbe_mac_set_default_filter(adapter, hw->mac.perm_addr);

	netdev = adapter->netdev;
#ifdef HAVE_NGBE_DEBUG_FS
	ngbe_dbg_adapter_exit(adapter);
#endif

	/* stop the service task before tearing anything down */
	set_bit(__NGBE_REMOVING, &adapter->state);
	cancel_work_sync(&adapter->service_task);

#ifdef NGBE_SYSFS
	ngbe_sysfs_exit(adapter);
#else
#ifdef NGBE_PROCFS
	ngbe_procfs_exit(adapter);
#endif
#endif /* NGBE-SYSFS */
	if (adapter->netdev_registered) {
		unregister_netdev(netdev);
		adapter->netdev_registered = false;
	}

#ifdef CONFIG_PCI_IOV
	ngbe_disable_sriov(adapter);
#endif

	ngbe_clear_interrupt_scheme(adapter);
	ngbe_release_hw_control(adapter);

	iounmap(adapter->io_addr);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	kfree(adapter->mac_table);
	/* only disable the device if the error handlers have not already */
	disable_dev = !test_and_set_bit(__NGBE_DISABLED, &adapter->state);
	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	if (disable_dev)
		pci_disable_device(pdev);
}

/*
 * Probe config space for a surprise-removed device: an all-ones vendor ID
 * read means the device is gone; mark the adapter removed and report it.
 */
static bool ngbe_check_cfg_remove(struct ngbe_hw *hw, struct pci_dev *pdev)
{
	u16 value;

	pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
	if (value == NGBE_FAILED_READ_CFG_WORD) {
		ngbe_remove_adapter(hw);
		return true;
	}
	return false;
}

/*
 * Read a 16-bit config space register, detecting surprise removal.
 * Returns NGBE_FAILED_READ_CFG_WORD if the device is gone.
 */
u16 ngbe_read_pci_cfg_word(struct ngbe_hw *hw, u32 reg)
{
	struct ngbe_adapter *adapter = hw->back;
	u16 value;

	if (NGBE_REMOVED(hw->hw_addr))
		return NGBE_FAILED_READ_CFG_WORD;
	pci_read_config_word(adapter->pdev, reg, &value);
	if (value == NGBE_FAILED_READ_CFG_WORD &&
	    ngbe_check_cfg_remove(hw, adapter->pdev))
		return NGBE_FAILED_READ_CFG_WORD;
	return value;
}

#ifdef HAVE_PCI_ERS
#ifdef CONFIG_PCI_IOV
/* 32-bit counterpart of ngbe_read_pci_cfg_word, used by AER handling */
static u32 ngbe_read_pci_cfg_dword(struct ngbe_hw *hw, u32 reg)
{
	struct ngbe_adapter *adapter = hw->back;
	u32 value;

	if (NGBE_REMOVED(hw->hw_addr))
		return NGBE_FAILED_READ_CFG_DWORD;
	pci_read_config_dword(adapter->pdev, reg, &value);
	if (value == NGBE_FAILED_READ_CFG_DWORD &&
	    ngbe_check_cfg_remove(hw, adapter->pdev))
		return NGBE_FAILED_READ_CFG_DWORD;
	return value;
}
#endif /* CONFIG_PCI_IOV */
#endif /* HAVE_PCI_ERS */

/* Write a 16-bit config space register; no-op once the device is removed */
void ngbe_write_pci_cfg_word(struct ngbe_hw *hw, u32 reg, u16 value)
{
	struct ngbe_adapter *adapter = hw->back;

	if (NGBE_REMOVED(hw->hw_addr))
		return;
	pci_write_config_word(adapter->pdev, reg, value);
}

#ifdef HAVE_PCI_ERS
/**
 * ngbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ngbe_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct ngbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	struct ngbe_hw *hw = &adapter->hw;
	struct pci_dev *bdev, *vfdev;
	u32 dw0, dw1, dw2, dw3;
	int vf, pos;
	u16 req_id, pf_func;

	if (adapter->num_vfs == 0)
		goto skip_bad_vf_detection;

	/* walk up to the root port to find the AER header log */
	bdev = pdev->bus->self;
	while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
		bdev = bdev->bus->self;

	if (!bdev)
		goto skip_bad_vf_detection;

	pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		goto skip_bad_vf_detection;

	dw0 = ngbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
	dw1 = ngbe_read_pci_cfg_dword(hw,
				      pos + PCI_ERR_HEADER_LOG + 4);
	dw2 = ngbe_read_pci_cfg_dword(hw,
				      pos + PCI_ERR_HEADER_LOG + 8);
	dw3 = ngbe_read_pci_cfg_dword(hw,
				      pos + PCI_ERR_HEADER_LOG + 12);
	if (NGBE_REMOVED(hw->hw_addr))
		goto skip_bad_vf_detection;

	req_id = dw1 >> 16;
	/* if bit 7 of the requestor ID is set then it's a VF */
	if (!(req_id & 0x0080))
		goto skip_bad_vf_detection;

	pf_func = req_id & 0x01;
	if ((pf_func & 1) == (pdev->devfn & 1)) {
		vf = (req_id & 0x7F) >> 1;
		e_dev_err("VF %d has caused a PCIe error\n", vf);
		e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
			  "%8.8x\tdw3: %8.8x\n",
			  dw0, dw1, dw2, dw3);

		/* Find the pci device of the offending VF */
		vfdev = pci_get_device(PCI_VENDOR_ID_TRUSTNETIC,
				       NGBE_VF_DEVICE_ID, NULL);
		while (vfdev) {
			if (vfdev->devfn == (req_id & 0xFF))
				break;
			vfdev = pci_get_device(PCI_VENDOR_ID_TRUSTNETIC,
					       NGBE_VF_DEVICE_ID, vfdev);
		}
		/*
		 * There's a slim chance the VF could have been hot
		 * plugged, so if it is no longer present we don't need
		 * to issue the VFLR. Just clean up the AER in that case.
		 */
		if (vfdev) {
			ngbe_issue_vf_flr(adapter, vfdev);
			/* Free device reference count */
			pci_dev_put(vfdev);
		}

		pci_aer_clear_nonfatal_status(pdev);
	}

	/*
	 * Even though the error may have occurred on the other port
	 * we still need to increment the vf error reference count for
	 * both ports because the I/O resume function will be called
	 * for both of them.
	 */
	adapter->vferr_refcount++;

	return PCI_ERS_RESULT_RECOVERED;

 skip_bad_vf_detection:
#endif /* CONFIG_PCI_IOV */

	if (!test_bit(__NGBE_SERVICE_INITED, &adapter->state))
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		ngbe_close(netdev);

	if (!test_and_set_bit(__NGBE_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ngbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
+ */ +static pci_ers_result_t ngbe_io_slot_reset(struct pci_dev *pdev) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(pdev); + pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED; + + if (pci_enable_device_mem(pdev)) { + e_err(probe, "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + smp_mb__before_atomic(); + clear_bit(__NGBE_DISABLED, &adapter->state); + adapter->hw.hw_addr = adapter->io_addr; + pci_set_master(pdev); + pci_restore_state(pdev); + /* + * After second error pci->state_saved is false, this + * resets it so EEH doesn't break. + */ + pci_save_state(pdev); + + pci_wake_from_d3(pdev, false); + + ngbe_reset(adapter); + + result = PCI_ERS_RESULT_RECOVERED; + } + + pci_aer_clear_nonfatal_status(pdev); + + return result; +} + +/** + * ngbe_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. 
+ */ +static void ngbe_io_resume(struct pci_dev *pdev) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + +#ifdef CONFIG_PCI_IOV + if (adapter->vferr_refcount) { + e_info(drv, "Resuming after VF err\n"); + adapter->vferr_refcount--; + return; + } + +#endif + rtnl_lock(); + if (netif_running(netdev)) + ngbe_open(netdev); + + netif_device_attach(netdev); + rtnl_unlock(); +} + +#ifdef HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS +static const struct pci_error_handlers ngbe_err_handler = { +#else +static struct pci_error_handlers ngbe_err_handler = { +#endif + .error_detected = ngbe_io_error_detected, + .slot_reset = ngbe_io_slot_reset, + .resume = ngbe_io_resume, +}; +#endif /* HAVE_PCI_ERS */ + +struct net_device *ngbe_hw_to_netdev(const struct ngbe_hw *hw) +{ + return ((struct ngbe_adapter *)hw->back)->netdev; +} +struct ngbe_msg *ngbe_hw_to_msg(const struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = + container_of(hw, struct ngbe_adapter, hw); + return (struct ngbe_msg *)&adapter->msg_enable; +} + +#ifdef CONFIG_PM +#ifndef USE_LEGACY_PM_SUPPORT +static const struct dev_pm_ops ngbe_pm_ops = { + .suspend = ngbe_suspend, + .resume = ngbe_resume, + .freeze = ngbe_freeze, + .thaw = ngbe_thaw, + .poweroff = ngbe_suspend, + .restore = ngbe_resume, +}; +#endif /* USE_LEGACY_PM_SUPPORT */ +#endif + +#ifdef HAVE_RHEL6_SRIOV_CONFIGURE +static struct pci_driver_rh ngbe_driver_rh = { + .sriov_configure = ngbe_pci_sriov_configure, +}; +#endif + +static struct pci_driver ngbe_driver = { + .name = ngbe_driver_name, + .id_table = ngbe_pci_tbl, + .probe = ngbe_probe, + .remove = __devexit_p(ngbe_remove), +#ifdef CONFIG_PM +#ifndef USE_LEGACY_PM_SUPPORT + .driver = { + .pm = &ngbe_pm_ops, + }, +#else + .suspend = ngbe_suspend, + .resume = ngbe_resume, +#endif /* USE_LEGACY_PM_SUPPORT */ +#endif +#ifndef USE_REBOOT_NOTIFIER + .shutdown = ngbe_shutdown, +#endif + +#if defined(HAVE_SRIOV_CONFIGURE) + .sriov_configure = 
ngbe_pci_sriov_configure, +#elif defined(HAVE_RHEL6_SRIOV_CONFIGURE) + .rh_reserved = &ngbe_driver_rh, +#endif + +#ifdef HAVE_PCI_ERS + .err_handler = &ngbe_err_handler +#endif +}; + +/** + * ngbe_init_module - Driver Registration Routine + * + * ngbe_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init ngbe_init_module(void) +{ + int ret; + pr_info("%s - version %s\n", ngbe_driver_string, ngbe_driver_version); + pr_info("%s\n", ngbe_copyright); + + ngbe_wq = create_singlethread_workqueue(ngbe_driver_name); + if (!ngbe_wq) { + pr_err("%s: Failed to create workqueue\n", ngbe_driver_name); + return -ENOMEM; + } + +#ifdef NGBE_PROCFS + if (ngbe_procfs_topdir_init()) + pr_info("Procfs failed to initialize topdir\n"); +#endif + +#ifdef HAVE_NGBE_DEBUG_FS + ngbe_dbg_init(); +#endif + + ret = pci_register_driver(&ngbe_driver); + return ret; +} + +module_init(ngbe_init_module); + +/** + * ngbe_exit_module - Driver Exit Cleanup Routine + * + * ngbe_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit ngbe_exit_module(void) +{ + pci_unregister_driver(&ngbe_driver); +#ifdef NGBE_PROCFS + ngbe_procfs_topdir_exit(); +#endif + destroy_workqueue(ngbe_wq); +#ifdef HAVE_NGBE_DEBUG_FS + ngbe_dbg_exit(); +#endif /* HAVE_NGBE_DEBUG_FS */ +} + +module_exit(ngbe_exit_module); + +/* ngbe_main.c */ + diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mbx.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mbx.c new file mode 100644 index 000000000000..4f03daa97baf --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mbx.c @@ -0,0 +1,692 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include "ngbe_type.h" +#include "ngbe.h" +#include "ngbe_mbx.h" + + +/** + * ngbe_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfuly read message from buffer + **/ +int ngbe_read_mbx(struct ngbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + int err = NGBE_ERR_MBX; + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + err = hw->mbx.ops.read(hw, msg, size, mbx_id); + + return err; +} + +/** + * ngbe_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +int ngbe_write_mbx(struct ngbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + int err = 0; + + if (size > mbx->size) { + err = NGBE_ERR_MBX; + ERROR_REPORT2(NGBE_ERROR_ARGUMENT, + "Invalid mailbox message size %d", size); + } else + err = hw->mbx.ops.write(hw, msg, size, mbx_id); + + return err; +} + +/** + * ngbe_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit 
was found or else ERR_MBX + **/ +int ngbe_check_for_msg(struct ngbe_hw *hw, u16 mbx_id) +{ + int err = NGBE_ERR_MBX; + + err = hw->mbx.ops.check_for_msg(hw, mbx_id); + + return err; +} + +/** + * ngbe_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +int ngbe_check_for_ack(struct ngbe_hw *hw, u16 mbx_id) +{ + int err = NGBE_ERR_MBX; + + err = hw->mbx.ops.check_for_ack(hw, mbx_id); + + return err; +} + +/** + * ngbe_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +int ngbe_check_for_rst(struct ngbe_hw *hw, u16 mbx_id) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + int err = NGBE_ERR_MBX; + + if (mbx->ops.check_for_rst) + err = mbx->ops.check_for_rst(hw, mbx_id); + + return err; +} + +/** + * ngbe_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +static int ngbe_poll_for_msg(struct ngbe_hw *hw, u16 mbx_id) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && hw->mbx.ops.check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->udelay); + } + + if (countdown == 0) + ERROR_REPORT2(NGBE_ERROR_POLLING, + "Polling for VF%d mailbox message timedout", mbx_id); + +out: + return countdown ? 
0 : NGBE_ERR_MBX; +} + +/** + * ngbe_poll_for_ack - Wait for message acknowledngbeent + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledngbeent + **/ +static int ngbe_poll_for_ack(struct ngbe_hw *hw, u16 mbx_id) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && hw->mbx.ops.check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->udelay); + } + + if (countdown == 0) + ERROR_REPORT2(NGBE_ERROR_POLLING, + "Polling for VF%d mailbox ack timedout", mbx_id); + +out: + return countdown ? 0 : NGBE_ERR_MBX; +} + +/** + * ngbe_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. 
+ **/ +int ngbe_read_posted_mbx(struct ngbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + int err = NGBE_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + err = ngbe_poll_for_msg(hw, mbx_id); + + /* if ack received read message, otherwise we timed out */ + if (!err) + err = hw->mbx.ops.read(hw, msg, size, mbx_id); +out: + return err; +} + +/** + * ngbe_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +int ngbe_write_posted_mbx(struct ngbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + int err; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->timeout) + return NGBE_ERR_MBX; + + /* send msg */ + err = hw->mbx.ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!err) + err = ngbe_poll_for_ack(hw, mbx_id); + + return err; +} + + +/** + * ngbe_init_mbx_ops - Initialize MB function pointers + * @hw: pointer to the HW structure + * + * Setups up the mailbox read and write message function pointers + **/ +void ngbe_init_mbx_ops(struct ngbe_hw *hw) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + + mbx->ops.read_posted = ngbe_read_posted_mbx; + mbx->ops.write_posted = ngbe_write_posted_mbx; +} + + + +/** + * ngbe_read_v2p_mailbox - read v2p mailbox + * @hw: pointer to the HW structure + * + * This function is used to read the v2p mailbox without losing the read to + * clear status bits. 
+ **/ +static u32 ngbe_read_v2p_mailbox(struct ngbe_hw *hw) +{ + u32 v2p_mailbox = rd32(hw, NGBE_VXMAILBOX); + + v2p_mailbox |= hw->mbx.v2p_mailbox; + hw->mbx.v2p_mailbox |= v2p_mailbox & NGBE_VXMAILBOX_R2C_BITS; + + return v2p_mailbox; +} + +/** + * ngbe_check_for_bit_vf - Determine if a status bit was set + * @hw: pointer to the HW structure + * @mask: bitmask for bits to be tested and cleared + * + * This function is used to check for the read to clear bits within + * the V2P mailbox. + **/ +static int ngbe_check_for_bit_vf(struct ngbe_hw *hw, u32 mask) +{ + u32 mailbox = ngbe_read_v2p_mailbox(hw); + + hw->mbx.v2p_mailbox &= ~mask; + + return (mailbox & mask ? 0 : NGBE_ERR_MBX); +} + +/** + * ngbe_check_for_msg_vf - checks to see if the PF has sent mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the Status bit or else ERR_MBX + **/ +static int ngbe_check_for_msg_vf(struct ngbe_hw *hw, u16 mbx_id) +{ + int err = NGBE_ERR_MBX; + + UNREFERENCED_PARAMETER(mbx_id); + + /* read clear the pf sts bit */ + if (!ngbe_check_for_bit_vf(hw, NGBE_VXMAILBOX_PFSTS)) { + err = 0; + hw->mbx.stats.reqs++; + } + + return err; +} + +/** + * ngbe_check_for_ack_vf - checks to see if the PF has ACK'd + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX + **/ +static int ngbe_check_for_ack_vf(struct ngbe_hw *hw, u16 mbx_id) +{ + int err = NGBE_ERR_MBX; + + UNREFERENCED_PARAMETER(mbx_id); + + /* read clear the pf ack bit */ + if (!ngbe_check_for_bit_vf(hw, NGBE_VXMAILBOX_PFACK)) { + err = 0; + hw->mbx.stats.acks++; + } + + return err; +} + +/** + * ngbe_check_for_rst_vf - checks to see if the PF has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns true if the PF has set the reset done bit or else false + **/ +static int ngbe_check_for_rst_vf(struct ngbe_hw *hw, u16 mbx_id) +{ + int 
err = NGBE_ERR_MBX; + + UNREFERENCED_PARAMETER(mbx_id); + if (!ngbe_check_for_bit_vf(hw, (NGBE_VXMAILBOX_RSTD | + NGBE_VXMAILBOX_RSTI))) { + err = 0; + hw->mbx.stats.rsts++; + } + + return err; +} + +/** + * ngbe_obtain_mbx_lock_vf - obtain mailbox lock + * @hw: pointer to the HW structure + * + * return SUCCESS if we obtained the mailbox lock + **/ +static int ngbe_obtain_mbx_lock_vf(struct ngbe_hw *hw) +{ + int err = NGBE_ERR_MBX; + u32 mailbox; + + /* Take ownership of the buffer */ + wr32(hw, NGBE_VXMAILBOX, NGBE_VXMAILBOX_VFU); + + /* reserve mailbox for vf use */ + mailbox = ngbe_read_v2p_mailbox(hw); + if (mailbox & NGBE_VXMAILBOX_VFU) + err = 0; + else + ERROR_REPORT2(NGBE_ERROR_POLLING, + "Failed to obtain mailbox lock for VF"); + + return err; +} + +/** + * ngbe_write_mbx_vf - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static int ngbe_write_mbx_vf(struct ngbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + int err; + u16 i; + + UNREFERENCED_PARAMETER(mbx_id); + + /* lock the mailbox to prevent pf/vf race condition */ + err = ngbe_obtain_mbx_lock_vf(hw); + if (err) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + ngbe_check_for_msg_vf(hw, 0); + ngbe_check_for_ack_vf(hw, 0); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + wr32a(hw, NGBE_VXMBMEM, i, msg[i]); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* Drop VFU and interrupt the PF to tell it a message has been sent */ + wr32(hw, NGBE_VXMAILBOX, NGBE_VXMAILBOX_REQ); + +out_no_write: + return err; +} + +/** + * ngbe_read_mbx_vf - Reads a message from the inbox intended for vf + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + 
* + * returns SUCCESS if it successfuly read message from buffer + **/ +static int ngbe_read_mbx_vf(struct ngbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + int err = 0; + u16 i; + UNREFERENCED_PARAMETER(mbx_id); + + /* lock the mailbox to prevent pf/vf race condition */ + err = ngbe_obtain_mbx_lock_vf(hw); + if (err) + goto out_no_read; + + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = rd32a(hw, NGBE_VXMBMEM, i); + + /* Acknowledge receipt and release mailbox, then we're done */ + wr32(hw, NGBE_VXMAILBOX, NGBE_VXMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return err; +} + + +/** + * ngbe_init_mbx_params_vf - set initial values for vf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for vf mailbox + */ +void ngbe_init_mbx_params_vf(struct ngbe_hw *hw) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + + /* start mailbox as timed out and let the reset_hw call set the timeout + * value to begin communications */ + mbx->timeout = 0; + mbx->udelay = NGBE_VF_MBX_INIT_DELAY; + + mbx->size = NGBE_VXMAILBOX_SIZE; + + mbx->ops.read = ngbe_read_mbx_vf; + mbx->ops.write = ngbe_write_mbx_vf; + mbx->ops.read_posted = ngbe_read_posted_mbx; + mbx->ops.write_posted = ngbe_write_posted_mbx; + mbx->ops.check_for_msg = ngbe_check_for_msg_vf; + mbx->ops.check_for_ack = ngbe_check_for_ack_vf; + mbx->ops.check_for_rst = ngbe_check_for_rst_vf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} + +static int ngbe_check_for_bit_pf(struct ngbe_hw *hw, u32 mask) +{ + u32 mbvficr = rd32(hw, NGBE_MBVFICR); + int err = NGBE_ERR_MBX; + + if (mbvficr & mask) { + err = 0; + wr32(hw, NGBE_MBVFICR, mask); + } + + return err; +} + +/** + * ngbe_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf: the VF index + * + * returns SUCCESS if the VF 
has set the Status bit or else ERR_MBX + **/ +static int ngbe_check_for_msg_pf(struct ngbe_hw *hw, u16 vf) +{ + int err = NGBE_ERR_MBX; + u32 vf_bit = vf; + + if (!ngbe_check_for_bit_pf(hw, NGBE_MBVFICR_VFREQ_VF1 << vf_bit)) { + err = 0; + hw->mbx.stats.reqs++; + } + + return err; +} + +/** + * ngbe_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static int ngbe_check_for_ack_pf(struct ngbe_hw *hw, u16 vf) +{ + int err = NGBE_ERR_MBX; + u32 vf_bit = vf; + + if (!ngbe_check_for_bit_pf(hw, NGBE_MBVFICR_VFACK_VF1 << vf_bit)) { + err = 0; + hw->mbx.stats.acks++; + } + + return err; +} + +/** + * ngbe_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static int ngbe_check_for_rst_pf(struct ngbe_hw *hw, u16 vf) +{ + u32 vflre = 0; + int err = NGBE_ERR_MBX; + + vflre = rd32(hw, NGBE_VFLRE); + + if (vflre & (1 << vf)) { + err = 0; + wr32(hw, NGBE_VFLREC, (1 << vf)); + hw->mbx.stats.rsts++; + } + + return err; +} + +/** + * ngbe_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +static int ngbe_obtain_mbx_lock_pf(struct ngbe_hw *hw, u16 vf) +{ + int err = NGBE_ERR_MBX; + u32 mailbox; + + /* Take ownership of the buffer */ + wr32(hw, NGBE_PXMAILBOX(vf), NGBE_PXMAILBOX_PFU); + + /* reserve mailbox for vf use */ + mailbox = rd32(hw, NGBE_PXMAILBOX(vf)); + if (mailbox & NGBE_PXMAILBOX_PFU) + err = 0; + else + ERROR_REPORT2(NGBE_ERROR_POLLING, + "Failed to obtain mailbox lock for PF%d", vf); + + return err; +} + +/** + * ngbe_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf: the VF index + * 
+ * returns SUCCESS if it successfully copied message into the buffer + **/ +static int ngbe_write_mbx_pf(struct ngbe_hw *hw, u32 *msg, u16 size, + u16 vf) +{ + int err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = ngbe_obtain_mbx_lock_pf(hw, vf); + if (err) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + ngbe_check_for_msg_pf(hw, vf); + ngbe_check_for_ack_pf(hw, vf); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + wr32a(hw, NGBE_PXMBMEM(vf), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + wr32(hw, NGBE_PXMAILBOX(vf), NGBE_PXMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out_no_write: + return err; + +} + +/** + * ngbe_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. 
+ **/ +static int ngbe_read_mbx_pf(struct ngbe_hw *hw, u32 *msg, u16 size, + u16 vf) +{ + int err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = ngbe_obtain_mbx_lock_pf(hw, vf); + if (err) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = rd32a(hw, NGBE_PXMBMEM(vf), i); + + /* Acknowledge the message and release buffer */ + wr32(hw, NGBE_PXMAILBOX(vf), NGBE_PXMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return err; +} + +/** + * ngbe_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +void ngbe_init_mbx_params_pf(struct ngbe_hw *hw) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + + mbx->timeout = 0; + mbx->udelay = 0; + + mbx->size = NGBE_VXMAILBOX_SIZE; + + mbx->ops.read = ngbe_read_mbx_pf; + mbx->ops.write = ngbe_write_mbx_pf; + mbx->ops.read_posted = ngbe_read_posted_mbx; + mbx->ops.write_posted = ngbe_write_posted_mbx; + mbx->ops.check_for_msg = ngbe_check_for_msg_pf; + mbx->ops.check_for_ack = ngbe_check_for_ack_pf; + mbx->ops.check_for_rst = ngbe_check_for_rst_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mbx.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_mbx.h new file mode 100644 index 000000000000..6b6d8be25c45 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mbx.h @@ -0,0 +1,172 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + */ + +#ifndef _NGBE_MBX_H_ +#define _NGBE_MBX_H_ + +#define NGBE_VXMAILBOX_SIZE (16) + +/** + * VF Registers + **/ +#define NGBE_VXMAILBOX 0x00600 +#define NGBE_VXMAILBOX_REQ ((0x1) << 0) /* Request for PF Ready bit */ +#define NGBE_VXMAILBOX_ACK ((0x1) << 1) /* Ack PF message received */ +#define NGBE_VXMAILBOX_VFU ((0x1) << 2) /* VF owns the mailbox buffer */ +#define NGBE_VXMAILBOX_PFU ((0x1) << 3) /* PF owns the mailbox buffer */ +#define NGBE_VXMAILBOX_PFSTS ((0x1) << 4) /* PF wrote a message in the MB */ +#define NGBE_VXMAILBOX_PFACK ((0x1) << 5) /* PF ack the previous VF msg */ +#define NGBE_VXMAILBOX_RSTI ((0x1) << 6) /* PF has reset indication */ +#define NGBE_VXMAILBOX_RSTD ((0x1) << 7) /* PF has indicated reset done */ +#define NGBE_VXMAILBOX_R2C_BITS (NGBE_VXMAILBOX_RSTD | \ + NGBE_VXMAILBOX_PFSTS | NGBE_VXMAILBOX_PFACK) + +#define NGBE_VXMBMEM 0x00C00 /* 16*4B */ + +/** + * PF Registers + **/ +#define NGBE_PXMAILBOX(i) (0x00600 + (4 * (i))) /* i=[0,7] */ +#define NGBE_PXMAILBOX_STS ((0x1) << 0) /* Initiate message send to VF */ +#define NGBE_PXMAILBOX_ACK ((0x1) << 1) /* Ack message recv'd from VF */ +#define NGBE_PXMAILBOX_VFU ((0x1) << 2) /* VF owns the mailbox buffer */ +#define NGBE_PXMAILBOX_PFU ((0x1) << 3) /* PF owns the mailbox buffer */ +#define NGBE_PXMAILBOX_RVFU ((0x1) << 4) /* Reset VFU - used when VF stuck*/ + +#define NGBE_PXMBMEM(i) (0x5000 + (64 * (i))) /* i=[0,7] */ + +#define NGBE_VFLRP(i) (0x00490 + (4 * (i))) /* i=[0,1] */ +#define NGBE_VFLRE 0x004A0 +#define NGBE_VFLREC 0x004A8 + +/* SR-IOV specific macros */ +#define NGBE_MBVFICR 0x00480 + + + +#define NGBE_MBVFICR_INDEX(vf) 
((vf) >> 4) +#define NGBE_MBVFICR_VFREQ_MASK (0x0000FFFF) /* bits for VF messages */ +#define NGBE_MBVFICR_VFREQ_VF1 (0x00000001) /* bit for VF 1 message */ +#define NGBE_MBVFICR_VFACK_MASK (0xFFFF0000) /* bits for VF acks */ +#define NGBE_MBVFICR_VFACK_VF1 (0x00010000) /* bit for VF 1 ack */ + +/** + * Messages + **/ +/* If it's a NGBE_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is NGBE_PF_*. + * Message ACK's are the value or'd with 0xF0000000 + */ +#define NGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with + * this are the ACK */ +#define NGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with + * this are the NACK */ +#define NGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still + * clear to send requests */ +#define NGBE_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define NGBE_VT_MSGINFO_MASK (0xFF << NGBE_VT_MSGINFO_SHIFT) + +/* definitions to support mailbox API version negotiation */ + +/* + * each element denotes a version of the API; existing numbers may not + * change; any additions must go at the end + */ +enum ngbe_pfvf_api_rev { + ngbe_mbox_api_null, + ngbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ + ngbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ + ngbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ + ngbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ + ngbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ + ngbe_mbox_api_unknown, /* indicates that API version is not known */ +}; + +/* mailbox API, legacy requests */ +#define NGBE_VF_RESET 0x01 /* VF requests reset */ +#define NGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define NGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define NGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ + +/* mailbox API, version 1.0 VF requests */ +#define NGBE_VF_SET_LPE 0x05 /* VF requests PF to set 
VMOLR.LPE */ +#define NGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ +#define NGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ + +/* mailbox API, version 1.1 VF requests */ +#define NGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ + +#define NGBE_NOFITY_VF_LINK_STATUS 0x01 + +/* mailbox API, version 1.2 VF requests */ +#define NGBE_VF_GET_RETA 0x0a /* VF request for RETA */ +#define NGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ +#define NGBE_VF_UPDATE_XCAST_MODE 0x0c +#define NGBE_VF_BACKUP 0x8001 /* VF requests backup */ + +#define NGBE_VF_GET_LINK_STATUS 0x20 /* VF get link status from PF */ + +/* mode choices for IXGBE_VF_UPDATE_XCAST_MODE */ +enum ngbevf_xcast_modes { + NGBEVF_XCAST_MODE_NONE = 0, + NGBEVF_XCAST_MODE_MULTI, + NGBEVF_XCAST_MODE_ALLMULTI, + NGBEVF_XCAST_MODE_PROMISC, +}; + +/* GET_QUEUES return data indices within the mailbox */ +#define NGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define NGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define NGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ +#define NGBE_VF_DEF_QUEUE 4 /* Default queue offset */ + +/* length of permanent address message returned from PF */ +#define NGBE_VF_PERMADDR_MSG_LEN 4 +/* word in permanent address message with the current multicast type */ +#define NGBE_VF_MC_TYPE_WORD 3 + +#define NGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ + +/* mailbox API, version 2.0 VF requests */ +#define NGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ +#define NGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ +#define NGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */ +#define NGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */ +#define NGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */ +#define NGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */ +#define NGBE_VF_GET_MTU 0x0E /* get bounds on MTU */ +#define NGBE_VF_SET_MTU 0x0F /* set a specific MTU */ + +/* mailbox API, version 2.0 PF requests */ 
+#define NGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */ + +#define NGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define NGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +int ngbe_read_mbx(struct ngbe_hw *, u32 *, u16, u16); +int ngbe_write_mbx(struct ngbe_hw *, u32 *, u16, u16); +int ngbe_read_posted_mbx(struct ngbe_hw *, u32 *, u16, u16); +int ngbe_write_posted_mbx(struct ngbe_hw *, u32 *, u16, u16); +int ngbe_check_for_msg(struct ngbe_hw *, u16); +int ngbe_check_for_ack(struct ngbe_hw *, u16); +int ngbe_check_for_rst(struct ngbe_hw *, u16); +void ngbe_init_mbx_ops(struct ngbe_hw *hw); +void ngbe_init_mbx_params_vf(struct ngbe_hw *); +void ngbe_init_mbx_params_pf(struct ngbe_hw *); + +#endif /* _NGBE_MBX_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_osdep.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_osdep.h new file mode 100644 index 000000000000..86951f67e64f --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_osdep.h @@ -0,0 +1,219 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ */ + +/* glue for the OS independent part of ngbe + * includes register access macros + */ + +#ifndef _NGBE_OSDEP_H_ +#define _NGBE_OSDEP_H_ + +#include +#include +#include +#include +#include +#include "ngbe_kcompat.h" + +#define NGBE_CPU_TO_BE16(_x) cpu_to_be16(_x) +#define NGBE_BE16_TO_CPU(_x) be16_to_cpu(_x) +#define NGBE_CPU_TO_BE32(_x) cpu_to_be32(_x) +#define NGBE_BE32_TO_CPU(_x) be32_to_cpu(_x) + +#define msec_delay(_x) msleep(_x) + +#define usec_delay(_x) udelay(_x) + +#define IOMEM __iomem + +#define NGBE_NAME "ngbe" + +/* #define DBG 1 */ + +#define DPRINTK(nlevel, klevel, fmt, args...) \ + ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ + printk(KERN_##klevel NGBE_NAME ": %s: %s: " fmt, \ + adapter->netdev->name, \ + __func__, ## args))) + +#ifndef _WIN32 +#define ngbe_emerg(fmt, ...) printk(KERN_EMERG fmt, ## __VA_ARGS__) +#define ngbe_alert(fmt, ...) printk(KERN_ALERT fmt, ## __VA_ARGS__) +#define ngbe_crit(fmt, ...) printk(KERN_CRIT fmt, ## __VA_ARGS__) +#define ngbe_error(fmt, ...) printk(KERN_ERR fmt, ## __VA_ARGS__) +#define ngbe_warn(fmt, ...) printk(KERN_WARNING fmt, ## __VA_ARGS__) +#define ngbe_notice(fmt, ...) printk(KERN_NOTICE fmt, ## __VA_ARGS__) +#define ngbe_info(fmt, ...) printk(KERN_INFO fmt, ## __VA_ARGS__) +#define ngbe_print(fmt, ...) printk(KERN_DEBUG fmt, ## __VA_ARGS__) +#define ngbe_trace(fmt, ...) printk(KERN_INFO fmt, ## __VA_ARGS__) +#else /* _WIN32 */ +#define ngbe_error(lvl, fmt, ...) \ + DbgPrintEx(DPFLTR_IHVNETWORK_ID, DPFLTR_ERROR_LEVEL, \ + "%s-error: %s@%d, " fmt, \ + "ngbe", __FUNCTION__, __LINE__, ## __VA_ARGS__) +#endif /* !_WIN32 */ + +#ifdef DBG +#ifndef _WIN32 +#define ngbe_debug(fmt, ...) \ + printk(KERN_DEBUG \ + "%s-debug: %s@%d, " fmt, \ + "ngbe", __FUNCTION__, __LINE__, ## __VA_ARGS__) +#else /* _WIN32 */ +#define ngbe_debug(fmt, ...) 
\ + DbgPrintEx(DPFLTR_IHVNETWORK_ID, DPFLTR_ERROR_LEVEL, \ + "%s-debug: %s@%d, " fmt, \ + "ngbe", __FUNCTION__, __LINE__, ## __VA_ARGS__) +#endif /* _WIN32 */ +#else /* DBG */ +#define ngbe_debug(fmt, ...) do {} while (0) +#endif /* DBG */ + + +#ifdef DBG +#define ASSERT(_x) BUG_ON(!(_x)) +#define DEBUGOUT(S) printk(KERN_DEBUG S) +#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT2(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT3(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT4(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT5(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT6(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGFUNC(fmt, ...) ngbe_debug(fmt, ## __VA_ARGS__) +#else +#define ASSERT(_x) do {} while (0) +#define DEBUGOUT(S) do {} while (0) +#define DEBUGOUT1(S, A...) do {} while (0) +#define DEBUGOUT2(S, A...) do {} while (0) +#define DEBUGOUT3(S, A...) do {} while (0) +#define DEBUGOUT4(S, A...) do {} while (0) +#define DEBUGOUT5(S, A...) do {} while (0) +#define DEBUGOUT6(S, A...) do {} while (0) +#define DEBUGFUNC(fmt, ...) do {} while (0) +#endif + +#define NGBE_SFP_DETECT_RETRIES 2 + +struct ngbe_hw; +struct ngbe_msg { + u16 msg_enable; +}; +struct net_device *ngbe_hw_to_netdev(const struct ngbe_hw *hw); +struct ngbe_msg *ngbe_hw_to_msg(const struct ngbe_hw *hw); + +#define hw_dbg(hw, format, arg...) \ + netdev_dbg(ngbe_hw_to_netdev(hw), format, ## arg) +#define hw_err(hw, format, arg...) \ + netdev_err(ngbe_hw_to_netdev(hw), format, ## arg) +#define e_dev_info(format, arg...) \ + dev_info(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_notice(format, arg...) \ + dev_notice(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dbg(msglvl, format, arg...) 
\ + netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_crit(msglvl, format, arg...) \ + netif_crit(adapter, msglvl, adapter->netdev, format, ## arg) + +#define NGBE_FAILED_READ_CFG_DWORD 0xffffffffU +#define NGBE_FAILED_READ_CFG_WORD 0xffffU +#define NGBE_FAILED_READ_CFG_BYTE 0xffU + +extern u32 ngbe_read_reg(struct ngbe_hw *hw, u32 reg, bool quiet); +extern u16 ngbe_read_pci_cfg_word(struct ngbe_hw *hw, u32 reg); +extern void ngbe_write_pci_cfg_word(struct ngbe_hw *hw, u32 reg, u16 value); + +#define NGBE_READ_PCIE_WORD ngbe_read_pci_cfg_word +#define NGBE_WRITE_PCIE_WORD ngbe_write_pci_cfg_word +#define NGBE_R32_Q(h, r) ngbe_read_reg(h, r, true) + +#ifndef writeq +#define writeq(val, addr) do { writel((u32) (val), addr); \ + writel((u32) (val >> 32), (addr + 4)); \ + } while (0); +#endif + +#define NGBE_EEPROM_GRANT_ATTEMPS 100 +#define NGBE_HTONL(_i) htonl(_i) +#define NGBE_NTOHL(_i) ntohl(_i) +#define NGBE_NTOHS(_i) ntohs(_i) +#define NGBE_CPU_TO_LE32(_i) cpu_to_le32(_i) +#define NGBE_LE32_TO_CPUS(_i) le32_to_cpus(_i) + +enum { + NGBE_ERROR_SOFTWARE, + NGBE_ERROR_POLLING, + NGBE_ERROR_INVALID_STATE, + NGBE_ERROR_UNSUPPORTED, + NGBE_ERROR_ARGUMENT, + NGBE_ERROR_CAUTION, +}; + +#define ERROR_REPORT(level, format, arg...) 
do { \ + switch (level) { \ + case NGBE_ERROR_SOFTWARE: \ + case NGBE_ERROR_CAUTION: \ + case NGBE_ERROR_POLLING: \ + netif_warn(ngbe_hw_to_msg(hw), drv, ngbe_hw_to_netdev(hw), \ + format, ## arg); \ + break; \ + case NGBE_ERROR_INVALID_STATE: \ + case NGBE_ERROR_UNSUPPORTED: \ + case NGBE_ERROR_ARGUMENT: \ + netif_err(ngbe_hw_to_msg(hw), hw, ngbe_hw_to_netdev(hw), \ + format, ## arg); \ + break; \ + default: \ + break; \ + } \ +} while (0) + +#define ERROR_REPORT1 ERROR_REPORT +#define ERROR_REPORT2 ERROR_REPORT +#define ERROR_REPORT3 ERROR_REPORT + +#define UNREFERENCED_XPARAMETER +#define UNREFERENCED_1PARAMETER(_p) do { \ + uninitialized_var(_p); \ +} while (0) +#define UNREFERENCED_2PARAMETER(_p, _q) do { \ + uninitialized_var(_p); \ + uninitialized_var(_q); \ +} while (0) +#define UNREFERENCED_3PARAMETER(_p, _q, _r) do { \ + uninitialized_var(_p); \ + uninitialized_var(_q); \ + uninitialized_var(_r); \ +} while (0) +#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) do { \ + uninitialized_var(_p); \ + uninitialized_var(_q); \ + uninitialized_var(_r); \ + uninitialized_var(_s); \ +} while (0) +#define UNREFERENCED_PARAMETER(_p) UNREFERENCED_1PARAMETER(_p) + +#endif /* _NGBE_OSDEP_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_param.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_param.c new file mode 100644 index 000000000000..aaf91f2ef2af --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_param.c @@ -0,0 +1,932 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + + +#include +#include + +#include "ngbe.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ +#define NGBE_MAX_NIC 32 +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define STRINGIFY(foo) #foo /* magic for getting defines into strings */ +#define XSTRINGIFY(bar) STRINGIFY(bar) + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define NGBE_PARAM_INIT { [0 ... NGBE_MAX_NIC] = OPTION_UNSET } +#ifndef module_param_array +/* Module Parameters are always initialized to -1, so that the driver + * can tell the difference between no user specified value or the + * user asking for the default value. + * The true default values are loaded in when ngbe_check_options is called. + * + * This is a GCC extension to ANSI C. + * See the item "Labelled Elements in Initializers" in the section + * "Extensions to the C Language Family" of the GCC documentation. 
+ */ + +#define NGBE_PARAM(X, desc) \ + static const int __devinitconst X[NGBE_MAX_NIC+1] = NGBE_PARAM_INIT; \ + MODULE_PARM(X, "1-" __MODULE_STRING(NGBE_MAX_NIC) "i"); \ + MODULE_PARM_DESC(X, desc); +#else /* !module_param_array */ +#define NGBE_PARAM(X, desc) \ + static int __devinitdata X[NGBE_MAX_NIC+1] = NGBE_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); +#endif /* module_param_array */ + +/* IntMode (Interrupt Mode) + * + * Valid Range: 0-2 + * - 0 - Legacy Interrupt + * - 1 - MSI Interrupt + * - 2 - MSI-X Interrupt(s) + * + * Default Value: 2 + */ +NGBE_PARAM(InterruptType, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " + "default IntMode (deprecated)"); +NGBE_PARAM(IntMode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " + "default 2"); +#define NGBE_INT_LEGACY 0 +#define NGBE_INT_MSI 1 +#define NGBE_INT_MSIX 2 +#define NGBE_DEFAULT_INT NGBE_INT_MSIX + +/* MQ - Multiple Queue enable/disable + * + * Valid Range: 0, 1 + * - 0 - disables MQ + * - 1 - enables MQ + * + * Default Value: 1 + */ + +NGBE_PARAM(MQ, "Disable or enable Multiple Queues, default 1"); + + +/* RSS - Receive-Side Scaling (RSS) Descriptor Queues + * + * Valid Range: 0-64 + * - 0 - enables RSS and sets the Desc. Q's to min(64, num_online_cpus()). + * - 1-64 - enables RSS and sets the Desc. Q's to the specified value. + * + * Default Value: 0 + */ + +NGBE_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues, " + "default 0=number of cpus"); + +/* VMDQ - Virtual Machine Device Queues (VMDQ) + * + * Valid Range: 1-16 + * - 1 Disables VMDQ by allocating only a single queue. + * - 2-16 - enables VMDQ and sets the Desc. Q's to the specified value. 
+ * + * Default Value: 1 + */ + +#define NGBE_DEFAULT_NUM_VMDQ 8 + +NGBE_PARAM(VMDQ, "Number of Virtual Machine Device Queues: 0/1 = disable, " + "2-16 enable (default=" XSTRINGIFY(NGBE_DEFAULT_NUM_VMDQ) ")"); + +#ifdef CONFIG_PCI_IOV +/* max_vfs - SR I/O Virtualization + * + * Valid Range: 0-63 + * - 0 Disables SR-IOV + * - 1-63 - enables SR-IOV and sets the number of VFs enabled + * + * Default Value: 0 + */ + +#define MAX_SRIOV_VFS 8 + +NGBE_PARAM(max_vfs, "Number of Virtual Functions: 0 = disable (default), " + "1-" XSTRINGIFY(MAX_SRIOV_VFS) " = enable " + "this many VFs"); + +/* VEPA - Set internal bridge to VEPA mode + * + * Valid Range: 0-1 + * - 0 Set bridge to VEB mode + * - 1 Set bridge to VEPA mode + * + * Default Value: 0 + */ +/* + *Note: + *===== + * This provides ability to ensure VEPA mode on the internal bridge even if + * the kernel does not support the netdev bridge setting operations. +*/ +NGBE_PARAM(VEPA, "VEPA Bridge Mode: 0 = VEB (default), 1 = VEPA"); +#endif + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 980-500000 (0=off, 1=dynamic) + * + * Default Value: 1 + */ +#define DEFAULT_ITR (NGBE_STATIC_ITR == 0) || \ + (NGBE_STATIC_ITR == 1)?NGBE_STATIC_ITR:(u16)((1000000/NGBE_STATIC_ITR) << 2) + +NGBE_PARAM(InterruptThrottleRate, "Maximum interrupts per second, per vector, " + "(0,1,980-500000), default 1"); +#define MAX_ITR NGBE_MAX_INT_RATE +#define MIN_ITR NGBE_MIN_INT_RATE + +#ifndef NGBE_NO_LLI + +/* LLIPort (Low Latency Interrupt TCP Port) + * + * Valid Range: 0 - 65535 + * + * Default Value: 0 (disabled) + */ +NGBE_PARAM(LLIPort, "Low Latency Interrupt TCP Port (0-65535)"); + +#define DEFAULT_LLIPORT 0 +#define MAX_LLIPORT 0xFFFF +#define MIN_LLIPORT 0 + + +/* LLISize (Low Latency Interrupt on Packet Size) + * + * Valid Range: 0 - 1500 + * + * Default Value: 0 (disabled) + */ +NGBE_PARAM(LLISize, "Low Latency Interrupt on Packet Size (0-1500)"); + +#define DEFAULT_LLISIZE 0 +#define MAX_LLISIZE 1500 +#define 
MIN_LLISIZE 0 + +/* LLIEType (Low Latency Interrupt Ethernet Type) + * + * Valid Range: 0 - 0x8fff + * + * Default Value: 0 (disabled) + */ +NGBE_PARAM(LLIEType, "Low Latency Interrupt Ethernet Protocol Type"); + +#define DEFAULT_LLIETYPE 0 +#define MAX_LLIETYPE 0x8fff +#define MIN_LLIETYPE 0 + +/* LLIVLANP (Low Latency Interrupt on VLAN priority threshold) + * + * Valid Range: 0 - 7 + * + * Default Value: 0 (disabled) + */ +NGBE_PARAM(LLIVLANP, "Low Latency Interrupt on VLAN priority threshold"); + +#define DEFAULT_LLIVLANP 0 +#define MAX_LLIVLANP 7 +#define MIN_LLIVLANP 0 + +#endif /* NGBE_NO_LLI */ +#ifdef HAVE_TX_MQ +/* Software ATR packet sample rate + * + * Valid Range: 0-255 0 = off, 1-255 = rate of Tx packet inspection + * + * Default Value: 20 + */ +NGBE_PARAM(AtrSampleRate, "Software ATR Tx packet sample rate"); + +#define NGBE_MAX_ATR_SAMPLE_RATE 255 +#define NGBE_MIN_ATR_SAMPLE_RATE 1 +#define NGBE_ATR_SAMPLE_RATE_OFF 0 +#define NGBE_DEFAULT_ATR_SAMPLE_RATE 20 +#endif /* HAVE_TX_MQ */ + +/* Enable/disable Large Receive Offload + * + * Valid Values: 0(off), 1(on) + * + * Default Value: 1 + */ +NGBE_PARAM(LRO, "Large Receive Offload (0,1), default 1 = on"); + +/* Enable/disable support for DMA coalescing + * + * Valid Values: 0(off), 41 - 10000(on) + * + * Default Value: 0 + */ +NGBE_PARAM(dmac_watchdog, + "DMA coalescing watchdog in microseconds (0,41-10000)," + "default 0 = off"); + +/* Rx buffer mode + * + * Valid Range: 0-1 0 = no header split, 1 = hdr split + * + * Default Value: 0 + */ +NGBE_PARAM(RxBufferMode, "0=(default)no header split\n" + "\t\t\t1=hdr split for recognized packet\n"); + +#define NGBE_RXBUFMODE_NO_HEADER_SPLIT 0 +#define NGBE_RXBUFMODE_HEADER_SPLIT 1 +#define NGBE_DEFAULT_RXBUFMODE NGBE_RXBUFMODE_NO_HEADER_SPLIT + + +struct ngbe_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + const char *msg; + int def; + union { + struct { /* range_option info */ + int min; + int max; + 
} r; + struct { /* list_option info */ + int nr; + const struct ngbe_opt_list { + int i; + char *str; + } *p; + } l; + } arg; +}; + +static int __devinit ngbe_validate_option(u32 *value, + struct ngbe_option *opt) +{ + int val = (int)*value; + + if (val == OPTION_UNSET) { + ngbe_info("ngbe: Invalid %s specified (%d), %s\n", + opt->name, val, opt->err); + *value = (u32)opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (val) { + case OPTION_ENABLED: + ngbe_info("ngbe: %s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + ngbe_info("ngbe: %s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if ((val >= opt->arg.r.min && val <= opt->arg.r.max) || + val == opt->def) { + if (opt->msg) + ngbe_info("ngbe: %s set to %d, %s\n", + opt->name, val, opt->msg); + else + ngbe_info("ngbe: %s set to %d\n", + opt->name, val); + return 0; + } + break; + case list_option: { + int i; + const struct ngbe_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (val == ent->i) { + if (ent->str[0] != '\0') + ngbe_info("%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG_ON(1); + } + + ngbe_info("ngbe: Invalid %s specified (%d), %s\n", + opt->name, val, opt->err); + *value = (u32)opt->def; + return -1; +} + +/** + * ngbe_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. 
+ **/ +void __devinit ngbe_check_options(struct ngbe_adapter *adapter) +{ + u32 bd = adapter->bd_number; + u32 *aflags = &adapter->flags; + struct ngbe_ring_feature *feature = adapter->ring_feature; + u32 vmdq; + + if (bd >= NGBE_MAX_NIC) { + ngbe_notice("Warning: no configuration for board #%d\n", bd); + ngbe_notice("Using defaults for all values\n"); +#ifndef module_param_array + bd = NGBE_MAX_NIC; +#endif + } + + { /* Interrupt Mode */ + u32 int_mode; + static struct ngbe_option opt = { + .type = range_option, + .name = "Interrupt Mode", + .err = + "using default of "__MODULE_STRING(NGBE_DEFAULT_INT), + .def = NGBE_DEFAULT_INT, + .arg = { .r = { .min = NGBE_INT_LEGACY, + .max = NGBE_INT_MSIX} } + }; + +#ifdef module_param_array + if (num_IntMode > bd || num_InterruptType > bd) { +#endif + int_mode = IntMode[bd]; + if (int_mode == OPTION_UNSET) + int_mode = InterruptType[bd]; + ngbe_validate_option(&int_mode, &opt); + switch (int_mode) { + case NGBE_INT_MSIX: + if (!(*aflags & NGBE_FLAG_MSIX_CAPABLE)) + ngbe_info( + "Ignoring MSI-X setting; " + "support unavailable\n"); + break; + case NGBE_INT_MSI: + if (!(*aflags & NGBE_FLAG_MSI_CAPABLE)) { + ngbe_info( + "Ignoring MSI setting; " + "support unavailable\n"); + } else { + *aflags &= ~NGBE_FLAG_MSIX_CAPABLE; + } + break; + case NGBE_INT_LEGACY: + default: + *aflags &= ~NGBE_FLAG_MSIX_CAPABLE; + *aflags &= ~NGBE_FLAG_MSI_CAPABLE; + break; + } +#ifdef module_param_array + } else { + /* default settings */ + if (opt.def == NGBE_INT_MSIX && + *aflags & NGBE_FLAG_MSIX_CAPABLE) { + *aflags |= NGBE_FLAG_MSIX_CAPABLE; + *aflags |= NGBE_FLAG_MSI_CAPABLE; + } else if (opt.def == NGBE_INT_MSI && + *aflags & NGBE_FLAG_MSI_CAPABLE) { + *aflags &= ~NGBE_FLAG_MSIX_CAPABLE; + *aflags |= NGBE_FLAG_MSI_CAPABLE; + } else { + *aflags &= ~NGBE_FLAG_MSIX_CAPABLE; + *aflags &= ~NGBE_FLAG_MSI_CAPABLE; + } + } +#endif + } + { /* Multiple Queue Support */ + static struct ngbe_option opt = { + .type = enable_option, + .name = "Multiple 
Queue Support", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + +#ifdef module_param_array + if (num_MQ > bd) { +#endif + u32 mq = MQ[bd]; + ngbe_validate_option(&mq, &opt); + if (mq) + *aflags |= NGBE_FLAG_MQ_CAPABLE; + else + *aflags &= ~NGBE_FLAG_MQ_CAPABLE; +#ifdef module_param_array + } else { + if (opt.def == OPTION_ENABLED) + *aflags |= NGBE_FLAG_MQ_CAPABLE; + else + *aflags &= ~NGBE_FLAG_MQ_CAPABLE; + } +#endif + /* Check Interoperability */ + if ((*aflags & NGBE_FLAG_MQ_CAPABLE) && + !(*aflags & NGBE_FLAG_MSIX_CAPABLE)) { + DPRINTK(PROBE, INFO, + "Multiple queues are not supported while MSI-X " + "is disabled. Disabling Multiple Queues.\n"); + *aflags &= ~NGBE_FLAG_MQ_CAPABLE; + } + } + + { /* Receive-Side Scaling (RSS) */ + static struct ngbe_option opt = { + .type = range_option, + .name = "Receive-Side Scaling (RSS)", + .err = "using default.", + .def = 0, + .arg = { .r = { .min = 0, + .max = 1} } + }; + u32 rss = RSS[bd]; + /* adjust Max allowed RSS queues based on MAC type */ + opt.arg.r.max = min_t(int, ngbe_max_rss_indices(adapter), + num_online_cpus()); + +#ifdef module_param_array + if (num_RSS > bd) { +#endif + ngbe_validate_option(&rss, &opt); + /* base it off num_online_cpus() with hardware limit */ + if (!rss) + rss = min_t(int, opt.arg.r.max, + num_online_cpus()); + + feature[RING_F_RSS].limit = (u16)rss; +#ifdef module_param_array + } else if (opt.def == 0) { + rss = min_t(int, ngbe_max_rss_indices(adapter), + num_online_cpus()); + feature[RING_F_RSS].limit = rss; + } +#endif + /* Check Interoperability */ + if (rss > 1) { + if (!(*aflags & NGBE_FLAG_MQ_CAPABLE)) { + DPRINTK(PROBE, INFO, + "Multiqueue is disabled. 
" + "Limiting RSS.\n"); + feature[RING_F_RSS].limit = 1; + } + } + adapter->flags2 |= NGBE_FLAG2_RSS_ENABLED; + } + { /* Virtual Machine Device Queues (VMDQ) */ + static struct ngbe_option opt = { + .type = range_option, + .name = "Virtual Machine Device Queues (VMDQ)", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = NGBE_MAX_VMDQ_INDICES + } } + }; + +#ifdef module_param_array + if (num_VMDQ > bd) { +#endif + vmdq = VMDQ[bd]; + + ngbe_validate_option(&vmdq, &opt); + + /* zero or one both mean disabled from our driver's + * perspective */ + if (vmdq > 1) { + *aflags |= NGBE_FLAG_VMDQ_ENABLED; + } else + *aflags &= ~NGBE_FLAG_VMDQ_ENABLED; + + feature[RING_F_VMDQ].limit = (u16)vmdq; +#ifdef module_param_array + } else { + if (opt.def == OPTION_DISABLED) + *aflags &= ~NGBE_FLAG_VMDQ_ENABLED; + else + *aflags |= NGBE_FLAG_VMDQ_ENABLED; + + feature[RING_F_VMDQ].limit = opt.def; + } +#endif + /* Check Interoperability */ + if (*aflags & NGBE_FLAG_VMDQ_ENABLED) { + if (!(*aflags & NGBE_FLAG_MQ_CAPABLE)) { + DPRINTK(PROBE, INFO, + "VMDQ is not supported while multiple " + "queues are disabled. 
" + "Disabling VMDQ.\n"); + *aflags &= ~NGBE_FLAG_VMDQ_ENABLED; + feature[RING_F_VMDQ].limit = 0; + } + } + } +#ifdef CONFIG_PCI_IOV + { /* Single Root I/O Virtualization (SR-IOV) */ + static struct ngbe_option opt = { + .type = range_option, + .name = "I/O Virtualization (IOV)", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = MAX_SRIOV_VFS} } + }; + +#ifdef module_param_array + if (num_max_vfs > bd) { +#endif + u32 vfs = max_vfs[bd]; + if (ngbe_validate_option(&vfs, &opt)) { + vfs = 0; + DPRINTK(PROBE, INFO, + "max_vfs out of range " + "Disabling SR-IOV.\n"); + } + + adapter->num_vfs = vfs; + + if (vfs) + *aflags |= NGBE_FLAG_SRIOV_ENABLED; + else + *aflags &= ~NGBE_FLAG_SRIOV_ENABLED; +#ifdef module_param_array + } else { + if (opt.def == OPTION_DISABLED) { + adapter->num_vfs = 0; + *aflags &= ~NGBE_FLAG_SRIOV_ENABLED; + } else { + adapter->num_vfs = opt.def; + *aflags |= NGBE_FLAG_SRIOV_ENABLED; + } + } +#endif + + /* Check Interoperability */ + if (*aflags & NGBE_FLAG_SRIOV_ENABLED) { + if (!(*aflags & NGBE_FLAG_SRIOV_CAPABLE)) { + DPRINTK(PROBE, INFO, + "IOV is not supported on this " + "hardware. Disabling IOV.\n"); + *aflags &= ~NGBE_FLAG_SRIOV_ENABLED; + adapter->num_vfs = 0; + } else if (!(*aflags & NGBE_FLAG_MQ_CAPABLE)) { + DPRINTK(PROBE, INFO, + "IOV is not supported while multiple " + "queues are disabled. 
" + "Disabling IOV.\n"); + *aflags &= ~NGBE_FLAG_SRIOV_ENABLED; + adapter->num_vfs = 0; + } + } + } + { /* VEPA Bridge Mode enable for SR-IOV mode */ + static struct ngbe_option opt = { + .type = range_option, + .name = "VEPA Bridge Mode Enable", + .err = "defaulting to disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = OPTION_ENABLED} } + }; + +#ifdef module_param_array + if (num_VEPA > bd) { +#endif + u32 vepa = VEPA[bd]; + ngbe_validate_option(&vepa, &opt); + if (vepa) + adapter->flags |= + NGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; +#ifdef module_param_array + } else { + if (opt.def == OPTION_ENABLED) + adapter->flags |= + NGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } +#endif + } +#endif /* CONFIG_PCI_IOV */ + { /* Interrupt Throttling Rate */ + static struct ngbe_option opt = { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of "__MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR } } + }; + +#ifdef module_param_array + if (num_InterruptThrottleRate > bd) { +#endif + u32 itr = InterruptThrottleRate[bd]; + switch (itr) { + case 0: + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + adapter->rx_itr_setting = 0; + break; + case 1: + DPRINTK(PROBE, INFO, "dynamic interrupt " + "throttling enabled\n"); + adapter->rx_itr_setting = 1; + break; + default: + ngbe_validate_option(&itr, &opt); + /* the first bit is used as control */ + adapter->rx_itr_setting = (u16)((1000000/itr) << 2); + break; + } + adapter->tx_itr_setting = adapter->rx_itr_setting; +#ifdef module_param_array + } else { + adapter->rx_itr_setting = opt.def; + adapter->tx_itr_setting = opt.def; + } +#endif + } +#ifndef NGBE_NO_LLI + { /* Low Latency Interrupt TCP Port*/ + static struct ngbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt TCP Port", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIPORT), + .def = DEFAULT_LLIPORT, + .arg = { .r = { 
.min = MIN_LLIPORT, + .max = MAX_LLIPORT } } + }; + +#ifdef module_param_array + if (num_LLIPort > bd) { +#endif + adapter->lli_port = LLIPort[bd]; + if (adapter->lli_port) { + ngbe_validate_option(&adapter->lli_port, &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } +#ifdef module_param_array + } else { + adapter->lli_port = opt.def; + } +#endif + } + { /* Low Latency Interrupt on Packet Size */ + static struct ngbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on Packet Size", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLISIZE), + .def = DEFAULT_LLISIZE, + .arg = { .r = { .min = MIN_LLISIZE, + .max = MAX_LLISIZE } } + }; + +#ifdef module_param_array + if (num_LLISize > bd) { +#endif + adapter->lli_size = LLISize[bd]; + if (adapter->lli_size) { + ngbe_validate_option(&adapter->lli_size, &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } +#ifdef module_param_array + } else { + adapter->lli_size = opt.def; + } +#endif + } + { /* Low Latency Interrupt EtherType*/ + static struct ngbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on Ethernet Protocol " + "Type", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIETYPE), + .def = DEFAULT_LLIETYPE, + .arg = { .r = { .min = MIN_LLIETYPE, + .max = MAX_LLIETYPE } } + }; + +#ifdef module_param_array + if (num_LLIEType > bd) { +#endif + adapter->lli_etype = LLIEType[bd]; + if (adapter->lli_etype) { + ngbe_validate_option(&adapter->lli_etype, + &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } +#ifdef module_param_array + } else { + adapter->lli_etype = opt.def; + } +#endif + } + { /* LLI VLAN Priority */ + static struct ngbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on VLAN priority " + "threshold", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIVLANP), + .def = DEFAULT_LLIVLANP, + .arg = { .r = { .min = MIN_LLIVLANP, + .max = 
MAX_LLIVLANP } } + }; + +#ifdef module_param_array + if (num_LLIVLANP > bd) { +#endif + adapter->lli_vlan_pri = LLIVLANP[bd]; + if (adapter->lli_vlan_pri) { + ngbe_validate_option(&adapter->lli_vlan_pri, + &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } +#ifdef module_param_array + } else { + adapter->lli_vlan_pri = opt.def; + } +#endif + } +#endif /* NGBE_NO_LLI */ +#ifdef HAVE_TX_MQ + { /* Flow Director ATR Tx sample packet rate */ + static struct ngbe_option opt = { + .type = range_option, + .name = "Software ATR Tx packet sample rate", + .err = "using default of " + __MODULE_STRING(NGBE_DEFAULT_ATR_SAMPLE_RATE), + .def = NGBE_DEFAULT_ATR_SAMPLE_RATE, + .arg = {.r = {.min = NGBE_ATR_SAMPLE_RATE_OFF, + .max = NGBE_MAX_ATR_SAMPLE_RATE} } + }; + static const char atr_string[] = + "ATR Tx Packet sample rate set to"; + + if (num_AtrSampleRate > bd) { + adapter->atr_sample_rate = AtrSampleRate[bd]; + + if (adapter->atr_sample_rate) { + ngbe_validate_option(&adapter->atr_sample_rate, + &opt); + DPRINTK(PROBE, INFO, "%s %d\n", atr_string, + adapter->atr_sample_rate); + } + } else { + adapter->atr_sample_rate = opt.def; + } + } +#endif /* HAVE_TX_MQ */ + { /* LRO - Set Large Receive Offload */ + struct ngbe_option opt = { + .type = enable_option, + .name = "LRO - Large Receive Offload", + .err = "defaulting to Disabled", +/* lro switch to ON when run on SW and FT platform */ +/* emerald temp setting */ +#if defined(TXGBE_SUPPORT_DEEPIN_SW) || \ + defined(TXGBE_SUPPORT_KYLIN_SW) || \ + defined(TXGBE_SUPPORT_KYLIN_FT) + .def = OPTION_ENABLED +#else + .def = OPTION_DISABLED +#endif + }; + struct net_device *netdev = adapter->netdev; + +#ifdef NGBE_NO_LRO + opt.def = OPTION_DISABLED; + +#endif +#ifdef module_param_array + if (num_LRO > bd) { +#endif + u32 lro = LRO[bd]; + ngbe_validate_option(&lro, &opt); + if (lro) + netdev->features |= NETIF_F_LRO; + else + netdev->features &= ~NETIF_F_LRO; +#ifdef module_param_array + } else if (opt.def == 
OPTION_ENABLED) { + netdev->features |= NETIF_F_LRO; + } else { + netdev->features &= ~NETIF_F_LRO; + } +#endif +#ifdef NGBE_NO_LRO + if ((netdev->features & NETIF_F_LRO)) { + DPRINTK(PROBE, INFO, + "RSC is not supported on this " + "hardware. Disabling RSC.\n"); + netdev->features &= ~NETIF_F_LRO; + } +#endif /* NGBE_NO_LRO */ + } + { /* DMA Coalescing */ + struct ngbe_option opt = { + .type = range_option, + .name = "dmac_watchdog", + .err = "defaulting to 0 (disabled)", + .def = 0, + .arg = { .r = { .min = 41, .max = 10000 } }, + }; + const char *cmsg = "DMA coalescing not supported on this " + "hardware"; + + opt.err = cmsg; + opt.msg = cmsg; + opt.arg.r.min = 0; + opt.arg.r.max = 0; + +#ifdef module_param_array + if (num_dmac_watchdog > bd) { +#endif + u32 dmac_wd = dmac_watchdog[bd]; + + ngbe_validate_option(&dmac_wd, &opt); + adapter->hw.mac.dmac_config.watchdog_timer = (u16)dmac_wd; +#ifdef module_param_array + } else { + adapter->hw.mac.dmac_config.watchdog_timer = opt.def; + } +#endif + } + + { /* Rx buffer mode */ + u32 rx_buf_mode; + static struct ngbe_option opt = { + .type = range_option, + .name = "Rx buffer mode", + .err = "using default of " + __MODULE_STRING(NGBE_DEFAULT_RXBUFMODE), + .def = NGBE_DEFAULT_RXBUFMODE, + .arg = {.r = {.min = NGBE_RXBUFMODE_NO_HEADER_SPLIT, + .max = NGBE_RXBUFMODE_HEADER_SPLIT} } + + }; + +#ifdef module_param_array + if (num_RxBufferMode > bd) { +#endif + rx_buf_mode = RxBufferMode[bd]; + ngbe_validate_option(&rx_buf_mode, &opt); + switch (rx_buf_mode) { + case NGBE_RXBUFMODE_NO_HEADER_SPLIT: + *aflags &= ~NGBE_FLAG_RX_HS_ENABLED; + break; + case NGBE_RXBUFMODE_HEADER_SPLIT: + *aflags |= NGBE_FLAG_RX_HS_ENABLED; + break; + default: + break; + } +#ifdef module_param_array + } else { + *aflags &= ~NGBE_FLAG_RX_HS_ENABLED; + } +#endif + + } +} diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_pcierr.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_pcierr.c new file mode 100644 index 000000000000..49ed90c15f82 --- /dev/null 
#include <linux/pci.h>	/* NOTE(review): the two system header names were lost in
			 * extraction; <linux/pci.h> and <linux/delay.h> match the
			 * symbols used in this file (pci_*_config_*, pci_walk_bus,
			 * msleep/ssleep) -- confirm against ngbe oob 1.2.5.3. */
#include <linux/delay.h>
#include "ngbe_pcierr.h"
#include "ngbe.h"

/* Root Port AER interrupt enables that ngbe_pcie_do_recovery() masks off for
 * the duration of the recovery sequence and restores at the end. */
#define NGBE_ROOT_PORT_INTR_ON_MESG_MASK (PCI_ERR_ROOT_CMD_COR_EN| \
	PCI_ERR_ROOT_CMD_NONFATAL_EN| \
	PCI_ERR_ROOT_CMD_FATAL_EN)

#ifndef PCI_ERS_RESULT_NO_AER_DRIVER
/* No AER capabilities registered for the driver */
#define PCI_ERS_RESULT_NO_AER_DRIVER ((__force pci_ers_result_t) 6)
#endif

#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
/* redefinition because centos 6 can't use pci_walk_bus in pci.h */

struct rw_semaphore pci_bus_sem;

/** pci_walk_bus - walk devices on/under bus, calling callback.
 * @top bus whose devices should be walked
 * @cb callback to be called for each device found
 * @userdata arbitrary pointer to be passed to callback.
 *
 * Walk the given bus, including any bridged devices
 * on buses under this bus. Call the provided callback
 * on each device found.
 *
 * We check the return of @cb each time. If it returns anything
 * other than 0, we break out.
 */
void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
		  void *userdata)
{
	struct pci_dev *dev;
	struct pci_bus *bus;
	struct list_head *next;
	int retval;

	bus = top;
	down_read(&pci_bus_sem);
	/* iterative depth-first walk over the bus_list links; no recursion so
	 * the stack depth is bounded regardless of bridge nesting */
	next = top->devices.next;
	for (;;) {
		if (next == &bus->devices) {
			/* end of this bus, go up or finish */
			if (bus == top)
				break;
			next = bus->self->bus_list.next;
			bus = bus->self->bus;
			continue;
		}
		dev = list_entry(next, struct pci_dev, bus_list);
		if (dev->subordinate) {
			/* this is a pci-pci bridge, do its devices next */
			next = dev->subordinate->devices.next;
			bus = dev->subordinate;
		} else
			next = dev->bus_list.next;

		retval = cb(dev, userdata);
		if (retval)
			break;
	}
	up_read(&pci_bus_sem);
}
#endif

/* Fold one device's error-handler vote (@new) into the aggregate (@orig).
 * As written: NO_AER_DRIVER always wins; NONE never changes the aggregate;
 * CAN_RECOVER/RECOVERED are replaced by any other vote; a NEED_RESET vote
 * upgrades an earlier DISCONNECT to NEED_RESET. */
static pci_ers_result_t merge_result(enum pci_ers_result orig,
				     enum pci_ers_result new)
{
	if (new == PCI_ERS_RESULT_NO_AER_DRIVER)
		return PCI_ERS_RESULT_NO_AER_DRIVER;
	if (new == PCI_ERS_RESULT_NONE)
		return orig;
	switch (orig) {
	case PCI_ERS_RESULT_CAN_RECOVER:
	case PCI_ERS_RESULT_RECOVERED:
		orig = new;
		break;
	case PCI_ERS_RESULT_DISCONNECT:
		if (new == PCI_ERS_RESULT_NEED_RESET)
			orig = PCI_ERS_RESULT_NEED_RESET;
		break;
	default:
		break;
	}
	return orig;
}

/* Invoke @dev's error_detected() callback (if any) under the device lock and
 * merge its vote into *@result.  Always returns 0 so pci_walk_bus() visits
 * every device in the subtree. */
static int ngbe_report_error_detected(struct pci_dev *dev,
				      pci_channel_state_t state,
				      enum pci_ers_result *result)
{
	pci_ers_result_t vote;
	const struct pci_error_handlers *err_handler;

	device_lock(&dev->dev);
	if (
	    !dev->driver ||
	    !dev->driver->err_handler ||
	    !dev->driver->err_handler->error_detected) {
		/*
		 * If any device in the subtree does not have an error_detected
		 * callback, PCI_ERS_RESULT_NO_AER_DRIVER prevents subsequent
		 * error callbacks of "any" device in the subtree, and will
		 * exit in the disconnected error state.
		 */
		if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
			vote = PCI_ERS_RESULT_NO_AER_DRIVER;
		else
			vote = PCI_ERS_RESULT_NONE;
	} else {
		err_handler = dev->driver->err_handler;
		vote = err_handler->error_detected(dev, state);
	}

	*result = merge_result(*result, vote);
	device_unlock(&dev->dev);
	return 0;
}
/* pci_walk_bus() adapter: report the io_frozen state to every device. */
static int ngbe_report_frozen_detected(struct pci_dev *dev, void *data)
{
	return ngbe_report_error_detected(dev, pci_channel_io_frozen, data);
}

/* Invoke @dev's mmio_enabled() callback (if any) and merge its vote. */
static int ngbe_report_mmio_enabled(struct pci_dev *dev, void *data)
{
	pci_ers_result_t vote, *result = data;
	const struct pci_error_handlers *err_handler;

	device_lock(&dev->dev);
	if (!dev->driver ||
	    !dev->driver->err_handler ||
	    !dev->driver->err_handler->mmio_enabled)
		goto out;

	err_handler = dev->driver->err_handler;
	vote = err_handler->mmio_enabled(dev);
	*result = merge_result(*result, vote);
out:
	device_unlock(&dev->dev);
	return 0;
}

/* Invoke @dev's slot_reset() callback (if any) and merge its vote. */
static int ngbe_report_slot_reset(struct pci_dev *dev, void *data)
{
	pci_ers_result_t vote, *result = data;
	const struct pci_error_handlers *err_handler;

	device_lock(&dev->dev);
	if (!dev->driver ||
	    !dev->driver->err_handler ||
	    !dev->driver->err_handler->slot_reset)
		goto out;

	err_handler = dev->driver->err_handler;
	vote = err_handler->slot_reset(dev);
	*result = merge_result(*result, vote);
out:
	device_unlock(&dev->dev);
	return 0;
}

/* Mark @dev's channel state normal again and invoke its resume() callback
 * (if any).  No vote is collected at this stage. */
static int ngbe_report_resume(struct pci_dev *dev, void *data)
{
	const struct pci_error_handlers *err_handler;

	device_lock(&dev->dev);
	dev->error_state = pci_channel_io_normal;
	if (
	    !dev->driver ||
	    !dev->driver->err_handler ||
	    !dev->driver->err_handler->resume)
		goto out;

	err_handler = dev->driver->err_handler;
	err_handler->resume(dev);
out:
	device_unlock(&dev->dev);
	return 0;
}

/* Driver-local replica of the PCI core's AER recovery sequence: broadcast
 * error_detected, perform a secondary bus reset on the upstream bridge, wait
 * for the devices to answer config reads again, then broadcast mmio_enabled /
 * slot_reset / resume according to the merged votes.  The statement order
 * (mask root-port AER interrupts -> reset -> wait -> re-enable) is
 * deliberate; do not reorder. */
void ngbe_pcie_do_recovery(struct pci_dev *dev)
{
	pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER;
	struct pci_bus *bus;
	u32 reg32;
	int pos;
	int delay = 1;
	u32 id;
	u16 ctrl;
	/*
	 * Error recovery runs on all subordinates of the first downstream port.
	 * If the downstream port detected the error, it is cleared at the end.
	 */
	if (!(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
	      pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM))
		dev = dev->bus->self;
	bus = dev->subordinate;

	pci_walk_bus(bus, ngbe_report_frozen_detected, &status);
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		/* Disable Root's interrupt in response to error messages */
		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
		reg32 &= ~NGBE_ROOT_PORT_INTR_ON_MESG_MASK;
		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
	}

	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);

	/*
	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
	 * this to 2ms to ensure that we meet the minimum requirement.
	 */
	msleep(2);
	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);

	/*
	 * Trhfa for conventional PCI is 2^25 clock cycles.
	 * Assuming a minimum 33MHz clock this results in a 1s
	 * delay before we can consider subordinate devices to
	 * be re-initialized. PCIe has some ways to shorten this,
	 * but we don't make use of them yet.
	 */
	ssleep(1);

	/* Exponential-backoff poll until the device answers config reads.
	 * All-ones means "no response".  NOTE(review): reading PCI_COMMAND
	 * here and comparing a u32 against ~0 relies on the usual implicit
	 * conversion of -1 to 0xFFFFFFFF; upstream uses PCI_VENDOR_ID for
	 * this probe -- confirm intent. */
	pci_read_config_dword(dev, PCI_COMMAND, &id);
	while (id == ~0) {
		if (delay > 60000) {
			pci_warn(dev, "not ready %dms after %s; giving up\n",
				 delay - 1, "bus_reset");
			return;
		}

		if (delay > 1000)
			pci_info(dev, "not ready %dms after %s; waiting\n",
				 delay - 1, "bus_reset");

		msleep(delay);
		delay *= 2;
		pci_read_config_dword(dev, PCI_COMMAND, &id);
	}

	if (delay > 1000)
		pci_info(dev, "ready %dms after %s\n", delay - 1,
			 "bus_reset");

	pci_info(dev, "Root Port link has been reset\n");

	if (pos) {
		/* Clear Root Error Status */
		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, reg32);

		/* Enable Root Port's interrupt in response to error messages */
		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
		reg32 |= NGBE_ROOT_PORT_INTR_ON_MESG_MASK;
		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
	}

	if (status == PCI_ERS_RESULT_CAN_RECOVER) {
		status = PCI_ERS_RESULT_RECOVERED;
		pci_dbg(dev, "broadcast mmio_enabled message\n");
		pci_walk_bus(bus, ngbe_report_mmio_enabled, &status);
	}

	if (status == PCI_ERS_RESULT_NEED_RESET) {
		/*
		 * TODO: Should call platform-specific
		 * functions to reset slot before calling
		 * drivers' slot_reset callbacks?
		 */
		status = PCI_ERS_RESULT_RECOVERED;
		pci_dbg(dev, "broadcast slot_reset message\n");
		pci_walk_bus(bus, ngbe_report_slot_reset, &status);
	}

	if (status != PCI_ERS_RESULT_RECOVERED)
		goto failed;

	pci_dbg(dev, "broadcast resume message\n");
	pci_walk_bus(bus, ngbe_report_resume, &status);

failed:
	return;
}
/**
 * ngbe_check_reset_blocked - check status of MNG FW veto bit
 * @hw: pointer to the hardware structure
 *
 * This function checks the MMNGC.MNG_VETO bit to see if there are
 * any constraints on link from manageability.  For MAC's that don't
 * have this bit just return false since the link can not be blocked
 * via this method.
 **/
bool ngbe_check_reset_blocked(struct ngbe_hw *hw)
{
	u32 mmngc;

	mmngc = rd32(hw, NGBE_MIS_ST);
	if (mmngc & NGBE_MIS_ST_MNG_VETO) {
		return true;
	}

	return false;
}

/* For internal phy only: read @reg_offset on @page.
 * NOTE(review): page 0xa43 registers 0x1a/0x1d skip the page-select write --
 * they appear to be accessible regardless of the selected page; confirm
 * against the internal GPHY documentation. */
static int ngbe_phy_read_reg(struct ngbe_hw *hw, u32 reg_offset,
			     u32 page, u16 *phy_data)
{
	/* clear input */
	*phy_data = 0;

	if (!((page == 0xa43) && ((reg_offset == 0x1a) || (reg_offset == 0x1d))))
		wr32(hw, NGBE_PHY_CONFIG(NGBE_INTERNAL_PHY_PAGE_SELECT_OFFSET), page);

	/* PHY registers are 16 bits wide; mask off the upper half */
	*phy_data = 0xFFFF & rd32(hw, NGBE_PHY_CONFIG(reg_offset));

	return NGBE_OK;
}

/* For internal phy only: write @phy_data to @reg_offset on @page (same
 * page-select exception as ngbe_phy_read_reg()). */
static int ngbe_phy_write_reg(struct ngbe_hw *hw, u32 reg_offset,
			      u32 page, u16 phy_data)
{

	if (!((page == 0xa43) && ((reg_offset == 0x1a) || (reg_offset == 0x1d))))
		wr32(hw, NGBE_PHY_CONFIG(NGBE_INTERNAL_PHY_PAGE_SELECT_OFFSET), page);
	wr32(hw, NGBE_PHY_CONFIG(reg_offset), phy_data);

	return NGBE_OK;
}

/* Read the internal PHY's ID registers and verify it is the expected part.
 * On success caches the composed ID in hw->phy.id; returns
 * NGBE_ERR_DEVICE_NOT_SUPPORTED otherwise.  The ID is composed from the top
 * bits of ID1 and ID2 ((id1 << 6) | (id2 >> 10)) -- same scheme as
 * ngbe_check_mdi_phy_id(). */
static int ngbe_check_internal_phy_id(struct ngbe_hw *hw)
{
	u16 phy_id_high = 0;
	u16 phy_id_low = 0;
	u16 phy_id = 0;

	ngbe_gphy_wait_mdio_access_on(hw);

	hw->phy.ops.read_reg(hw, NGBE_MDI_PHY_ID1_OFFSET, 0, &phy_id_high);
	phy_id = phy_id_high << 6;
	hw->phy.ops.read_reg(hw, NGBE_MDI_PHY_ID2_OFFSET, 0, &phy_id_low);
	phy_id |= (phy_id_low & NGBE_MDI_PHY_ID_MASK) >> 10;

	if (phy_id != NGBE_INTERNAL_PHY_ID) {
		ERROR_REPORT1(NGBE_ERROR_UNSUPPORTED,
			"internal phy id 0x%x not supported.\n", phy_id);
		return NGBE_ERR_DEVICE_NOT_SUPPORTED;
	}
	hw->phy.id = (u32)phy_id;

	return NGBE_OK;
}
device_type, + u16 *phy_data) +{ + u32 command; + int status = 0; + + /* setup and write the address cycle command */ + command = NGBE_MSCA_RA(reg_addr) | + NGBE_MSCA_PA(hw->phy.addr) | + NGBE_MSCA_DA(device_type); + wr32(hw, NGBE_MSCA, command); + + command = NGBE_MSCC_CMD(NGBE_MSCA_CMD_READ) | + NGBE_MSCC_BUSY | + NGBE_MDIO_CLK(6); + wr32(hw, NGBE_MSCC, command); + + /* wait to complete */ + status = po32m(hw, NGBE_MSCC, + NGBE_MSCC_BUSY, ~NGBE_MSCC_BUSY, + NGBE_MDIO_TIMEOUT, 10); + if (status != 0) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "PHY address command did not complete.\n"); + return NGBE_ERR_PHY; + } + + /* read data from MSCC */ + *phy_data = 0xFFFF & rd32(hw, NGBE_MSCC); + + return 0; +} + +/** + * ngbe_write_phy_reg_mdi - Writes a value to specified PHY register + * without SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +int ngbe_phy_write_reg_mdi(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data) +{ + u32 command; + int status = 0; + + /* setup and write the address cycle command */ + command = NGBE_MSCA_RA(reg_addr) | + NGBE_MSCA_PA(hw->phy.addr) | + NGBE_MSCA_DA(device_type); + wr32(hw, NGBE_MSCA, command); + + command = phy_data | NGBE_MSCC_CMD(NGBE_MSCA_CMD_WRITE) | + NGBE_MSCC_BUSY | NGBE_MDIO_CLK(6); + wr32(hw, NGBE_MSCC, command); + + /* wait to complete */ + status = po32m(hw, NGBE_MSCC, + NGBE_MSCC_BUSY, ~NGBE_MSCC_BUSY, + NGBE_MDIO_TIMEOUT, 10); + if (status != 0) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "PHY address command did not complete.\n"); + return NGBE_ERR_PHY; + } + + return 0; +} + +int ngbe_phy_read_reg_ext_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data) +{ + int status = 0; + + status = ngbe_phy_write_reg_mdi(hw, 0x1e, device_type, reg_addr); + if (!status) + status = ngbe_phy_read_reg_mdi(hw, 0x1f, device_type, phy_data); + + return status; +} 
/* YT8521S extended-register write: extended address via register 0x1e,
 * data via register 0x1f. */
int ngbe_phy_write_reg_ext_yt8521s(struct ngbe_hw *hw,
				   u32 reg_addr,
				   u32 device_type,
				   u16 phy_data)
{
	int status = 0;

	status = ngbe_phy_write_reg_mdi(hw, 0x1e, device_type, reg_addr);
	if (!status)
		status = ngbe_phy_write_reg_mdi(hw, 0x1f, device_type, phy_data);

	return status;
}

/* Read a YT8521S extended register from the SDS (fiber) address space.
 * NOTE(review): extended register 0xa000 appears to select the active space
 * (0x02 = SDS, 0x00 = UTP); the space is always restored to UTP on exit,
 * best-effort (restore status deliberately ignored) -- confirm against the
 * YT8521S datasheet. */
int ngbe_phy_read_reg_sds_ext_yt8521s(struct ngbe_hw *hw,
				      u32 reg_addr,
				      u32 device_type,
				      u16 *phy_data)
{
	int status = 0;

	status = ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x02);
	if (!status)
		status = ngbe_phy_read_reg_ext_yt8521s(hw, reg_addr, device_type, phy_data);
	ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x00);

	return status;
}

/* Write a YT8521S extended register in the SDS address space (see the
 * space-select note on the read variant above). */
int ngbe_phy_write_reg_sds_ext_yt8521s(struct ngbe_hw *hw,
				       u32 reg_addr,
				       u32 device_type,
				       u16 phy_data)
{
	int status = 0;

	status = ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x02);
	if (!status)
		status = ngbe_phy_write_reg_ext_yt8521s(hw, reg_addr, device_type, phy_data);
	ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x00);

	return status;
}


/* Read a standard MII register while the SDS address space is selected,
 * restoring the UTP space afterwards. */
int ngbe_phy_read_reg_sds_mii_yt8521s(struct ngbe_hw *hw,
				      u32 reg_addr,
				      u32 device_type,
				      u16 *phy_data)
{
	int status = 0;

	status = ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x02);
	if (!status)
		status = ngbe_phy_read_reg_mdi(hw, reg_addr, device_type, phy_data);
	ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x00);

	return status;
}

/* Write a standard MII register while the SDS address space is selected,
 * restoring the UTP space afterwards. */
int ngbe_phy_write_reg_sds_mii_yt8521s(struct ngbe_hw *hw,
				       u32 reg_addr,
				       u32 device_type,
				       u16 phy_data)
{
	int status = 0;

	status = ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x02);
	if (!status)
		status = ngbe_phy_write_reg_mdi(hw, reg_addr, device_type, phy_data);
	ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x00);

	return status;
}
u32 phy_mode = 0; + + if (hw->phy.type == ngbe_phy_m88e1512) { + /* select page 0 */ + ngbe_phy_write_reg_mdi(hw, 22, 0, 0); + } else { + /* select page 1 */ + ngbe_phy_write_reg_mdi(hw, 22, 0, 1); + } + + ngbe_phy_read_reg_mdi(hw, NGBE_MDI_PHY_ID1_OFFSET, 0, &phy_id_high); + phy_id = phy_id_high << 6; + ngbe_phy_read_reg_mdi(hw, NGBE_MDI_PHY_ID2_OFFSET, 0, &phy_id_low); + phy_id |= (phy_id_low & NGBE_MDI_PHY_ID_MASK) >> 10; + + if (phy_id != NGBE_M88E1512_PHY_ID) { + ERROR_REPORT1(NGBE_ERROR_UNSUPPORTED, + "MDI phy id 0x%x not supported.\n", phy_id); + return NGBE_ERR_DEVICE_NOT_SUPPORTED; + } + hw->phy.id = phy_id; + + if (hw->phy.type == ngbe_phy_m88e1512_unknown) { + ngbe_flash_read_dword(hw, 0xff010, &phy_mode); + switch (hw->bus.lan_id) { + case 0: + value = (u8)phy_mode; + break; + case 1: + value = (u8)(phy_mode >> 8); + break; + case 2: + value = (u8)(phy_mode >> 16); + break; + case 3: + value = (u8)(phy_mode >> 24); + break; + default: + break; + } + if ((value & 0x7) == 0) + /* mode select to RGMII-to-copper */ + hw->phy.type = ngbe_phy_m88e1512; + else if ((value & 0x7) == 0x2) + /* mode select to RGMII-to-sfi */ + hw->phy.type = ngbe_phy_m88e1512_sfi; + else { + ERROR_REPORT1(NGBE_ERROR_UNSUPPORTED, + "marvell 88E1512 mode %x is not supported.\n", value); + return NGBE_ERR_DEVICE_NOT_SUPPORTED; + } + } + + return NGBE_OK; +} + +static bool ngbe_validate_phy_addr(struct ngbe_hw *hw, u32 phy_addr) +{ + u16 phy_id = 0; + bool valid = false; + unsigned long flags; + + hw->phy.addr = phy_addr; + + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x3, 0, &phy_id); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + if (phy_id != 0xFFFF && phy_id != 0x0) + valid = true; + + return valid; +} + +static int ngbe_check_yt_phy_id(struct ngbe_hw *hw) +{ + u16 phy_id = 0; + bool valid = false; + u32 phy_addr; + unsigned long flags; + + for (phy_addr = 0; phy_addr < 32; phy_addr++) { + valid = ngbe_validate_phy_addr(hw, phy_addr); + 
/**
 * ngbe_phy_init - PHY/SFP specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during init_shared_code because the PHY/SFP type was
 * not known. Perform the SFP init if necessary.
 *
 * Per detected PHY type this: identifies the PHY, enables its link-status /
 * autoneg-done interrupts, and (for the external Marvell/Motorcomm parts)
 * performs the vendor-specific page-switched setup.  The register sequences
 * are order-critical.
 **/
int ngbe_phy_init(struct ngbe_hw *hw)
{
	int ret_val = 0;
	u16 value = 0;
	int i;
	u8 lan_id = hw->bus.lan_id;
	struct ngbe_adapter *adapter = hw->back;
	unsigned long flags;

	/* set fwsw semaphore mask for phy first */
	if (!hw->phy.phy_semaphore_mask) {
		hw->phy.phy_semaphore_mask = NGBE_MNG_SWFW_SYNC_SW_PHY;
	}

	/* FPGA emulation boards have no real PHY to set up */
	if ((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA)
		return 0;
	/* init phy.addr according to HW design */

	hw->phy.addr = 0;
	spin_lock_init(&hw->phy_lock);

	/* Identify the PHY or SFP module */
	ret_val = hw->phy.ops.identify(hw);
	if (ret_val == NGBE_ERR_SFP_NOT_SUPPORTED)
		return ret_val;

	/* enable interrupts: only link status change and AN done are allowed */
	if (hw->phy.type == ngbe_phy_internal || hw->phy.type == ngbe_phy_internal_yt8521s_sfi) {
		value = NGBE_INTPHY_INT_LSC | NGBE_INTPHY_INT_ANC;
		hw->phy.ops.write_reg(hw, 0x12, 0xa42, value);
		/* cache the per-lane GPHY efuse words for later use */
		ngbe_flash_read_dword(hw , 0xfe010 + lan_id * 8, &adapter->gphy_efuse[0]);
		ngbe_flash_read_dword(hw , 0xfe010 + lan_id * 8 + 4, &adapter->gphy_efuse[1]);
	} else if (hw->phy.type == ngbe_phy_m88e1512 ||
			hw->phy.type == ngbe_phy_m88e1512_sfi) {
		/* page 2, reg 21: RGMII receive timing control (RTC on, TTC off) */
		hw->phy.ops.write_reg_mdi(hw, 22, 0, 2);
		hw->phy.ops.read_reg_mdi(hw, 21, 0, &value);
		value &= ~NGBE_M88E1512_RGM_TTC;
		value |= NGBE_M88E1512_RGM_RTC;
		hw->phy.ops.write_reg_mdi(hw, 21, 0, value);
		/* back to the working page: 0 for copper, 1 for fiber */
		if (hw->phy.type == ngbe_phy_m88e1512)
			hw->phy.ops.write_reg_mdi(hw, 22, 0, 0);
		else
			hw->phy.ops.write_reg_mdi(hw, 22, 0, 1);

		/* soft-reset the PHY and poll until the self-clearing bit drops */
		hw->phy.ops.write_reg_mdi(hw, 0, 0, NGBE_MDI_PHY_RESET);
		for (i = 0; i < 15; i++) {
			hw->phy.ops.read_reg_mdi(hw, 0, 0, &value);
			if (value & NGBE_MDI_PHY_RESET)
				msleep(1);
			else
				break;
		}

		if (i == 15) {
			ERROR_REPORT1(NGBE_ERROR_POLLING,
				"phy reset exceeds maximum waiting period.\n");
			return NGBE_ERR_PHY_TIMEOUT;
		}

		ret_val = hw->phy.ops.reset(hw);
		if (ret_val) {
			return ret_val;
		}

		/* set LED2 to interrupt output and INTn active low */
		hw->phy.ops.write_reg_mdi(hw, 22, 0, 3);
		hw->phy.ops.read_reg_mdi(hw, 18, 0, &value);
		value |= NGBE_M88E1512_INT_EN;
		value &= ~(NGBE_M88E1512_INT_POL);
		hw->phy.ops.write_reg_mdi(hw, 18, 0, value);

		if (hw->phy.type == ngbe_phy_m88e1512_sfi) {
			hw->phy.ops.write_reg_mdi(hw, 22, 0, 1);
			hw->phy.ops.read_reg_mdi(hw, 16, 0, &value);
			value &= ~0x4;
			hw->phy.ops.write_reg_mdi(hw, 16, 0, value);
		}

		/* enable link status change and AN complete interrupts */
		value = NGBE_M88E1512_INT_ANC | NGBE_M88E1512_INT_LSC;
		if (hw->phy.type == ngbe_phy_m88e1512)
			hw->phy.ops.write_reg_mdi(hw, 22, 0, 0);
		else
			hw->phy.ops.write_reg_mdi(hw, 22, 0, 1);
		hw->phy.ops.write_reg_mdi(hw, 18, 0, value);

		/* NOTE(review): bit 0x800 in BMCR is power-down; setting it
		 * here leaves the PHY powered off until link setup -- confirm
		 * this is intended. */
		hw->phy.ops.read_reg_mdi(hw, 0, 0, &value);
		value |= 0x800;
		hw->phy.ops.write_reg_mdi(hw, 0, 0, value);
	} else if (hw->phy.type == ngbe_phy_yt8521s_sfi) {
		if (NGBE_POLL_LINK_STATUS != 1) {
			/* enable yt8521s interrupt */
			/* select sds area register */
			spin_lock_irqsave(&hw->phy_lock, flags);
			ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, 0, 0x00);

			/* enable interrupt */
			value = 0x0C0C;
			hw->phy.ops.write_reg_mdi(hw, 0x12, 0, value);
			spin_unlock_irqrestore(&hw->phy_lock, flags);
		}
		if (!hw->ncsi_enabled) {
			/* power down in Fiber mode */
			spin_lock_irqsave(&hw->phy_lock, flags);
			ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x0, 0, &value);
			value |= 0x800;
			ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x0, 0, value);

			/* power down in UTP mode */
			ngbe_phy_read_reg_mdi(hw, 0x0, 0, &value);
			value |= 0x800;
			ngbe_phy_write_reg_mdi(hw, 0x0, 0, value);
			spin_unlock_irqrestore(&hw->phy_lock, flags);
		}
	}

	return ret_val;
}


/**
 * ngbe_phy_identify - Identifies PHY type
 * @hw: pointer to hardware structure
 *
 * Dispatches to the ID-check routine matching hw->phy.type and returns its
 * status; unknown types yield NGBE_ERR_PHY_TYPE.
 **/
int ngbe_phy_identify(struct ngbe_hw *hw)
{
	int status = 0;

	switch(hw->phy.type) {
	case ngbe_phy_internal:
	case ngbe_phy_internal_yt8521s_sfi:
		status = ngbe_check_internal_phy_id(hw);
		break;
	case ngbe_phy_m88e1512:
	case ngbe_phy_m88e1512_sfi:
	case ngbe_phy_m88e1512_unknown:
		status = ngbe_check_mdi_phy_id(hw);
		break;
	case ngbe_phy_yt8521s_sfi:
		status = ngbe_check_yt_phy_id(hw);
		break;
	default:
		status = NGBE_ERR_PHY_TYPE;
	}

	return status;
}

/* Soft-reset the internal GPHY (BMCR reset bit) and poll for completion.
 * A no-op when @need_restart_AN is false. */
static int ngbe_gphy_reset(struct ngbe_hw *hw, bool need_restart_AN)
{
	int status, i;
	u16 val;

	if (!need_restart_AN)
		return 0;

	val = NGBE_MDI_PHY_RESET;
	status = hw->phy.ops.write_reg(hw, 0, 0, val);
	for (i = 0; i < NGBE_PHY_RST_WAIT_PERIOD; i++) {
		status = hw->phy.ops.read_reg(hw, 0, 0, &val);
		if (!(val & NGBE_MDI_PHY_RESET))
			break;
		msleep(1);
	}

	if (i == NGBE_PHY_RST_WAIT_PERIOD) {
		ERROR_REPORT1(NGBE_ERROR_POLLING,
			"PHY MODE RESET did not complete.\n");
		return NGBE_ERR_RESET_FAILED;
	}

	return status;
}
/**
 * ngbe_phy_setup_link - configure speed/duplex or autoneg on the internal PHY
 * @hw: pointer to hardware structure
 * @speed: NGBE_LINK_SPEED_* bitmask of speeds to force (no autoneg) or
 *         advertise (autoneg)
 * @need_restart_AN: reset the PHY and restart autonegotiation
 *
 * Without autoneg the speed is forced through BMCR; with autoneg the
 * advertisement registers are programmed and AN is (re)started.  The masks
 * follow the standard IEEE Clause 22 layout (reg 4 bits 6/8 = 10/100 full,
 * bits 5/7 = the half-duplex adverts cleared by 0xFF5F; reg 9 bit 9 =
 * 1000BASE-T full) -- presumed from the MII spec, confirm against the GPHY
 * datasheet.  Always returns NGBE_OK.
 **/
u32 ngbe_phy_setup_link(struct ngbe_hw *hw,
			u32 speed,
			bool need_restart_AN)
{
	u16 value = 0;
	int status = 0;

	status = ngbe_gphy_reset(hw, need_restart_AN);
	if (!hw->mac.autoneg) {
		/* forced-speed path: reset failure is fatal here */
		if (status) {
			ERROR_REPORT1(NGBE_ERROR_POLLING,
				"call phy reset return %d.\n", status);
			return NGBE_ERR_PHY;
		}

		switch (speed) {
		case NGBE_LINK_SPEED_1GB_FULL:
			value = NGBE_MDI_PHY_SPEED_SELECT1;
			break;
		case NGBE_LINK_SPEED_100_FULL:
			value = NGBE_MDI_PHY_SPEED_SELECT0;
			break;
		case NGBE_LINK_SPEED_10_FULL:
			value = 0;
			break;
		default:
			value = NGBE_MDI_PHY_SPEED_SELECT0 | NGBE_MDI_PHY_SPEED_SELECT1;
			ERROR_REPORT1(NGBE_ERROR_CAUTION,
				"unknown speed = 0x%x.\n", speed);
			break;
		}
		/* duplex full */
		value |= NGBE_MDI_PHY_DUPLEX;
		hw->phy.ops.write_reg(hw, 0, 0, value);

		goto skip_an;
	}

	/* disable 10/100M Half Duplex */
	hw->phy.ops.read_reg(hw, 4, 0, &value);
	value &= 0xFF5F;
	hw->phy.ops.write_reg(hw, 4, 0, value);

	/* set advertise enable according to input speed */
	if (!(speed & NGBE_LINK_SPEED_1GB_FULL)) {
		hw->phy.ops.read_reg(hw, 9, 0, &value);
		value &= 0xFDFF;
		hw->phy.ops.write_reg(hw, 9, 0, value);
	} else {
		hw->phy.ops.read_reg(hw, 9, 0, &value);
		value |= 0x200;
		hw->phy.ops.write_reg(hw, 9, 0, value);
	}

	if (!(speed & NGBE_LINK_SPEED_100_FULL)) {
		hw->phy.ops.read_reg(hw, 4, 0, &value);
		value &= 0xFEFF;
		hw->phy.ops.write_reg(hw, 4, 0, value);
	} else {
		hw->phy.ops.read_reg(hw, 4, 0, &value);
		value |= 0x100;
		hw->phy.ops.write_reg(hw, 4, 0, value);
	}

	if (!(speed & NGBE_LINK_SPEED_10_FULL)) {
		hw->phy.ops.read_reg(hw, 4, 0, &value);
		value &= 0xFFBF;
		hw->phy.ops.write_reg(hw, 4, 0, value);
	} else {
		hw->phy.ops.read_reg(hw, 4, 0, &value);
		value |= 0x40;
		hw->phy.ops.write_reg(hw, 4, 0, value);
	}

	/* restart AN and wait AN done interrupt */
	if (hw->ncsi_enabled) {
		/* NC-SI shares the PHY with the BMC: only restart AN when the
		 * caller explicitly asked, to avoid dropping the BMC link */
		if (need_restart_AN)
			value = NGBE_MDI_PHY_RESTART_AN | NGBE_MDI_PHY_ANE;
		else
			value = NGBE_MDI_PHY_ANE;
	} else {
		value = NGBE_MDI_PHY_RESTART_AN | NGBE_MDI_PHY_ANE;
	}

	hw->phy.ops.write_reg(hw, 0, 0, value);
skip_an:
	hw->phy.ops.phy_led_ctrl(hw);

	hw->phy.ops.check_event(hw);

	return NGBE_OK;
}

/**
 * ngbe_phy_led_ctrl - program the internal PHY LED configuration
 * @hw: pointer to hardware structure
 *
 * Uses the module-parameter override in adapter->led_conf when set
 * (low 16 bits -> LED function reg 16, high bits -> blink reg 18),
 * otherwise falls back to the 0x205B default with a 60ms blink rate.
 * Always returns 0.
 **/
u32 ngbe_phy_led_ctrl(struct ngbe_hw *hw)
{
	u16 value = 0;
	struct ngbe_adapter *adapter = hw->back;

	if (adapter->led_conf != -1)
		value = adapter->led_conf & 0xffff;
	else
		value =0x205B;
	hw->phy.ops.write_reg(hw, 16, 0xd04, value);
	hw->phy.ops.write_reg(hw, 17, 0xd04, 0);

	hw->phy.ops.read_reg(hw, 18, 0xd04, &value);
	if (adapter->led_conf != -1) {
		value &= ~0x73;
		value |= adapter->led_conf >> 16;
	} else {
		value = value & 0xFFFC;
		/* act led blinking mode set to 60ms */
		value |= 0x2;
	}
	hw->phy.ops.write_reg(hw, 18, 0xd04, value);

	return 0;
}
*/ + if (!hw->phy.reset_if_overtemp && + NGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)) { + ERROR_REPORT1(NGBE_ERROR_CAUTION, + "OVERTEMP! Skip PHY reset.\n"); + return NGBE_ERR_OVERTEMP; + } + + /* Blocked by MNG FW so bail */ + if (ngbe_check_reset_blocked(hw)) + return status; + + /* select page 18 reg 20 */ + status = hw->phy.ops.write_reg_mdi(hw, 22, 0, 18); + + if (hw->phy.type == ngbe_phy_m88e1512) + /* mode select to RGMII-to-copper */ + value = 0; + else + /* mode select to RGMII-to-sfi */ + value = 2; + status = hw->phy.ops.write_reg_mdi(hw, 20, 0, value); + /* mode reset */ + value |= NGBE_MDI_PHY_RESET; + status = hw->phy.ops.write_reg_mdi(hw, 20, 0, value); + + for (i = 0; i < NGBE_PHY_RST_WAIT_PERIOD; i++) { + status = hw->phy.ops.read_reg_mdi(hw, 20, 0, &value); + if (!(value & NGBE_MDI_PHY_RESET)) + break; + msleep(1); + } + + if (i == NGBE_PHY_RST_WAIT_PERIOD) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "M88E1512 MODE RESET did not complete.\n"); + return NGBE_ERR_RESET_FAILED; + } + + return status; +} + +int ngbe_phy_reset_yt8521s(struct ngbe_hw *hw) +{ + int status = 0; + + u16 value = 0; + int i; + unsigned long flags; + + if (hw->phy.type != ngbe_phy_yt8521s_sfi) + return NGBE_ERR_PHY_TYPE; + + if (hw->ncsi_enabled) + return status; + + /* Don't reset PHY if it's shut down due to overtemp. */ + if (!hw->phy.reset_if_overtemp && + NGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)) { + ERROR_REPORT1(NGBE_ERROR_CAUTION, + "OVERTEMP! 
/**
 * ngbe_phy_setup_link_m88e1512 - configure link on the Marvell 88E1512 PHY
 * @hw: pointer to hardware structure
 * @speed: NGBE_LINK_SPEED_* bitmask to force (no autoneg) or advertise
 * @autoneg_wait_to_complete: unused
 *
 * Copper mode (ngbe_phy_m88e1512) programs the Clause 22 advertisement
 * registers on page 0; fiber/SFI mode uses page 1 with a fixed 1G
 * advertisement.  LED registers on page 3 are only written when no
 * led_conf module-parameter override is set.  Finishes by clearing the
 * power-down bit (0x800) and broadcasting a check_event.
 * Always returns NGBE_OK.
 **/
u32 ngbe_phy_setup_link_m88e1512(struct ngbe_hw *hw,
				 u32 speed,
				 bool autoneg_wait_to_complete)
{
	u16 value_r4 = 0;
	u16 value_r9 = 0;
	u16 value = 0;
	struct ngbe_adapter *adapter = hw->back;

	UNREFERENCED_PARAMETER(autoneg_wait_to_complete);

	if (adapter->led_conf == -1) {
		/* LED control (page 3: function reg 16, polarity reg 17) */
		hw->phy.ops.write_reg_mdi(hw, 22, 0, 3);
		hw->phy.ops.read_reg_mdi(hw, 16, 0, &value);
		value &= ~0x00FF;
		value |= (NGBE_M88E1512_LED1_CONF << 4) | NGBE_M88E1512_LED0_CONF;
		hw->phy.ops.write_reg_mdi(hw, 16, 0, value);
		hw->phy.ops.read_reg_mdi(hw, 17, 0, &value);
		value &= ~0x000F;
		value |= (NGBE_M88E1512_LED1_POL << 2) | NGBE_M88E1512_LED0_POL;
		hw->phy.ops.write_reg_mdi(hw, 17, 0, value);
	}

	hw->phy.autoneg_advertised = 0;
	if (hw->phy.type == ngbe_phy_m88e1512) {
		if (!hw->mac.autoneg) {
			/* forced speed through BMCR (0x8000 = soft reset to latch) */
			switch (speed) {
			case NGBE_LINK_SPEED_1GB_FULL:
				value = NGBE_MDI_PHY_SPEED_SELECT1;
				break;
			case NGBE_LINK_SPEED_100_FULL:
				value = NGBE_MDI_PHY_SPEED_SELECT0;
				break;
			case NGBE_LINK_SPEED_10_FULL:
				value = 0;
				break;
			default:
				value = NGBE_MDI_PHY_SPEED_SELECT0 | NGBE_MDI_PHY_SPEED_SELECT1;
				ERROR_REPORT1(NGBE_ERROR_CAUTION,
					"unknown speed = 0x%x.\n", speed);
				break;
			}
			/* duplex full */
			value |= NGBE_MDI_PHY_DUPLEX | 0x8000;
			ngbe_phy_write_reg_mdi(hw, 0x0, 0, value);

			goto skip_an;
		}
		if (speed & NGBE_LINK_SPEED_1GB_FULL) {
			value_r9 |=NGBE_M88E1512_1000BASET_FULL;
			hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL;
		}

		if (speed & NGBE_LINK_SPEED_100_FULL) {
			value_r4 |= NGBE_M88E1512_100BASET_FULL;
			hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_100_FULL;
		}

		if (speed & NGBE_LINK_SPEED_10_FULL) {
			value_r4 |= NGBE_M88E1512_10BASET_FULL;
			hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_10_FULL;
		}

		/* merge requested adverts into reg 4, clearing all 10/100 bits first */
		hw->phy.ops.write_reg_mdi(hw, 22, 0, 0);
		hw->phy.ops.read_reg_mdi(hw, 4, 0, &value);
		value &= ~(NGBE_M88E1512_100BASET_FULL |
			NGBE_M88E1512_100BASET_HALF |
			NGBE_M88E1512_10BASET_FULL |
			NGBE_M88E1512_10BASET_HALF);
		value_r4 |= value;
		hw->phy.ops.write_reg_mdi(hw, 4, 0, value_r4);

		hw->phy.ops.write_reg_mdi(hw, 22, 0, 0);
		hw->phy.ops.read_reg_mdi(hw, 9, 0, &value);
		value &= ~(NGBE_M88E1512_1000BASET_FULL |
			NGBE_M88E1512_1000BASET_HALF);
		value_r9 |= value;
		hw->phy.ops.write_reg_mdi(hw, 9, 0, value_r9);

		value = NGBE_MDI_PHY_RESTART_AN |
			NGBE_MDI_PHY_ANE |
			NGBE_MDI_PHY_RESET |
			NGBE_MDI_PHY_DUPLEX;
		hw->phy.ops.write_reg_mdi(hw, 0, 0, value);
	} else {
		/* fiber/SFI: page 1, advertise 1G full only */
		hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL;
		hw->phy.ops.write_reg_mdi(hw, 22, 0, 1);
		hw->phy.ops.read_reg_mdi(hw, 4, 0, &value);
		value &= ~0x60;
		value |= 0x20;
		hw->phy.ops.write_reg_mdi(hw, 4, 0, value);

		if (hw->mac.autoneg)
			value = NGBE_MDI_PHY_RESTART_AN |
				NGBE_MDI_PHY_ANE |
				NGBE_MDI_PHY_RESET |
				NGBE_MDI_PHY_DUPLEX |
				NGBE_MDI_PHY_SPEED_SELECT1;
		else
			value = NGBE_MDI_PHY_RESET |
				NGBE_MDI_PHY_DUPLEX |
				NGBE_MDI_PHY_SPEED_SELECT1;
		hw->phy.ops.write_reg_mdi(hw, 0, 0, value);
	}
	/* NOTE(review): this BMCR read is immediately repeated after skip_an
	 * and its result is overwritten -- looks redundant; confirm whether
	 * the extra read is a required settling access before removing. */
	hw->phy.ops.read_reg_mdi(hw, 0, 0, &value);
skip_an:
	/* clear power-down (bit 0x800) so the PHY comes up */
	hw->phy.ops.read_reg_mdi(hw, 0, 0, &value);
	value &= ~0x800;
	hw->phy.ops.write_reg_mdi(hw, 0, 0, value);
	msleep(5);

	hw->phy.ops.check_event(hw);


	return NGBE_OK;
}
goto skip_an; + } + + value_r4 = 0x1E0; + value_r9 = 0x300; + /*disable 100/10base-T Self-negotiation ability*/ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_mdi(hw, 0x4, 0, &value); + value &=~value_r4; + ngbe_phy_write_reg_mdi(hw, 0x4, 0, value); + + /*disable 1000base-T Self-negotiation ability*/ + ngbe_phy_read_reg_mdi(hw, 0x9, 0, &value); + value &=~value_r9; + ngbe_phy_write_reg_mdi(hw, 0x9, 0, value); + + value_r4 = 0x0; + value_r9 = 0x0; + + if (speed & NGBE_LINK_SPEED_1GB_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + value_r9 |= 0x200; + } + if (speed & NGBE_LINK_SPEED_100_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_100_FULL; + value_r4 |= 0x100; + } + if (speed & NGBE_LINK_SPEED_10_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_10_FULL; + value_r4 |= 0x40; + } + + /* enable 1000base-T Self-negotiation ability */ + ngbe_phy_read_reg_mdi(hw, 0x9, 0, &value); + value |=value_r9; + ngbe_phy_write_reg_mdi(hw, 0x9, 0, value); + + /* enable 100/10base-T Self-negotiation ability */ + ngbe_phy_read_reg_mdi(hw, 0x4, 0, &value); + value |=value_r4; + ngbe_phy_write_reg_mdi(hw, 0x4, 0, value); + + /* software reset to make the above configuration take effect*/ + ngbe_phy_read_reg_mdi(hw, 0x0, 0, &value); + value |= 0x9200; + ngbe_phy_write_reg_mdi(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); +skip_an: + /* power on in UTP mode */ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_mdi(hw, 0x0, 0, &value); + value &= ~0x800; + ngbe_phy_write_reg_mdi(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + } else if ((value & 7) == 1) {/* fiber_to_rgmii */ + if (!hw->mac.autoneg) { + switch (speed) { + case NGBE_LINK_SPEED_1GB_FULL: + value = NGBE_LINK_SPEED_1GB_FULL; + break; + case NGBE_LINK_SPEED_100_FULL: + value = NGBE_LINK_SPEED_100_FULL; + break; + default: + value = NGBE_LINK_SPEED_1GB_FULL; + break; + } + hw->phy.autoneg_advertised |= value; + goto 
skip_an_fiber; + } + + value = 0; + if (speed & NGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + if (speed & NGBE_LINK_SPEED_100_FULL) + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_100_FULL; +skip_an_fiber: + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_ext_yt8521s(hw, 0xA006, 0, &value); + if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_1GB_FULL) + value |= 0x1; + else if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_100_FULL) + value &= ~0x1; + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA006, 0, value); + + /* close auto sensing */ + ngbe_phy_read_reg_sds_ext_yt8521s(hw, 0xA5, 0, &value); + value &= ~0x8000; + ngbe_phy_write_reg_sds_ext_yt8521s(hw, 0xA5, 0, value); + + ngbe_phy_read_reg_ext_yt8521s(hw, 0xA001, 0, &value); + value &= ~0x8000; + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA001, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + /* RGMII_Config1 : Config rx and tx training delay */ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA003, 0, 0x3cf1); + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA001, 0, 0x8041); + + /* software reset */ + if (hw->mac.autoneg) { + ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x0, 0,0x9340); + } else { + value = NGBE_YT8521S_PHY_RESET | NGBE_YT8521S_PHY_DUPLEX; + if (speed & NGBE_LINK_SPEED_1GB_FULL) + value |= NGBE_YT8521S_PHY_SPEED_SELECT1; + if (speed & NGBE_LINK_SPEED_100_FULL) + value |= NGBE_YT8521S_PHY_SPEED_SELECT0; + ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x0, 0, value); + } + spin_unlock_irqrestore(&hw->phy_lock, flags); + + } else if ((value & 7) == 2) { + /* power on in UTP mode */ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_mdi(hw, 0x0, 0, &value); + value &= ~0x800; + ngbe_phy_write_reg_mdi(hw, 0x0, 0, value); + + /* power on in Fiber mode */ + ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x0, 0, &value); + value &= ~0x800; + ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x0, 0, value); + + ngbe_phy_read_reg_sds_mii_yt8521s(hw, 
0x11, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + if (value & 0x400) { /* fiber up */ + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + } else { /* utp up */ + value_r4 = 0x1E0; + value_r9 = 0x300; + /*disable 100/10base-T Self-negotiation ability*/ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_mdi(hw, 0x4, 0, &value); + value &=~value_r4; + ngbe_phy_write_reg_mdi(hw, 0x4, 0, value); + + /*disable 1000base-T Self-negotiation ability*/ + ngbe_phy_read_reg_mdi(hw, 0x9, 0, &value); + value &=~value_r9; + ngbe_phy_write_reg_mdi(hw, 0x9, 0, value); + + value_r4 = 0x0; + value_r9 = 0x0; + + if (speed & NGBE_LINK_SPEED_1GB_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + value_r9 |= 0x200; + } + if (speed & NGBE_LINK_SPEED_100_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_100_FULL; + value_r4 |= 0x100; + } + if (speed & NGBE_LINK_SPEED_10_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_10_FULL; + value_r4 |= 0x40; + } + + /* enable 1000base-T Self-negotiation ability */ + ngbe_phy_read_reg_mdi(hw, 0x9, 0, &value); + value |=value_r9; + ngbe_phy_write_reg_mdi(hw, 0x9, 0, value); + + /* enable 100/10base-T Self-negotiation ability */ + ngbe_phy_read_reg_mdi(hw, 0x4, 0, &value); + value |=value_r4; + ngbe_phy_write_reg_mdi(hw, 0x4, 0, value); + + /* software reset to make the above configuration take effect*/ + ngbe_phy_read_reg_mdi(hw, 0x0, 0, &value); + value |= 0x8000; + ngbe_phy_write_reg_mdi(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + } + } else if ((value & 7) == 4) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_ext_yt8521s(hw, 0xA003, 0, &value); + value |= 0x8000; + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA003, 0, value); + + ngbe_phy_read_reg_ext_yt8521s(hw, 0xA004, 0, &value); + value &= ~0xf0; + value |= 0xb0; + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA004, 0, value); + + 
ngbe_phy_read_reg_ext_yt8521s(hw, 0xA001, 0, &value); + value &= ~0x8000; + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA001, 0, value); + + /* power on phy */ + ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x0, 0, &value); + value &= ~0x800; + ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + } else if ((value & 7) == 5) {/* sgmii_to_rgmii */ + if (!hw->mac.autoneg) { + switch (speed) { + case NGBE_LINK_SPEED_1GB_FULL: + value = NGBE_MDI_PHY_SPEED_SELECT1; + break; + case NGBE_LINK_SPEED_100_FULL: + value = NGBE_MDI_PHY_SPEED_SELECT0; + break; + case NGBE_LINK_SPEED_10_FULL: + value = 0; + break; + default: + value = NGBE_MDI_PHY_SPEED_SELECT0 | NGBE_MDI_PHY_SPEED_SELECT1; + ERROR_REPORT1(NGBE_ERROR_CAUTION, + "unknown speed = 0x%x.\n", speed); + break; + } + /* duplex full */ + value |= NGBE_MDI_PHY_DUPLEX | 0x8000; + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + goto skip_an_sr; + } + + value = 0; + if (speed & NGBE_LINK_SPEED_1GB_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + value |= 0x40; + } + if (speed & NGBE_LINK_SPEED_100_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_100_FULL; + value |= 0x2000; + } + if (speed & NGBE_LINK_SPEED_10_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_10_FULL; + value |= 0x0; + } + + /* duplex full */ + value |= NGBE_MDI_PHY_DUPLEX | 0x8000; + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x0, 0, value); + + /* software reset to make the above configuration take effect */ + ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x0, 0, &value); + value |= 0x9200; + ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); +skip_an_sr: + /* power on in UTP mode */ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x0, 0, &value); + value &= ~0x800; + 
ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + } + hw->phy.ops.check_event(hw); + + return ret_val; +} + +/** + * ngbe_tn_check_overtemp - Checks if an overtemp occurred. + * @hw: pointer to hardware structure + * + * Checks if the LASI temp alarm status was triggered due to overtemp + **/ +int ngbe_phy_check_overtemp(struct ngbe_hw *hw) +{ + int status = 0; + u32 ts_state; + + /* Check that the LASI temp alarm status was triggered */ + ts_state = rd32(hw, NGBE_TS_ALARM_ST); + + if (ts_state & NGBE_TS_ALARM_ST_DALARM) + status = NGBE_ERR_UNDERTEMP; + else if (ts_state & NGBE_TS_ALARM_ST_ALARM) + status = NGBE_ERR_OVERTEMP; + + return status; +} + +int ngbe_phy_check_event(struct ngbe_hw *hw) +{ + u16 value = 0; + struct ngbe_adapter *adapter = hw->back; + + hw->phy.ops.read_reg(hw, 0x1d, 0xa43, &value); + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; + if (value & BIT(4)) + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; + else if (value & BIT(3)) + adapter->flags |= NGBE_FLAG_NEED_ANC_CHECK; + + return NGBE_OK; +} + +int ngbe_phy_check_event_m88e1512(struct ngbe_hw *hw) +{ + u16 value = 0; + struct ngbe_adapter *adapter = hw->back; + + if (hw->phy.type == ngbe_phy_m88e1512) + hw->phy.ops.write_reg_mdi(hw, 22, 0, 0); + else + hw->phy.ops.write_reg_mdi(hw, 22, 0, 1); + hw->phy.ops.read_reg_mdi(hw, 19, 0, &value); + + if (value & NGBE_M88E1512_LSC) { + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; + } + + if (value & NGBE_M88E1512_ANC) { + adapter->flags |= NGBE_FLAG_NEED_ANC_CHECK; + } + + return NGBE_OK; +} + +int ngbe_phy_check_event_yt8521s(struct ngbe_hw *hw) +{ + u16 value = 0; + struct ngbe_adapter *adapter = hw->back; + unsigned long flags; + + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000,0,0x0); + hw->phy.ops.read_reg_mdi(hw, 0x13, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + if ((value & (NGBE_YT8521S_SDS_LINK_UP | NGBE_YT8521S_SDS_LINK_DOWN)) 
|| + (value & (NGBE_YT8521S_UTP_LINK_UP | NGBE_YT8521S_UTP_LINK_DOWN))) { + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; + } + + return NGBE_OK; +} + +static int ngbe_phy_get_advertised_pause(struct ngbe_hw *hw, u8 *pause_bit) +{ + u16 value = 0; + int status = 0; + + status = hw->phy.ops.read_reg(hw, 4, 0, &value); + *pause_bit = (u8)((value >> 10) & 0x3); + return status; +} + +int ngbe_phy_get_advertised_pause_m88e1512(struct ngbe_hw *hw, u8 *pause_bit) +{ + u16 value = 0; + int status = 0; + + if (hw->phy.type == ngbe_phy_m88e1512) { + status = hw->phy.ops.write_reg_mdi(hw, 22, 0, 0); + status = hw->phy.ops.read_reg_mdi(hw, 4, 0, &value); + *pause_bit = (u8)((value >> 10) & 0x3); + } else { + status = hw->phy.ops.write_reg_mdi(hw, 22, 0, 1); + status = hw->phy.ops.read_reg_mdi(hw, 4, 0, &value); + *pause_bit = (u8)((value >> 7) & 0x3); + } + return status; +} + +int ngbe_phy_get_advertised_pause_yt8521s(struct ngbe_hw *hw, u8 *pause_bit) +{ + u16 value = 0; + int status = 0; + unsigned long flags; + + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x04, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + *pause_bit = (u8)((value >> 7) & 0x3); + return status; +} + +static int ngbe_phy_get_lp_advertised_pause(struct ngbe_hw *hw, u8 *pause_bit) +{ + u16 value = 0; + int status = 0; + + status = hw->phy.ops.read_reg(hw, 0x1d, 0xa43, &value); + + status = hw->phy.ops.read_reg(hw, 0x1, 0, &value); + value = (value >> 5) & 0x1; + + /* if AN complete then check lp adv pause */ + status = hw->phy.ops.read_reg(hw, 5, 0, &value); + *pause_bit = (u8)((value >> 10) & 0x3); + return status; +} + +int ngbe_phy_get_lp_advertised_pause_m88e1512(struct ngbe_hw *hw, u8 *pause_bit) +{ + u16 value = 0; + int status = 0; + + if (hw->phy.type == ngbe_phy_m88e1512) { + status = hw->phy.ops.write_reg_mdi(hw, 22, 0, 0); + status = hw->phy.ops.read_reg_mdi(hw, 5, 0, &value); + *pause_bit = (u8)((value >> 10) & 0x3); + } else { + 
status = hw->phy.ops.write_reg_mdi(hw, 22, 0, 1); + status = hw->phy.ops.read_reg_mdi(hw, 5, 0, &value); + *pause_bit = (u8)((value >> 7) & 0x3); + } + return status; +} + +int ngbe_phy_get_lp_advertised_pause_yt8521s(struct ngbe_hw *hw, u8 *pause_bit) +{ + u16 value = 0; + int status = 0; + unsigned long flags; + + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x05, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + *pause_bit = (u8)((value >> 7) & 0x3); + return status; + +} + +static int ngbe_phy_set_pause_advertisement(struct ngbe_hw *hw, u16 pause_bit) +{ + u16 value = 0; + int status = 0; + + status = hw->phy.ops.read_reg(hw, 4, 0, &value); + value &= ~0xC00; + value |= pause_bit; + status = hw->phy.ops.write_reg(hw, 4, 0, value); + return status; +} + +int ngbe_phy_set_pause_advertisement_m88e1512(struct ngbe_hw *hw, + u16 pause_bit) +{ + u16 value = 0; + int status = 0; + if (hw->phy.type == ngbe_phy_m88e1512) { + status = hw->phy.ops.write_reg_mdi(hw, 22, 0, 0); + status = hw->phy.ops.read_reg_mdi(hw, 4, 0, &value); + value &= ~0xC00; + value |= pause_bit; + status = hw->phy.ops.write_reg_mdi(hw, 4, 0, value); + } else { + status = hw->phy.ops.write_reg_mdi(hw, 22, 0, 1); + status = hw->phy.ops.read_reg_mdi(hw, 4, 0, &value); + value &= ~0x180; + value |= pause_bit; + status = hw->phy.ops.write_reg_mdi(hw, 4, 0, value); + } + + return status; +} + +int ngbe_phy_set_pause_advertisement_yt8521s(struct ngbe_hw *hw, + u16 pause_bit) +{ + u16 value = 0; + int status = 0; + unsigned long flags; + + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x04, 0, &value); + value &= ~0x180; + value |= pause_bit; + status = ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x04, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + return status; +} +int ngbe_gphy_dis_eee(struct ngbe_hw *hw) +{ + u16 val = 0; + + hw->phy.ops.write_reg(hw, 0x11, 0xa4b, 0x1110); + 
hw->phy.ops.write_reg(hw, 0xd, 0x0, 0x7); + hw->phy.ops.write_reg(hw, 0xe, 0x0, 0x003c); + hw->phy.ops.write_reg(hw, 0xd, 0x0, 0x4007); + hw->phy.ops.write_reg(hw, 0xe, 0x0, 0); + + /* disable 10/100M Half Duplex */ + msleep(100); + hw->phy.ops.read_reg(hw, 4, 0, &val); + val &= 0xff5f; + hw->phy.ops.write_reg(hw, 0x4, 0x0, val); + + return 0; +} + +int ngbe_gphy_wait_mdio_access_on(struct ngbe_hw *hw) +{ + int i; + u16 val = 0; + struct ngbe_adapter *adapter = hw->back; + + for (i = 0; i < 100; i++) { + hw->phy.ops.read_reg(hw, 29, 0xa43, &val); + if (val & 0x20) { + e_info(hw, "mdio_access ready\n"); + break; + } + usec_delay(1000); + } + + if (i == 100) + e_info(hw, "ngbe_gphy_wait_mdio_access_on timeout\n"); + + return 0; +} + +int ngbe_gphy_efuse_calibration(struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = hw->back; + u32 efuse[2]; + u16 val; + + ngbe_gphy_wait_mdio_access_on(hw); + efuse[0] = adapter->gphy_efuse[0]; + efuse[1] = adapter->gphy_efuse[1]; + + e_info(hw, "=1=port %d efuse[0] = %08x, efuse[1] = %08x\n", hw->bus.lan_id, efuse[0], efuse[1]); + + if (!efuse[0] && !efuse[1]) { + efuse[0] = 0xFFFFFFFF; + efuse[1] = 0xFFFFFFFF; + } + + /* calibration */ + efuse[0] |= 0xF0000100; + efuse[1] |= 0xFF807FFF; + e_info(hw, "=2=port %d efuse[0] = %08x, efuse[1] = %08x\n", hw->bus.lan_id, efuse[0], efuse[1]); + + /* EODR, Efuse Output Data Register */ + hw->phy.ops.write_reg(hw, 16, 0xa46, (efuse[0] >> 0) & 0xFFFF); + hw->phy.ops.write_reg(hw, 17, 0xa46, (efuse[0] >> 16) & 0xFFFF); + hw->phy.ops.write_reg(hw, 18, 0xa46, (efuse[1] >> 0) & 0xFFFF); + hw->phy.ops.write_reg(hw, 19, 0xa46, (efuse[1] >> 16) & 0xFFFF); + + hw->phy.ops.write_reg(hw, 20, 0xa46, 0x01); /* set efuse ready */ + ngbe_gphy_wait_mdio_access_on(hw); + hw->phy.ops.write_reg(hw, 27, 0xa43, 0x8011); + hw->phy.ops.write_reg(hw, 28, 0xa43, 0x5737); + /* dis fall to 100m */ + hw->phy.ops.read_reg(hw, 17, 0xa44, &val); + val &= ~0x8; + hw->phy.ops.write_reg(hw, 17, 0xa44, val); + 
ngbe_gphy_dis_eee(hw); + + return 0; +} + +static int ngbe_phy_setup(struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = hw->back; + u16 value = 0; + int i; + + if (test_bit(__NGBE_NO_PHY_SET, &adapter->state)) + return 0; + ngbe_gphy_efuse_calibration(hw); + hw->phy.ops.write_reg(hw, 20, 0xa46, 2); + ngbe_gphy_wait_mdio_access_on(hw); + + for (i = 0; i < 100;i++) { + hw->phy.ops.read_reg(hw, 16, 0xa42, &value); + if ((value & 0x7) == 3) + break; + usec_delay(1000); + } + + if (i == 100) + return NGBE_ERR_PHY_TIMEOUT; + + return 0; +} + +static int ngbe_phy_read_reg_internal(struct ngbe_hw *hw, int phy_addr, int regnum) +{ + if (phy_addr != 0) + return 0xffff; + return (u16)rd32(hw, NGBE_PHY_CONFIG(regnum)); +} + +static int ngbe_phy_write_reg_internal(struct ngbe_hw *hw, int phy_addr, int regnum, u16 value) +{ + if (phy_addr == 0) + wr32(hw, NGBE_PHY_CONFIG(regnum), value); + return 0; +} + +static int ngbe_phy_read_reg_mdi_c22(struct ngbe_hw *hw, int phy_addr, int regnum) +{ + u32 command, device_type = 0; + int ret; + + wr32(hw, NGBE_MDIO_CLAUSE_SELECT, 0xF); + /* setup and write the address cycle command */ + command = NGBE_MSCA_RA(regnum) | + NGBE_MSCA_PA(phy_addr) | + NGBE_MSCA_DA(device_type); + wr32(hw, NGBE_MSCA, command); + command = NGBE_MSCC_CMD(NGBE_MSCA_CMD_READ) | + NGBE_MSCC_BUSY | + NGBE_MDIO_CLK(6); + wr32(hw, NGBE_MSCC, command); + + /* wait to complete */ + ret = po32m(hw, NGBE_MSCC, NGBE_MSCC_BUSY, ~NGBE_MSCC_BUSY, + NGBE_MDIO_TIMEOUT, 10); + if (ret) + return ret; + + return (u16)rd32(hw, NGBE_MSCC); +} + +static int ngbe_phy_write_reg_mdi_c22(struct ngbe_hw *hw, int phy_addr, int regnum, u16 value) +{ + u32 command, device_type = 0; + int ret; + + wr32(hw, NGBE_MDIO_CLAUSE_SELECT, 0xF); + /* setup and write the address cycle command */ + command = NGBE_MSCA_RA(regnum) | + NGBE_MSCA_PA(phy_addr) | + NGBE_MSCA_DA(device_type); + wr32(hw, NGBE_MSCA, command); + command = value | + NGBE_MSCC_CMD(NGBE_MSCA_CMD_WRITE) | + NGBE_MSCC_BUSY | + 
NGBE_MDIO_CLK(6); + wr32(hw, NGBE_MSCC, command); + + /* wait to complete */ + ret = po32m(hw, NGBE_MSCC, NGBE_MSCC_BUSY, ~NGBE_MSCC_BUSY, + NGBE_MDIO_TIMEOUT, 10); + + return ret; +} + +static int ngbe_phy_read_reg_c22(struct ngbe_hw *hw, int phy_addr, int regnum) +{ + u16 phy_data; + + if (hw->mac_type == em_mac_type_mdi) + phy_data = ngbe_phy_read_reg_internal(hw, phy_addr, regnum); + else + phy_data = ngbe_phy_read_reg_mdi_c22(hw, phy_addr, regnum); + + return phy_data; +} + +static int ngbe_phy_write_reg_c22(struct ngbe_hw *hw, int phy_addr, + int regnum, u16 value) +{ + int ret; + + if (hw->mac_type == em_mac_type_mdi) + ret = ngbe_phy_write_reg_internal(hw, phy_addr, regnum, value); + else + ret = ngbe_phy_write_reg_mdi_c22(hw, phy_addr, regnum, value); + + return ret; +} + +static int ngbe_genphy_suspend(struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = hw->back; + u16 val; + + if (adapter->eth_priv_flags & NGBE_ETH_PRIV_FLAG_LLDP || + hw->ncsi_enabled) + return 0; + hw->phy.ops.read_reg(hw, 0x0, 0x0, &val); + + return hw->phy.ops.write_reg(hw, 0x0, 0x0, val | 0x800); +} + +int ngbe_mv_suspend(struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = hw->back; + u16 val; + + if (adapter->eth_priv_flags & NGBE_ETH_PRIV_FLAG_LLDP || + hw->ncsi_enabled) + return 0; + + if (hw->phy.type == ngbe_phy_m88e1512) { + hw->phy.ops.write_reg_mdi(hw, 22, 0, 0); + hw->phy.ops.read_reg_mdi(hw, 0, 0, &val); + + return hw->phy.ops.write_reg_mdi(hw, 0x0, 0x0, val | 0x800); + } else { + hw->phy.ops.write_reg_mdi(hw, 22, 0, 1); + hw->phy.ops.read_reg_mdi(hw, 0, 0, &val); + + return hw->phy.ops.write_reg_mdi(hw, 0x0, 0x0, val | 0x800); + } +} + +int ngbe_yt_suspend(struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = hw->back; + u16 val; + unsigned long flags; + + if (adapter->eth_priv_flags & NGBE_ETH_PRIV_FLAG_LLDP || + hw->ncsi_enabled) + return 0; + + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x0, 0, &val); + val |= 0x800; + 
ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x0, 0, val); + + /* power down in UTP mode */ + ngbe_phy_read_reg_mdi(hw, 0x0, 0, &val); + val |= 0x800; + ngbe_phy_write_reg_mdi(hw, 0x0, 0, val); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + return 0; +} + +static int ngbe_genphy_resume(struct ngbe_hw *hw) +{ + u16 val; + + hw->phy.ops.read_reg(hw, 0x0, 0x0, &val); + + return hw->phy.ops.write_reg(hw, 0x0, 0x0, val & (~0x800)); +} + +void ngbe_init_phy_ops_common(struct ngbe_hw *hw) +{ + struct ngbe_phy_info *phy = &hw->phy; + + phy->ops.reset = ngbe_phy_reset; + phy->ops.read = ngbe_phy_read_reg_c22; + phy->ops.write = ngbe_phy_write_reg_c22; + phy->ops.read_reg = ngbe_phy_read_reg; + phy->ops.write_reg = ngbe_phy_write_reg; + phy->ops.setup_link = ngbe_phy_setup_link; + phy->ops.phy_suspend = ngbe_genphy_suspend; + phy->ops.phy_resume = ngbe_genphy_resume; + phy->ops.phy_led_ctrl = ngbe_phy_led_ctrl; + phy->ops.check_overtemp = ngbe_phy_check_overtemp; + phy->ops.identify = ngbe_phy_identify; + phy->ops.init = ngbe_phy_init; + phy->ops.check_event = ngbe_phy_check_event; + phy->ops.get_adv_pause = ngbe_phy_get_advertised_pause; + phy->ops.get_lp_adv_pause = ngbe_phy_get_lp_advertised_pause; + phy->ops.set_adv_pause = ngbe_phy_set_pause_advertisement; + phy->ops.setup_once = ngbe_phy_setup; +} + diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_phy.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_phy.h new file mode 100644 index 000000000000..96ab514505c3 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_phy.h @@ -0,0 +1,203 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + */ + + +#ifndef _NGBE_PHY_H_ +#define _NGBE_PHY_H_ + +#include "ngbe_type.h" +#include "ngbe.h" + +/* EEPROM byte offsets */ +#define NGBE_SFF_IDENTIFIER 0x0 +#define NGBE_SFF_IDENTIFIER_SFP 0x3 +#define NGBE_SFF_VENDOR_OUI_BYTE0 0x25 +#define NGBE_SFF_VENDOR_OUI_BYTE1 0x26 +#define NGBE_SFF_VENDOR_OUI_BYTE2 0x27 +#define NGBE_SFF_1GBE_COMP_CODES 0x6 +#define NGBE_SFF_10GBE_COMP_CODES 0x3 +#define NGBE_SFF_CABLE_TECHNOLOGY 0x8 +#define NGBE_SFF_CABLE_SPEC_COMP 0x3C +#define NGBE_SFF_SFF_8472_SWAP 0x5C +#define NGBE_SFF_SFF_8472_COMP 0x5E +#define NGBE_SFF_SFF_8472_OSCB 0x6E +#define NGBE_SFF_SFF_8472_ESCB 0x76 +#define NGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD +#define NGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5 +#define NGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6 +#define NGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7 +#define NGBE_SFF_QSFP_CONNECTOR 0x82 +#define NGBE_SFF_QSFP_10GBE_COMP 0x83 +#define NGBE_SFF_QSFP_1GBE_COMP 0x86 +#define NGBE_SFF_QSFP_CABLE_LENGTH 0x92 +#define NGBE_SFF_QSFP_DEVICE_TECH 0x93 + +/* Bitmasks */ +#define NGBE_SFF_DA_PASSIVE_CABLE 0x4 +#define NGBE_SFF_DA_ACTIVE_CABLE 0x8 +#define NGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 +#define NGBE_SFF_1GBASESX_CAPABLE 0x1 +#define NGBE_SFF_1GBASELX_CAPABLE 0x2 +#define NGBE_SFF_1GBASET_CAPABLE 0x8 +#define NGBE_SFF_10GBASESR_CAPABLE 0x10 +#define NGBE_SFF_10GBASELR_CAPABLE 0x20 +#define NGBE_SFF_SOFT_RS_SELECT_MASK 0x8 +#define NGBE_SFF_SOFT_RS_SELECT_10G 0x8 +#define NGBE_SFF_SOFT_RS_SELECT_1G 0x0 +#define NGBE_SFF_ADDRESSING_MODE 0x4 +#define NGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 +#define NGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 +#define NGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23 
+#define NGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0 +#define NGBE_I2C_EEPROM_READ_MASK 0x100 +#define NGBE_I2C_EEPROM_STATUS_MASK 0x3 +#define NGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 +#define NGBE_I2C_EEPROM_STATUS_PASS 0x1 +#define NGBE_I2C_EEPROM_STATUS_FAIL 0x2 +#define NGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 + +#define NGBE_CS4227 0xBE /* CS4227 address */ +#define NGBE_CS4227_GLOBAL_ID_LSB 0 +#define NGBE_CS4227_SCRATCH 2 +#define NGBE_CS4227_GLOBAL_ID_VALUE 0x03E5 +#define NGBE_CS4227_SCRATCH_VALUE 0x5aa5 +#define NGBE_CS4227_RETRIES 5 +#define NGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to program speed */ +#define NGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to program EDC */ +#define NGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to program speed */ +#define NGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */ +#define NGBE_CS4227_EDC_MODE_CX1 0x0002 +#define NGBE_CS4227_EDC_MODE_SR 0x0004 +#define NGBE_CS4227_RESET_HOLD 500 /* microseconds */ +#define NGBE_CS4227_RESET_DELAY 500 /* milliseconds */ +#define NGBE_CS4227_CHECK_DELAY 30 /* milliseconds */ +#define NGBE_PE 0xE0 /* Port expander address */ +#define NGBE_PE_OUTPUT 1 /* Output register offset */ +#define NGBE_PE_CONFIG 3 /* Config register offset */ +#define NGBE_PE_BIT1 (1 << 1) + +/* Flow control defines */ +#define NGBE_TAF_SYM_PAUSE (0x1) +#define NGBE_TAF_ASM_PAUSE (0x2) + +/* Bit-shift macros */ +#define NGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 +#define NGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 +#define NGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8 + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define NGBE_SFF_VENDOR_OUI_TYCO 0x00407600 +#define NGBE_SFF_VENDOR_OUI_FTL 0x00906500 +#define NGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define NGBE_SFF_VENDOR_OUI_INTEL 0x001B2100 + +/* I2C SDA and SCL timing parameters for standard mode */ +#define NGBE_I2C_T_HD_STA 4 +#define NGBE_I2C_T_LOW 5 +#define NGBE_I2C_T_HIGH 4 +#define NGBE_I2C_T_SU_STA 5 +#define NGBE_I2C_T_HD_DATA 5 +#define NGBE_I2C_T_SU_DATA 
1 +#define NGBE_I2C_T_RISE 1 +#define NGBE_I2C_T_FALL 1 +#define NGBE_I2C_T_SU_STO 4 +#define NGBE_I2C_T_BUF 5 + +#ifndef NGBE_SFP_DETECT_RETRIES +#define NGBE_SFP_DETECT_RETRIES 10 +#endif /* NGBE_SFP_DETECT_RETRIES */ + +/* SFP+ SFF-8472 Compliance */ +#define NGBE_SFF_SFF_8472_UNSUP 0x00 + +bool ngbe_check_reset_blocked(struct ngbe_hw *hw); +enum ngbe_phy_type ngbe_get_phy_type_from_id(struct ngbe_hw *hw); +void ngbe_init_phy_ops_common(struct ngbe_hw *hw); +int ngbe_phy_read_reg_mdi( struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data); +int ngbe_phy_write_reg_mdi( struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data); + +int ngbe_phy_read_reg_sds_mii_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data); +int ngbe_phy_write_reg_sds_mii_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data); + +int ngbe_phy_read_reg_ext_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data); +int ngbe_phy_write_reg_ext_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data); + +int ngbe_phy_read_reg_sds_ext_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data); +int ngbe_phy_write_reg_sds_ext_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data); + +int ngbe_phy_init(struct ngbe_hw *hw); +int ngbe_phy_identify(struct ngbe_hw *hw); +int ngbe_phy_reset(struct ngbe_hw *hw); +u32 ngbe_phy_setup_link(struct ngbe_hw *hw, + u32 speed, + bool need_restart_AN); +u32 ngbe_phy_led_ctrl(struct ngbe_hw *hw); +int ngbe_phy_reset_m88e1512(struct ngbe_hw *hw); +u32 ngbe_phy_setup_link_m88e1512( struct ngbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete); +int ngbe_phy_check_overtemp(struct ngbe_hw *hw); + +int ngbe_mv_suspend(struct ngbe_hw *hw); +int ngbe_yt_suspend(struct ngbe_hw *hw); + +int ngbe_phy_check_event(struct ngbe_hw *hw); +int ngbe_phy_check_event_m88e1512(struct ngbe_hw *hw); +int 
ngbe_phy_set_pause_advertisement_m88e1512(struct ngbe_hw *hw, + u16 pause_bit); +int ngbe_phy_get_advertised_pause_m88e1512(struct ngbe_hw *hw, u8 *pause_bit); +int ngbe_phy_get_lp_advertised_pause_m88e1512(struct ngbe_hw *hw, u8 *pause_bit); +int ngbe_phy_check_event_yt8521s(struct ngbe_hw *hw); +int ngbe_phy_get_advertised_pause_yt8521s(struct ngbe_hw *hw, u8 *pause_bit); +int ngbe_phy_get_lp_advertised_pause_yt8521s(struct ngbe_hw *hw, u8 *pause_bit); +int ngbe_phy_reset_yt8521s(struct ngbe_hw *hw); +u32 ngbe_phy_setup_link_yt8521s( struct ngbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete); +int ngbe_phy_set_pause_advertisement_yt8521s(struct ngbe_hw *hw, + u16 pause_bit); +int ngbe_gphy_wait_mdio_access_on(struct ngbe_hw *hw); +int ngbe_gphy_efuse_calibration(struct ngbe_hw *hw); + +int ngbe_gphy_dis_eee(struct ngbe_hw *hw); + + +#endif /* _NGBE_PHY_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_procfs.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_procfs.c new file mode 100644 index 000000000000..d242e69974dc --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_procfs.c @@ -0,0 +1,924 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + */ + + +#include "ngbe.h" +#include "ngbe_hw.h" +#include "ngbe_type.h" + +#ifdef NGBE_PROCFS +#ifndef NGBE_SYSFS + +#include +#include +#include +#include +#include + +static struct proc_dir_entry *ngbe_top_dir; + +static struct net_device_stats *procfs_get_stats(struct net_device *netdev) +{ +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + struct ngbe_adapter *adapter; +#endif + if (netdev == NULL) + return NULL; + +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + /* only return the current stats */ + return &netdev->stats; +#else + adapter = netdev_priv(netdev); + + /* only return the current stats */ + return &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ +} + +static int ngbe_fwbanner(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%s\n", adapter->eeprom_id); +} + +static int ngbe_porttype(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + return snprintf(page, count, "%d\n", + test_bit(__NGBE_DOWN, &adapter->state)); +} + +static int ngbe_portspeed(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + int speed = 0; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + switch (adapter->link_speed) { + case NGBE_LINK_SPEED_100_FULL: + speed = 1; + break; + case NGBE_LINK_SPEED_1GB_FULL: + speed = 10; + break; + case NGBE_LINK_SPEED_10GB_FULL: + speed = 100; + break; + default: + break; + } + return snprintf(page, count, "%d\n", speed); +} 
+ +static int ngbe_wqlflag(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->wol); +} + +static int ngbe_xflowctl(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct ngbe_hw *hw; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", hw->fc.current_mode); +} + +static int ngbe_rxdrops(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_dropped); +} + +static int ngbe_rxerrors(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", net_stats->rx_errors); +} + +static int ngbe_rxupacks(char *page, char __always_unused **start, + off_t __always_unused off, int 
count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", rd32(hw, NGBE_TPR)); +} + +static int ngbe_rxmpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + int i, mprc = 0; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + for (i = 0; i < 8; i++) + mprc += rd32(hw, NGBE_PX_MPRC(i)); + return snprintf(page, count, "%d\n", mprc); +} + +static int ngbe_rxbpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + rd32(hw, NGBE_RX_BC_FRAMES_GOOD_LOW)); +} + +static int ngbe_txupacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + rd32(hw, NGBE_TX_FRAME_CNT_GOOD_BAD_LOW)); +} + +static int ngbe_txmpacks(char *page, char __always_unused **start, + off_t 
__always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + rd32(hw, NGBE_TX_MC_FRAMES_GOOD_LOW)); +} + +static int ngbe_txbpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + rd32(hw, NGBE_TX_BC_FRAMES_GOOD_LOW)); +} + +static int ngbe_txerrors(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_errors); +} + +static int ngbe_txdrops(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_dropped); +} + +static int 
ngbe_rxframes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_packets); +} + +static int ngbe_rxbytes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_bytes); +} + +static int ngbe_txframes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_packets); +} + +static int ngbe_txbytes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + 
return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_bytes); +} + +static int ngbe_linkstat(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + int bitmask = 0; + u32 link_speed; + bool link_up = false; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + if (!test_bit(__NGBE_DOWN, &adapter->state)) + bitmask |= 1; + + /* always assume link is up, if no check link function */ + link_up = true; + if (link_up) + bitmask |= 2; + + if (adapter->old_lsc != adapter->lsc_int) { + bitmask |= 4; + adapter->old_lsc = adapter->lsc_int; + } + + return snprintf(page, count, "0x%X\n", bitmask); +} + +static int ngbe_funcid(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct ngbe_hw *hw; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "0x%X\n", hw->bus.func); +} + +static int ngbe_funcvers(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void __always_unused *data) +{ + return snprintf(page, count, "%s\n", ngbe_driver_version); +} + +static int ngbe_macburn(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + 
return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n", + (unsigned int)hw->mac.perm_addr[0], + (unsigned int)hw->mac.perm_addr[1], + (unsigned int)hw->mac.perm_addr[2], + (unsigned int)hw->mac.perm_addr[3], + (unsigned int)hw->mac.perm_addr[4], + (unsigned int)hw->mac.perm_addr[5]); +} + +static int ngbe_macadmn(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n", + (unsigned int)hw->mac.addr[0], + (unsigned int)hw->mac.addr[1], + (unsigned int)hw->mac.addr[2], + (unsigned int)hw->mac.addr[3], + (unsigned int)hw->mac.addr[4], + (unsigned int)hw->mac.addr[5]); +} + +static int ngbe_maclla1(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct ngbe_hw *hw; + int rc; + u16 eeprom_buff[6]; + u16 first_word = 0x37; + const u16 word_count = ARRAY_SIZE(eeprom_buff); + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + rc = hw->eeprom.ops.read_buffer(hw, first_word, 1, &first_word); + if (rc != 0) + return snprintf(page, count, + "error: reading pointer to the EEPROM\n"); + + if (first_word != 0x0000 && first_word != 0xFFFF) { + rc = hw->eeprom.ops.read_buffer(hw, first_word, word_count, + eeprom_buff); + if (rc != 0) + return snprintf(page, count, "error: reading buffer\n"); + } else { + memset(eeprom_buff, 0, sizeof(eeprom_buff)); + } + + switch 
(hw->bus.func) { + case 0: + return snprintf(page, count, "0x%04X%04X%04X\n", + eeprom_buff[0], + eeprom_buff[1], + eeprom_buff[2]); + case 1: + return snprintf(page, count, "0x%04X%04X%04X\n", + eeprom_buff[3], + eeprom_buff[4], + eeprom_buff[5]); + default: + return snprintf(page, count, "unexpected port %d\n", hw->bus.func); + } +} + +static int ngbe_mtusize(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device *netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + + return snprintf(page, count, "%d\n", netdev->mtu); +} + +static int ngbe_featflag(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + int bitmask = 0; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device *netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + if (adapter->netdev->features & NETIF_F_RXCSUM) + bitmask |= 1; + return snprintf(page, count, "%d\n", bitmask); +} + +static int ngbe_lsominct(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void __always_unused *data) +{ + return snprintf(page, count, "%d\n", 1); +} + +static int ngbe_prommode(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device *netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return 
snprintf(page, count, "error: no net device\n"); + + return snprintf(page, count, "%d\n", + netdev->flags & IFF_PROMISC); +} + +static int ngbe_txdscqsz(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->tx_ring[0]->count); +} + +static int ngbe_rxdscqsz(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->rx_ring[0]->count); +} + +static int ngbe_rxqavg(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + int index; + int diff = 0; + u16 ntc; + u16 ntu; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + for (index = 0; index < adapter->num_rx_queues; index++) { + ntc = adapter->rx_ring[index]->next_to_clean; + ntu = adapter->rx_ring[index]->next_to_use; + + if (ntc >= ntu) + diff += (ntc - ntu); + else + diff += (adapter->rx_ring[index]->count - ntu + ntc); + } + if (adapter->num_rx_queues <= 0) + return snprintf(page, count, + "can't calculate, number of queues %d\n", + adapter->num_rx_queues); + return snprintf(page, count, "%d\n", diff/adapter->num_rx_queues); +} + +static int ngbe_txqavg(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + int index; + int diff = 0; + u16 ntc; + u16 ntu; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, 
"error: no adapter\n"); + + for (index = 0; index < adapter->num_tx_queues; index++) { + ntc = adapter->tx_ring[index]->next_to_clean; + ntu = adapter->tx_ring[index]->next_to_use; + + if (ntc >= ntu) + diff += (ntc - ntu); + else + diff += (adapter->tx_ring[index]->count - ntu + ntc); + } + if (adapter->num_tx_queues <= 0) + return snprintf(page, count, + "can't calculate, number of queues %d\n", + adapter->num_tx_queues); + return snprintf(page, count, "%d\n", + diff/adapter->num_tx_queues); +} + +static int ngbe_iovotype(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void __always_unused *data) +{ + return snprintf(page, count, "2\n"); +} + +static int ngbe_funcnbr(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->num_vfs); +} + +static int ngbe_pciebnbr(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->pdev->bus->number); +} + +static int ngbe_therm_dealarmthresh(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_therm_proc_data *therm_data = + (struct ngbe_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + return snprintf(page, count, "%d\n", + therm_data->sensor_data->dalarm_thresh); +} + + +static int ngbe_therm_alarmthresh(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused 
*eof, void *data) +{ + struct ngbe_therm_proc_data *therm_data = + (struct ngbe_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + return snprintf(page, count, "%d\n", + therm_data->sensor_data->alarm_thresh); +} + +static int ngbe_therm_temp(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + int status; + struct ngbe_therm_proc_data *therm_data = + (struct ngbe_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + status = ngbe_get_thermal_sensor_data(therm_data->hw); + if (status != 0) + snprintf(page, count, "error: status %d returned\n", status); + + return snprintf(page, count, "%d\n", therm_data->sensor_data->temp); +} + + +struct ngbe_proc_type { + char name[32]; + int (*read)(char*, char**, off_t, int, int*, void*); +}; + +struct ngbe_proc_type ngbe_proc_entries[] = { + {"fwbanner", &ngbe_fwbanner}, + {"porttype", &ngbe_porttype}, + {"portspeed", &ngbe_portspeed}, + {"wqlflag", &ngbe_wqlflag}, + {"xflowctl", &ngbe_xflowctl}, + {"rxdrops", &ngbe_rxdrops}, + {"rxerrors", &ngbe_rxerrors}, + {"rxupacks", &ngbe_rxupacks}, + {"rxmpacks", &ngbe_rxmpacks}, + {"rxbpacks", &ngbe_rxbpacks}, + {"txdrops", &ngbe_txdrops}, + {"txerrors", &ngbe_txerrors}, + {"txupacks", &ngbe_txupacks}, + {"txmpacks", &ngbe_txmpacks}, + {"txbpacks", &ngbe_txbpacks}, + {"rxframes", &ngbe_rxframes}, + {"rxbytes", &ngbe_rxbytes}, + {"txframes", &ngbe_txframes}, + {"txbytes", &ngbe_txbytes}, + {"linkstat", &ngbe_linkstat}, + {"funcid", &ngbe_funcid}, + {"funcvers", &ngbe_funcvers}, + {"macburn", &ngbe_macburn}, + {"macadmn", &ngbe_macadmn}, + {"maclla1", &ngbe_maclla1}, + {"mtusize", &ngbe_mtusize}, + {"featflag", &ngbe_featflag}, + {"lsominct", &ngbe_lsominct}, + {"prommode", &ngbe_prommode}, + {"txdscqsz", &ngbe_txdscqsz}, + {"rxdscqsz", &ngbe_rxdscqsz}, + {"txqavg", &ngbe_txqavg}, + {"rxqavg", 
&ngbe_rxqavg}, + {"iovotype", &ngbe_iovotype}, + {"funcnbr", &ngbe_funcnbr}, + {"pciebnbr", &ngbe_pciebnbr}, + {"", NULL} +}; + +struct ngbe_proc_type ngbe_internal_entries[] = { + {"temp", &ngbe_therm_temp}, + {"alarmthresh", &ngbe_therm_alarmthresh}, + {"dealarmthresh", &ngbe_therm_dealarmthresh}, + {"", NULL} +}; + +void ngbe_del_proc_entries(struct ngbe_adapter *adapter) +{ + int index; + int i; + char buf[16]; /* much larger than the sensor number will ever be */ + + if (ngbe_top_dir == NULL) + return; + + for (i = 0; i < NGBE_MAX_SENSORS; i++) { + if (adapter->therm_dir[i] == NULL) + continue; + + for (index = 0; ; index++) { + if (ngbe_internal_entries[index].read == NULL) + break; + + remove_proc_entry(ngbe_internal_entries[index].name, + adapter->therm_dir[i]); + } + snprintf(buf, sizeof(buf), "sensor_%d", i); + remove_proc_entry(buf, adapter->info_dir); + } + + if (adapter->info_dir != NULL) { + for (index = 0; ; index++) { + if (ngbe_proc_entries[index].read == NULL) + break; + remove_proc_entry(ngbe_proc_entries[index].name, + adapter->info_dir); + } + remove_proc_entry("info", adapter->eth_dir); + } + + if (adapter->eth_dir != NULL) + remove_proc_entry(pci_name(adapter->pdev), ngbe_top_dir); +} + +/* called from ngbe_main.c */ +void ngbe_procfs_exit(struct ngbe_adapter *adapter) +{ + ngbe_del_proc_entries(adapter); +} + +int ngbe_procfs_topdir_init(void) +{ + ngbe_top_dir = proc_mkdir("driver/ngbe", NULL); + if (ngbe_top_dir == NULL) + return -ENOMEM; + + return 0; +} + +void ngbe_procfs_topdir_exit(void) +{ + remove_proc_entry("driver/ngbe", NULL); +} + +/* called from ngbe_main.c */ +int ngbe_procfs_init(struct ngbe_adapter *adapter) +{ + int rc = 0; + int index; + int i; + char buf[16]; /* much larger than the sensor number will ever be */ + + adapter->eth_dir = NULL; + adapter->info_dir = NULL; + adapter->therm_dir = NULL; + + if (ngbe_top_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + + adapter->eth_dir = proc_mkdir(pci_name(adapter->pdev), 
ngbe_top_dir); + if (adapter->eth_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + + adapter->info_dir = proc_mkdir("info", adapter->eth_dir); + if (adapter->info_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + for (index = 0; ; index++) { + if (ngbe_proc_entries[index].read == NULL) + break; + if (!(create_proc_read_entry(ngbe_proc_entries[index].name, + 0444, + adapter->info_dir, + ngbe_proc_entries[index].read, + adapter))) { + + rc = -ENOMEM; + goto fail; + } + } + if (!adapter->hw->ops.init_thermal_sensor_thresh(hw)) + goto exit; + + + snprintf(buf, sizeof(buf), "sensor"); + adapter->therm_dir = proc_mkdir(buf, adapter->info_dir); + if (adapter->therm_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + for (index = 0; ; index++) { + if (ngbe_internal_entries[index].read == NULL) + break; + /* + * therm_data struct contains pointer the read func + * will be needing + */ + adapter->therm_data.hw = &adapter->hw; + adapter->therm_data.sensor_data = + &adapter->hw.mac.thermal_sensor_data.sensor; + + if (!(create_proc_read_entry( + ngbe_internal_entries[index].name, + 0444, + adapter->therm_dir, + ngbe_internal_entries[index].read, + &adapter->therm_data))) { + rc = -ENOMEM; + goto fail; + } + } + + goto exit; + +fail: + ngbe_del_proc_entries(adapter); +exit: + return rc; +} + +#endif /* !NGBE_SYSFS */ +#endif /* NGBE_PROCFS */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ptp.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ptp.c new file mode 100644 index 000000000000..822492aea4fe --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ptp.c @@ -0,0 +1,887 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + */ + + +#include "ngbe.h" +#include + +/* + * SYSTIME is defined by a fixed point system which allows the user to + * define the scale counter increment value at every level change of + * the oscillator driving SYSTIME value. The time unit is determined by + * the clock frequency of the oscillator and TIMINCA register. + * The cyclecounter and timecounter structures are used to to convert + * the scale counter into nanoseconds. SYSTIME registers need to be converted + * to ns values by use of only a right shift. + * The following math determines the largest incvalue that will fit into + * the available bits in the TIMINCA register: + * Period * [ 2 ^ ( MaxWidth - PeriodWidth ) ] + * PeriodWidth: Number of bits to store the clock period + * MaxWidth: The maximum width value of the TIMINCA register + * Period: The clock period for the oscillator, which changes based on the link + * speed: + * At 10Gb link or no link, the period is 6.4 ns. + * At 1Gb link, the period is multiplied by 10. (64ns) + * At 100Mb link, the period is multiplied by 100. (640ns) + * round(): discard the fractional portion of the calculation + * + * The calculated value allows us to right shift the SYSTIME register + * value in order to quickly convert it into a nanosecond clock, + * while allowing for the maximum possible adjustment value. 
+ * + * LinkSpeed ClockFreq ClockPeriod TIMINCA:IV + * 10000Mbps 156.25MHz 6.4*10^-9 0xCCCCCC(0xFFFFF/ns) + * 1000 Mbps 62.5 MHz 16 *10^-9 0x800000(0x7FFFF/ns) + * 100 Mbps 6.25 MHz 160*10^-9 0xA00000(0xFFFF/ns) + * 10 Mbps 0.625 MHz 1600*10^-9 0xC7F380(0xFFF/ns) + * FPGA 31.25 MHz 32 *10^-9 0x800000(0x3FFFF/ns) + * + * These diagrams are only for the 10Gb link period + * + * +--------------+ +--------------+ + * | 32 | | 8 | 3 | 20 | + * *--------------+ +--------------+ + * \________ 43 bits ______/ fract + * + * The 43 bit SYSTIME overflows every + * 2^43 * 10^-9 / 3600 = 2.4 hours + */ +#define NGBE_INCVAL_10GB 0xCCCCCC +#define NGBE_INCVAL_1GB 0x2000000/*in Emerald all speed is same*/ +#define NGBE_INCVAL_100 0xA00000 +#define NGBE_INCVAL_10 0xC7F380 +#define NGBE_INCVAL_FPGA 0x800000 + +#define NGBE_INCVAL_SHIFT_10GB 20 +#define NGBE_INCVAL_SHIFT_1GB 22/*in Emerald all speed is same*/ +#define NGBE_INCVAL_SHIFT_100 15 +#define NGBE_INCVAL_SHIFT_10 12 +#define NGBE_INCVAL_SHIFT_FPGA 17 + +#define NGBE_OVERFLOW_PERIOD (HZ * 30) +#define NGBE_PTP_TX_TIMEOUT (HZ) + +/** + * ngbe_ptp_read - read raw cycle counter (to be used by time counter) + * @hw_cc: the cyclecounter structure + * + * this function reads the cyclecounter registers and is called by the + * cyclecounter structure used to construct a ns counter from the + * arbitrary fixed point registers + */ +static u64 ngbe_ptp_read(const struct cyclecounter *hw_cc) +{ + struct ngbe_adapter *adapter = + container_of(hw_cc, struct ngbe_adapter, hw_cc); + struct ngbe_hw *hw = &adapter->hw; + u64 stamp = 0; + + stamp |= (u64)rd32(hw, NGBE_TSEC_1588_SYSTIML); + stamp |= (u64)rd32(hw, NGBE_TSEC_1588_SYSTIMH) << 32; + + return stamp; +} + +/** + * ngbe_ptp_convert_to_hwtstamp - convert register value to hw timestamp + * @adapter: private adapter structure + * @hwtstamp: stack timestamp structure + * @systim: unsigned 64bit system time value + * + * We need to convert the adapter's RX/TXSTMP registers into a hwtstamp 
value + * which can be used by the stack's ptp functions. + * + * The lock is used to protect consistency of the cyclecounter and the SYSTIME + * registers. However, it does not need to protect against the Rx or Tx + * timestamp registers, as there can't be a new timestamp until the old one is + * unlatched by reading. + * + * In addition to the timestamp in hardware, some controllers need a software + * overflow cyclecounter, and this function takes this into account as well. + **/ +static void ngbe_ptp_convert_to_hwtstamp(struct ngbe_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamp, + u64 timestamp) +{ + unsigned long flags; + u64 ns; + + memset(hwtstamp, 0, sizeof(*hwtstamp)); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_cyc2time(&adapter->hw_tc, timestamp); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + hwtstamp->hwtstamp = ns_to_ktime(ns); +} + + +/** + * ngbe_ptp_adjfreq + * @ptp: the ptp clock structure + * @ppb: parts per billion adjustment from base + * + * adjust the frequency of the ptp cycle counter by the + * indicated ppb from the base frequency. + */ + #ifndef HAVE_NOT_PTT_ADJFREQ +static int ngbe_ptp_adjfreq(struct ptp_clock_info *ptp, int ppb) +{ + struct ngbe_adapter *adapter = + container_of(ptp, struct ngbe_adapter, ptp_caps); + struct ngbe_hw *hw = &adapter->hw; + u64 freq, incval; + u32 diff; + int neg_adj = 0; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + + smp_mb(); + incval = READ_ONCE(adapter->base_incval); + + freq = incval; + freq *= ppb; + diff = div_u64(freq, 1000000000ULL); + + incval = neg_adj ? 
(incval - diff) : (incval + diff); + /* temp setting*/ + + if (incval > NGBE_TSEC_1588_INC_IV(~0)) + e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); + wr32(hw, NGBE_TSEC_1588_INC, NGBE_TSEC_1588_INC_IV(incval)); + + return 0; +} +#endif + +/** + * ngbe_ptp_adjtime + * @ptp: the ptp clock structure + * @delta: offset to adjust the cycle counter by ns + * + * adjust the timer by resetting the timecounter structure. + */ +static int ngbe_ptp_adjtime(struct ptp_clock_info *ptp, + s64 delta) +{ + struct ngbe_adapter *adapter = + container_of(ptp, struct ngbe_adapter, ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + timecounter_adjtime(&adapter->hw_tc, delta); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + return 0; +} + +/** + * ngbe_ptp_gettime64 + * @ptp: the ptp clock structure + * @ts: timespec64 structure to hold the current time value + * + * read the timecounter and return the correct value on ns, + * after converting it into a struct timespec64. + */ +static int ngbe_ptp_gettime64(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct ngbe_adapter *adapter = + container_of(ptp, struct ngbe_adapter, ptp_caps); + unsigned long flags; + u64 ns; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->hw_tc); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +/** + * ngbe_ptp_settime64 + * @ptp: the ptp clock structure + * @ts: the timespec64 containing the new time for the cycle counter + * + * reset the timecounter to use a new base value instead of the kernel + * wall timer value. 
+ */ +static int ngbe_ptp_settime64(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct ngbe_adapter *adapter = + container_of(ptp, struct ngbe_adapter, ptp_caps); + u64 ns; + unsigned long flags; + + ns = timespec64_to_ns(ts); + + /* reset the timecounter */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + timecounter_init(&adapter->hw_tc, &adapter->hw_cc, ns); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + return 0; +} + +#ifndef HAVE_PTP_CLOCK_INFO_GETTIME64 +static int ngbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +{ + struct timespec64 ts64; + int err; + + err = ngbe_ptp_gettime64(ptp, &ts64); + if (err) + return err; + + *ts = timespec64_to_timespec(ts64); + + return 0; +} + +static int ngbe_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct timespec64 ts64; + + ts64 = timespec_to_timespec64(*ts); + return ngbe_ptp_settime64(ptp, &ts64); +} +#endif + +/** + * ngbe_ptp_feature_enable + * @ptp: the ptp clock structure + * @rq: the requested feature to change + * @on: whether to enable or disable the feature + * + * enable (or disable) ancillary features of the phc subsystem. + * our driver only supports the PPS feature on the X540 + */ +static int ngbe_ptp_feature_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -ENOTSUPP; +} + +/** + * ngbe_ptp_check_pps_event + * @adapter: the private adapter structure + * @eicr: the interrupt cause register value + * + * This function is called by the interrupt routine when checking for + * interrupts. It will check and handle a pps event. + */ +void ngbe_ptp_check_pps_event(struct ngbe_adapter *adapter) +{ + /* this check is necessary in case the interrupt was enabled via some + * alternative means (ex. debug_fs). Better to check here than + * everywhere that calls this function. 
+ */ + if (!adapter->ptp_clock) + return; +} + +/** + * ngbe_ptp_overflow_check - watchdog task to detect SYSTIME overflow + * @adapter: private adapter struct + * + * this watchdog task periodically reads the timecounter + * in order to prevent missing when the system time registers wrap + * around. This needs to be run approximately twice a minute for the fastest + * overflowing hardware. We run it for all hardware since it shouldn't have a + * large impact. + */ +void ngbe_ptp_overflow_check(struct ngbe_adapter *adapter) +{ + bool timeout = time_is_before_jiffies(adapter->last_overflow_check + + NGBE_OVERFLOW_PERIOD); + struct timespec64 ts; + + if (timeout) { + ngbe_ptp_gettime64(&adapter->ptp_caps, &ts); + adapter->last_overflow_check = jiffies; + } +} + +/** + * ngbe_ptp_rx_hang - detect error case when Rx timestamp registers latched + * @adapter: private network adapter structure + * + * this watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. 
+ */ +void ngbe_ptp_rx_hang(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_ring *rx_ring; + u32 tsyncrxctl = rd32(hw, NGBE_PSR_1588_CTL); + unsigned long rx_event; + int n; + + /* if we don't have a valid timestamp in the registers, just update the + * timeout counter and exit + */ + if (!(tsyncrxctl & NGBE_PSR_1588_CTL_VALID)) { + adapter->last_rx_ptp_check = jiffies; + return; + } + + /* determine the most recent watchdog or rx_timestamp event */ + rx_event = adapter->last_rx_ptp_check; + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + if (time_after(rx_ring->last_rx_timestamp, rx_event)) + rx_event = rx_ring->last_rx_timestamp; + } + + /* only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5 * HZ)) { + rd32(hw, NGBE_PSR_1588_STMPH); + adapter->last_rx_ptp_check = jiffies; + + adapter->rx_hwtstamp_cleared++; + e_warn(drv, "clearing RX Timestamp hang"); + } +} + +/** + * ngbe_ptp_clear_tx_timestamp - utility function to clear Tx timestamp state + * @adapter: the private adapter structure + * + * This function should be called whenever the state related to a Tx timestamp + * needs to be cleared. This helps ensure that all related bits are reset for + * the next Tx timestamp event. 
+ */ +static void ngbe_ptp_clear_tx_timestamp(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + + rd32(hw, NGBE_TSEC_1588_STMPH); + if (adapter->ptp_tx_skb) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + } + clear_bit_unlock(__NGBE_PTP_TX_IN_PROGRESS, &adapter->state); +} + +/** + * ngbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: the private adapter struct + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the shhwtstamps structure which + * is passed up the network stack + */ +static void ngbe_ptp_tx_hwtstamp(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct skb_shared_hwtstamps shhwtstamps; + u64 regval = 0; + + regval |= (u64)rd32(hw, NGBE_TSEC_1588_STMPL); + regval |= (u64)rd32(hw, NGBE_TSEC_1588_STMPH) << 32; + + ngbe_ptp_convert_to_hwtstamp(adapter, &shhwtstamps, regval); + skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); + + ngbe_ptp_clear_tx_timestamp(adapter); +} + +/** + * ngbe_ptp_tx_hwtstamp_work + * @work: pointer to the work struct + * + * This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware + * timestamp has been taken for the current skb. It is necesary, because the + * descriptor's "done" bit does not correlate with the timestamp event. 
+ */ +static void ngbe_ptp_tx_hwtstamp_work(struct work_struct *work) +{ + struct ngbe_adapter *adapter = container_of(work, struct ngbe_adapter, + ptp_tx_work); + struct ngbe_hw *hw = &adapter->hw; + bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + + NGBE_PTP_TX_TIMEOUT); + u32 tsynctxctl; + + /* we have to have a valid skb to poll for a timestamp */ + if (!adapter->ptp_tx_skb) { + ngbe_ptp_clear_tx_timestamp(adapter); + return; + } + + /* stop polling once we have a valid timestamp */ + tsynctxctl = rd32(hw, NGBE_TSEC_1588_CTL); + if (tsynctxctl & NGBE_TSEC_1588_CTL_VALID) { + ngbe_ptp_tx_hwtstamp(adapter); + return; + } + + /* check timeout last in case timestamp event just occurred */ + if (timeout) { + ngbe_ptp_clear_tx_timestamp(adapter); + adapter->tx_hwtstamp_timeouts++; + e_warn(drv, "clearing Tx Timestamp hang"); + } else { + /* reschedule to keep checking until we timeout */ + schedule_work(&adapter->ptp_tx_work); + } +} + +/** + * ngbe_ptp_rx_rgtstamp - utility function which checks for RX time stamp + * @q_vector: structure containing interrupt and ring information + * @skb: particular skb to send timestamp with + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the shhwtstamps structure which + * is passed up the network stack + */ +void ngbe_ptp_rx_hwtstamp(struct ngbe_adapter *adapter, struct sk_buff *skb) +{ + struct ngbe_hw *hw = &adapter->hw; + u64 regval = 0; + u32 tsyncrxctl; + + /* + * Read the tsyncrxctl register afterwards in order to prevent taking an + * I/O hit on every packet. 
+ */ + tsyncrxctl = rd32(hw, NGBE_PSR_1588_CTL); + if (!(tsyncrxctl & NGBE_PSR_1588_CTL_VALID)) + return; + + regval |= (u64)rd32(hw, NGBE_PSR_1588_STMPL); + regval |= (u64)rd32(hw, NGBE_PSR_1588_STMPH) << 32; + + ngbe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); +} + +/** + * ngbe_ptp_get_ts_config - get current hardware timestamping configuration + * @adapter: pointer to adapter structure + * @ifreq: ioctl data + * + * This function returns the current timestamping settings. Rather than + * attempt to deconstruct registers to fill in the values, simply keep a copy + * of the old settings around, and return a copy when requested. + */ +int ngbe_ptp_get_ts_config(struct ngbe_adapter *adapter, struct ifreq *ifr) +{ + struct hwtstamp_config *config = &adapter->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, + sizeof(*config)) ? -EFAULT : 0; +} + +/** + * ngbe_ptp_set_timestamp_mode - setup the hardware for the requested mode + * @adapter: the private ngbe adapter structure + * @config: the hwtstamp configuration requested + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't cause any overhead + * when no packet needs it. At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware + * filters. Not all combinations are supported, in particular event + * type has to be specified. Matching the kind of event packet is + * not supported, with the exception of "all V2 events regardless of + * level 2 or 4". + * + * Since hardware always timestamps Path delay packets when timestamping V2 + * packets, regardless of the type specified in the register, only use V2 + * Event mode. This more accurately tells the user what the hardware is going + * to do anyways. 
+ *
+ * Note: this may modify the hwtstamp configuration towards a more general
+ * mode, if required to support the specifically requested mode.
+ */
+static int ngbe_ptp_set_timestamp_mode(struct ngbe_adapter *adapter,
+				       struct hwtstamp_config *config)
+{
+	struct ngbe_hw *hw = &adapter->hw;
+	u32 tsync_tx_ctl = NGBE_TSEC_1588_CTL_ENABLED;
+	u32 tsync_rx_ctl = NGBE_PSR_1588_CTL_ENABLED;
+	u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
+	bool is_l2 = false;
+	u32 regval;
+
+	/* reserved for future extensions */
+	if (config->flags)
+		return -EINVAL;
+
+	switch (config->tx_type) {
+	case HWTSTAMP_TX_OFF:
+		tsync_tx_ctl = 0;
+		break;
+	case HWTSTAMP_TX_ON:
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	switch (config->rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		tsync_rx_ctl = 0;
+		tsync_rx_mtrl = 0;
+		adapter->flags &= ~(NGBE_FLAG_RX_HWTSTAMP_ENABLED |
+				    NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+		break;
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+		tsync_rx_ctl |= NGBE_PSR_1588_CTL_TYPE_L4_V1;
+		tsync_rx_mtrl |= NGBE_PSR_1588_MSGTYPE_V1_SYNC_MSG;
+		adapter->flags |= (NGBE_FLAG_RX_HWTSTAMP_ENABLED |
+				   NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+		break;
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+		tsync_rx_ctl |= NGBE_PSR_1588_CTL_TYPE_L4_V1;
+		tsync_rx_mtrl |= NGBE_PSR_1588_MSGTYPE_V1_DELAY_REQ_MSG;
+		adapter->flags |= (NGBE_FLAG_RX_HWTSTAMP_ENABLED |
+				   NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+		tsync_rx_ctl |= NGBE_PSR_1588_CTL_TYPE_EVENT_V2;
+		is_l2 = true;
+		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+		adapter->flags |= (NGBE_FLAG_RX_HWTSTAMP_ENABLED |
+				   NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+		break;
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+	case HWTSTAMP_FILTER_ALL:
+	default:
+		/* register RXMTRL must be set in order to do V1 packets,
+		 * therefore it is not possible to time stamp both V1 Sync and
+		 * Delay_Req messages unless hardware supports timestamping all
+		 * packets => return error
+		 */
+		adapter->flags &= ~(NGBE_FLAG_RX_HWTSTAMP_ENABLED |
+				    NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+		config->rx_filter = HWTSTAMP_FILTER_NONE;
+		return -ERANGE;
+	}
+
+	/* define ethertype filter for timestamping L2 packets */
+	if (is_l2)
+		wr32(hw,
+		     NGBE_PSR_ETYPE_SWC(NGBE_PSR_ETYPE_SWC_FILTER_1588),
+		     (NGBE_PSR_ETYPE_SWC_FILTER_EN | /* enable filter */
+		      NGBE_PSR_ETYPE_SWC_1588 | /* enable timestamping */
+		      ETH_P_1588)); /* 1588 eth protocol type */
+	else
+		wr32(hw,
+		     NGBE_PSR_ETYPE_SWC(NGBE_PSR_ETYPE_SWC_FILTER_1588),
+		     0);
+
+	/* enable/disable TX */
+	regval = rd32(hw, NGBE_TSEC_1588_CTL);
+	regval &= ~NGBE_TSEC_1588_CTL_ENABLED;
+	regval |= tsync_tx_ctl;
+	wr32(hw, NGBE_TSEC_1588_CTL, regval);
+
+	/* enable/disable RX */
+	regval = rd32(hw, NGBE_PSR_1588_CTL);
+	regval &= ~(NGBE_PSR_1588_CTL_ENABLED | NGBE_PSR_1588_CTL_TYPE_MASK);
+	regval |= tsync_rx_ctl;
+	wr32(hw, NGBE_PSR_1588_CTL, regval);
+
+	/* define which PTP packets are time stamped */
+	wr32(hw, NGBE_PSR_1588_MSGTYPE, tsync_rx_mtrl);
+
+	NGBE_WRITE_FLUSH(hw);
+
+	/* clear TX/RX timestamp state, just to be sure */
+	ngbe_ptp_clear_tx_timestamp(adapter);
+	rd32(hw, NGBE_PSR_1588_STMPH);
+
+	return 0;
+}
+
+/**
+ * ngbe_ptp_set_ts_config - user entry point for timestamp mode
+ * @adapter: pointer to adapter struct
+ * @ifr: ioctl data
+ *
+ * Set hardware to requested mode. If unsupported, return an error with no
+ * changes. Otherwise, store the mode for future reference.
+ */
+int ngbe_ptp_set_ts_config(struct ngbe_adapter *adapter, struct ifreq *ifr)
+{
+	struct hwtstamp_config config;
+	int err;
+
+	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	err = ngbe_ptp_set_timestamp_mode(adapter, &config);
+	if (err)
+		return err;
+
+	/* save these settings for future reference */
+	memcpy(&adapter->tstamp_config, &config,
+	       sizeof(adapter->tstamp_config));
+
+	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+		-EFAULT : 0;
+}
+
+static void ngbe_ptp_link_speed_adjust(struct ngbe_adapter *adapter,
+				       u32 *shift, u32 *incval)
+{
+	/*
+	 * Scale the NIC cycle counter by a large factor so that
+	 * relatively small corrections to the frequency can be added
+	 * or subtracted. The drawbacks of a large factor include
+	 * (a) the clock register overflows more quickly, (b) the cycle
+	 * counter structure must be able to convert the systime value
+	 * to nanoseconds using only a multiplier and a right-shift,
+	 * and (c) the value must fit within the timinca register space
+	 * => math based on internal DMA clock rate and available bits
+	 *
+	 * Note that when there is no link, internal DMA clock is same as when
+	 * link speed is 10Gb. Set the registers correctly even when link is
+	 * down to preserve the clock setting
+	 */
+
+	*shift = NGBE_INCVAL_SHIFT_1GB;
+	*incval = NGBE_INCVAL_1GB;
+}
+
+/**
+ * ngbe_ptp_start_cyclecounter - create the cycle counter from hw
+ * @adapter: pointer to the adapter structure
+ *
+ * This function should be called to set the proper values for the TIMINCA
+ * register and tell the cyclecounter structure what the tick rate of SYSTIME
+ * is. It does not directly modify SYSTIME registers or the timecounter
+ * structure. It should be called whenever a new TIMINCA value is necessary,
+ * such as during initialization or when the link speed changes.
+ */ +void ngbe_ptp_start_cyclecounter(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + unsigned long flags; + struct cyclecounter cc; + u32 incval = 0; + + /* For some of the boards below this mask is technically incorrect. + * The timestamp mask overflows at approximately 61bits. However the + * particular hardware does not overflow on an even bitmask value. + * Instead, it overflows due to conversion of upper 32bits billions of + * cycles. Timecounters are not really intended for this purpose so + * they do not properly function if the overflow point isn't 2^N-1. + * However, the actual SYSTIME values in question take ~138 years to + * overflow. In practice this means they won't actually overflow. A + * proper fix to this problem would require modification of the + * timecounter delta calculations. + */ + cc.mask = CLOCKSOURCE_MASK(64); + cc.mult = 1; + cc.shift = 0; + + cc.read = ngbe_ptp_read; + ngbe_ptp_link_speed_adjust(adapter, &cc.shift, &incval); + wr32(hw, NGBE_TSEC_1588_INC, NGBE_TSEC_1588_INC_IV(incval)); + + /* update the base incval used to calculate frequency adjustment */ + WRITE_ONCE(adapter->base_incval, incval); + smp_mb(); + + /* need lock to prevent incorrect read while modifying cyclecounter */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + memcpy(&adapter->hw_cc, &cc, sizeof(adapter->hw_cc)); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); +} + +/** + * ngbe_ptp_reset + * @adapter: the ngbe private board structure + * + * When the MAC resets, all of the hardware configuration for timesync is + * reset. This function should be called to re-enable the device for PTP, + * using the last known settings. However, we do lose the current clock time, + * so we fallback to resetting it based on the kernel's realtime clock. + * + * This function will maintain the hwtstamp_config settings, and it retriggers + * the SDP output if it's enabled. 
+ */ +void ngbe_ptp_reset(struct ngbe_adapter *adapter) +{ + unsigned long flags; + + /* reset the hardware timestamping mode */ + ngbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + ngbe_ptp_start_cyclecounter(adapter); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + timecounter_init(&adapter->hw_tc, &adapter->hw_cc, + ktime_to_ns(ktime_get_real())); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + adapter->last_overflow_check = jiffies; +} + +/** + * ngbe_ptp_create_clock + * @adapter: the ngbe private adapter structure + * + * This function performs setup of the user entry point function table and + * initalizes the PTP clock device used by userspace to access the clock-like + * features of the PTP core. It will be called by ngbe_ptp_init, and may + * re-use a previously initialized clock (such as during a suspend/resume + * cycle). + */ + +static long ngbe_ptp_create_clock(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + long err; + + /* do nothing if we already have a clock device */ + if (!IS_ERR_OR_NULL(adapter->ptp_clock)) + return 0; + + snprintf(adapter->ptp_caps.name, sizeof(adapter->ptp_caps.name), + "%s", netdev->name); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 500000000; /* 10^-9s */ + adapter->ptp_caps.n_alarm = 0; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.n_per_out = 0; + adapter->ptp_caps.pps = 0; +#ifndef HAVE_NOT_PTT_ADJFREQ + adapter->ptp_caps.adjfreq = ngbe_ptp_adjfreq; +#endif + adapter->ptp_caps.adjtime = ngbe_ptp_adjtime; +#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 + adapter->ptp_caps.gettime64 = ngbe_ptp_gettime64; + adapter->ptp_caps.settime64 = ngbe_ptp_settime64; +#else + adapter->ptp_caps.gettime = ngbe_ptp_gettime; + adapter->ptp_caps.settime = ngbe_ptp_settime; +#endif + adapter->ptp_caps.enable = ngbe_ptp_feature_enable; + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + pci_dev_to_dev(adapter->pdev)); + if 
(IS_ERR(adapter->ptp_clock)) {
+		err = PTR_ERR(adapter->ptp_clock);
+		adapter->ptp_clock = NULL;
+		e_dev_err("ptp_clock_register failed\n");
+		return err;
+	} else {
+		e_dev_info("registered PHC device on %s\n", netdev->name);
+	}
+
+	/* Set the default timestamp mode to disabled here. We do this in
+	 * create_clock instead of initialization, because we don't want to
+	 * override the previous settings during a suspend/resume cycle.
+	 */
+	adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+	adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+	return 0;
+}
+
+/**
+ * ngbe_ptp_init
+ * @adapter: the ngbe private adapter structure
+ *
+ * This function performs the required steps for enabling ptp
+ * support. If ptp support has already been loaded it simply calls the
+ * cyclecounter init routine and exits.
+ */
+void ngbe_ptp_init(struct ngbe_adapter *adapter)
+{
+	/* initialize the spin lock first, since the user might call the clock
+	 * functions any time after we've initialized the ptp clock device.
+	 */
+	spin_lock_init(&adapter->tmreg_lock);
+
+	/* obtain a ptp clock device, or re-use an existing device */
+	if (ngbe_ptp_create_clock(adapter))
+		return;
+
+	/* we have a clock, so we can initialize work for timestamps now */
+	INIT_WORK(&adapter->ptp_tx_work, ngbe_ptp_tx_hwtstamp_work);
+
+	/* reset the ptp related hardware bits */
+	ngbe_ptp_reset(adapter);
+
+	/* enter the NGBE_PTP_RUNNING state */
+	set_bit(__NGBE_PTP_RUNNING, &adapter->state);
+}
+
+/**
+ * ngbe_ptp_suspend - stop ptp work items
+ * @adapter: pointer to adapter struct
+ *
+ * This function suspends ptp activity, and prevents more work from being
+ * generated, but does not destroy the clock device.
+ */ +void ngbe_ptp_suspend(struct ngbe_adapter *adapter) +{ + /* leave the NGBE_PTP_RUNNING STATE */ + if (!test_and_clear_bit(__NGBE_PTP_RUNNING, &adapter->state)) + return; + + adapter->flags2 &= ~NGBE_FLAG2_PTP_PPS_ENABLED; + + cancel_work_sync(&adapter->ptp_tx_work); + ngbe_ptp_clear_tx_timestamp(adapter); +} + +/** + * ngbe_ptp_stop - destroy the ptp_clock device + * @adapter: pointer to adapter struct + * + * Completely destroy the ptp_clock device, and disable all PTP related + * features. Intended to be run when the device is being closed. + */ +void ngbe_ptp_stop(struct ngbe_adapter *adapter) +{ + /* first, suspend ptp activity */ + ngbe_ptp_suspend(adapter); + + /* now destroy the ptp clock device */ + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + adapter->ptp_clock = NULL; + e_dev_info("removed PHC on %s\n", + adapter->netdev->name); + } +} diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_sriov.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_sriov.c new file mode 100644 index 000000000000..69ba0d682a7d --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_sriov.c @@ -0,0 +1,1590 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ngbe.h" +#include "ngbe_type.h" +#include "ngbe_sriov.h" + +#ifdef CONFIG_PCI_IOV +static int __ngbe_enable_sriov(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int num_vf_macvlans, i; + struct vf_macvlans *mv_list; + + adapter->flags |= NGBE_FLAG_SRIOV_ENABLED; + e_dev_info("SR-IOV enabled with %d VFs\n", adapter->num_vfs); + + /* Enable VMDq flag so device will be set in VM mode */ + adapter->flags |= NGBE_FLAG_VMDQ_ENABLED; + if (!adapter->ring_feature[RING_F_VMDQ].limit) + adapter->ring_feature[RING_F_VMDQ].limit = 1; + adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs; + + num_vf_macvlans = hw->mac.num_rar_entries - + (NGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs); + + adapter->mv_list = mv_list = kcalloc(num_vf_macvlans, + sizeof(struct vf_macvlans), + GFP_KERNEL); + if (mv_list) { + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&adapter->vf_mvs.l); + for (i = 0; i < num_vf_macvlans; i++) { + mv_list->vf = -1; + mv_list->free = true; + list_add(&mv_list->l, &adapter->vf_mvs.l); + mv_list++; + } + } + + /* Initialize default switching mode VEB */ + wr32m(hw, NGBE_PSR_CTL, + NGBE_PSR_CTL_SW_EN, NGBE_PSR_CTL_SW_EN); + + /* If call to enable VFs succeeded then allocate memory + * for per VF control structures. 
+ */ + adapter->vfinfo = kcalloc(adapter->num_vfs, + sizeof(struct vf_data_storage), GFP_KERNEL); + if (!adapter->vfinfo) { + adapter->num_vfs = 0; + e_dev_info("failed to allocate memory for VF Data Storage\n"); + return -ENOMEM; + } + + /* enable L2 switch and replication */ + adapter->flags |= NGBE_FLAG_SRIOV_L2SWITCH_ENABLE | + NGBE_FLAG_SRIOV_REPLICATION_ENABLE; + // NGBE_FLAG_SRIOV_REPLICATION_ENABLE not used + +#ifdef NGBE_DISABLE_VF_MQ + /* We do not support RSS w/ SR-IOV */ + adapter->ring_feature[RING_F_RSS].limit = 1; +#endif + + /* enable spoof checking for all VFs */ + for (i = 0; i < adapter->num_vfs; i++) { + /* enable spoof checking for all VFs */ + adapter->vfinfo[i].spoofchk_enabled = true; + +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN + /* We support VF RSS querying only for 82599 and x540 + * devices at the moment. These devices share RSS + * indirection table and RSS hash key with PF therefore + * we want to disable the querying by default. + */ + adapter->vfinfo[i].rss_query_enabled = 0; + +#endif + + /* Untrust all VFs */ + adapter->vfinfo[i].trusted = false; + + /* set the default xcast mode */ + adapter->vfinfo[i].xcast_mode = NGBEVF_XCAST_MODE_NONE; + } + + wr32m(hw, NGBE_CFG_PORT_CTL, + NGBE_CFG_PORT_CTL_NUM_VT_MASK, NGBE_CFG_PORT_CTL_NUM_VT_8); + + return 0; +} + +#define NGBE_BA4_ADDR(vfinfo, reg) \ + ((u8 __iomem *)((u8 *)(vfinfo)->b4_addr + (reg))) + +/** + * ngbe_get_vfs - Find and take references to all vf devices + * @adapter: Pointer to adapter struct + */ +static void ngbe_get_vfs(struct ngbe_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + u16 vendor = pdev->vendor; + struct pci_dev *vfdev; + int vf = 0; + u16 vf_id; + int pos; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return; + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); + + vfdev = pci_get_device(vendor, vf_id, NULL); + for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) { + struct vf_data_storage *vfinfo; + if 
(!vfdev->is_virtfn)
+			continue;
+		if (vfdev->physfn != pdev)
+			continue;
+		if (vf >= adapter->num_vfs)
+			continue;
+
+		/*pci_dev_get(vfdev);*/
+		vfinfo = &adapter->vfinfo[vf];
+		vfinfo->vfdev = vfdev;
+		vfinfo->b4_addr = ioremap(pci_resource_start(vfdev, 4), 64);
+
+		++vf;
+	}
+}
+
+/**
+ * ngbe_put_vfs - Release references to all vf devices
+ * @adapter: Pointer to adapter struct
+ */
+static void ngbe_put_vfs(struct ngbe_adapter *adapter)
+{
+	unsigned int num_vfs = adapter->num_vfs, vf;
+
+	/* put the reference to all of the vf devices */
+	for (vf = 0; vf < num_vfs; ++vf) {
+		struct vf_data_storage *vfinfo;
+		struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
+
+		if (!vfdev)
+			continue;
+
+		vfinfo = &adapter->vfinfo[vf];
+		iounmap(vfinfo->b4_addr);
+		vfinfo->b4_addr = NULL;
+		vfinfo->vfdev = NULL;
+		/*pci_dev_put(vfdev);*/
+	}
+}
+
+/* Note this function is called when the user wants to enable SR-IOV
+ * VFs using the now deprecated module parameter
+ */
+void ngbe_enable_sriov(struct ngbe_adapter *adapter)
+{
+	int pre_existing_vfs = 0;
+
+	if (!(adapter->flags & NGBE_FLAG_MSIX_ENABLED)) {
+		e_dev_warn("SR-IOV already disabled\n");
+		return;
+	}
+
+	pre_existing_vfs = pci_num_vf(adapter->pdev);
+	if (!pre_existing_vfs && !adapter->num_vfs)
+		return;
+
+	/* If there are pre-existing VFs then we have to force
+	 * use of that many - over ride any module parameter value.
+	 * This may result from the user unloading the PF driver
+	 * while VFs were assigned to guest VMs or because the VFs
+	 * have been created via the new PCI SR-IOV sysfs interface.
+ */
+	if (pre_existing_vfs) {
+		adapter->num_vfs = pre_existing_vfs;
+		dev_warn(&adapter->pdev->dev,
+			 "Virtual Functions already enabled for this device -"
+			 "Please reload all VF drivers to avoid spoofed packet "
+			 "errors\n");
+	} else {
+		int err;
+		/*
+		 * The sapphire supports up to 64 VFs per physical function
+		 * but this implementation limits allocation to 63 so that
+		 * basic networking resources are still available to the
+		 * physical function. If the user requests greater than
+		 * 63 VFs then it is an error - reset to default of zero.
+		 */
+		adapter->num_vfs = min_t(unsigned int, adapter->num_vfs,
+					 NGBE_MAX_VFS_DRV_LIMIT);
+
+		err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+		if (err) {
+			e_err(probe, "Failed to enable PCI sriov: %d\n", err);
+			adapter->num_vfs = 0;
+			return;
+		}
+	}
+
+	if (!__ngbe_enable_sriov(adapter)) {
+		ngbe_get_vfs(adapter);
+		return;
+	}
+
+	/* If we have gotten to this point then there is no memory available
+	 * to manage the VF devices - print message and bail.
+ */
+	e_err(probe, "Unable to allocate memory for VF Data Storage - "
+	      "SRIOV disabled\n");
+	ngbe_disable_sriov(adapter);
+}
+#endif /* CONFIG_PCI_IOV */
+
+int ngbe_disable_sriov(struct ngbe_adapter *adapter)
+{
+	struct ngbe_hw *hw = &adapter->hw;
+
+#ifdef CONFIG_PCI_IOV
+	/*
+	 * If our VFs are assigned we cannot shut down SR-IOV
+	 * without causing issues, so just leave the hardware
+	 * available but disabled
+	 */
+	if (pci_vfs_assigned(adapter->pdev)) {
+		e_dev_warn("Unloading driver while VFs are assigned -"
+			   "VFs will not be deallocated\n");
+		return -EPERM;
+	}
+	/* disable iov and allow time for transactions to clear */
+	pci_disable_sriov(adapter->pdev);
+#endif
+
+	/* set num VFs to 0 to prevent access to vfinfo */
+	adapter->num_vfs = 0;
+
+	/* put the reference to all of the vf devices */
+#ifdef CONFIG_PCI_IOV
+	ngbe_put_vfs(adapter);
+#endif
+	/* free VF control structures */
+	kfree(adapter->vfinfo);
+	adapter->vfinfo = NULL;
+
+	/* free macvlan list */
+	kfree(adapter->mv_list);
+	adapter->mv_list = NULL;
+
+	/* if SR-IOV is already disabled then there is nothing to do */
+	if (!(adapter->flags & NGBE_FLAG_SRIOV_ENABLED))
+		return 0;
+
+	/* set default pool back to 0 */
+	wr32m(hw, NGBE_PSR_VM_CTL,
+	      NGBE_PSR_VM_CTL_POOL_MASK, 0);
+	NGBE_WRITE_FLUSH(hw);
+
+	adapter->ring_feature[RING_F_VMDQ].offset = 0;
+
+	/* take a breather then clean up driver data */
+	msleep(100);
+
+	adapter->flags &= ~NGBE_FLAG_SRIOV_ENABLED;
+
+	/* Disable VMDq flag so device will be set in VM mode */
+	if
(adapter->ring_feature[RING_F_VMDQ].limit == 1) { + adapter->flags &= ~NGBE_FLAG_VMDQ_ENABLED; + } + + return 0; +} + +static int ngbe_set_vf_multicasts(struct ngbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u16 entries = (msgbuf[0] & NGBE_VT_MSGINFO_MASK) + >> NGBE_VT_MSGINFO_SHIFT; + u16 *hash_list = (u16 *)&msgbuf[1]; + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + struct ngbe_hw *hw = &adapter->hw; + int i; + u32 vector_bit; + u32 vector_reg; + u32 mta_reg; + u32 vmolr = rd32(hw, NGBE_PSR_VM_L2CTL(vf)); + + /* only so many hash values supported */ + entries = min(entries, (u16)NGBE_MAX_VF_MC_ENTRIES); + + /* salt away the number of multi cast addresses assigned + * to this VF for later use to restore when the PF multi cast + * list changes + */ + vfinfo->num_vf_mc_hashes = entries; + + /* VFs are limited to using the MTA hash table for their multicast + * addresses */ + for (i = 0; i < entries; i++) + vfinfo->vf_mc_hashes[i] = hash_list[i]; + + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { + vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; + vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; + /* errata 5: maintain a copy of the register table conf */ + mta_reg = hw->mac.mta_shadow[vector_reg]; + mta_reg |= (1 << vector_bit); + hw->mac.mta_shadow[vector_reg] = mta_reg; + wr32(hw, NGBE_PSR_MC_TBL(vector_reg), mta_reg); + } + vmolr |= NGBE_PSR_VM_L2CTL_ROMPE; + wr32(hw, NGBE_PSR_VM_L2CTL(vf), vmolr); + + return 0; +} + +void ngbe_restore_vf_multicasts(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct vf_data_storage *vfinfo; + u32 i, j; + u32 vector_bit; + u32 vector_reg; + + for (i = 0; i < adapter->num_vfs; i++) { + u32 vmolr = rd32(hw, NGBE_PSR_VM_L2CTL(i)); + vfinfo = &adapter->vfinfo[i]; + for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { + hw->addr_ctrl.mta_in_use++; + vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; + vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; + wr32m(hw, NGBE_PSR_MC_TBL(vector_reg), + 1 << 
vector_bit, 1 << vector_bit); + /* errata 5: maintain a copy of the reg table conf */ + hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); + } + if (vfinfo->num_vf_mc_hashes) + vmolr |= NGBE_PSR_VM_L2CTL_ROMPE; + else + vmolr &= ~NGBE_PSR_VM_L2CTL_ROMPE; + wr32(hw, NGBE_PSR_VM_L2CTL(i), vmolr); + } + + /* Restore any VF macvlans */ + ngbe_full_sync_mac_table(adapter); +} + +int ngbe_set_vf_vlan(struct ngbe_adapter *adapter, int add, int vid, u16 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; + + return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add); +} + +static int ngbe_set_vf_lpe(struct ngbe_adapter *adapter, u32 max_frame, + u32 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 max_frs, reg_val; + + /* + * For sapphire we have to keep all PFs and VFs operating with + * the same max_frame value in order to avoid sending an oversize + * frame to a VF. In order to guarantee this is handled correctly + * for all cases we have several special exceptions to take into + * account before we can enable the VF for receive + */ + struct net_device *dev = adapter->netdev; + int pf_max_frame = dev->mtu + ETH_HLEN; + u32 vf_shift, vfre; + s32 err = 0; + + + + switch (adapter->vfinfo[vf].vf_api) { + case ngbe_mbox_api_11: + case ngbe_mbox_api_12: + case ngbe_mbox_api_13: + /* + * Version 1.1 supports jumbo frames on VFs if PF has + * jumbo frames enabled which means legacy VFs are + * disabled + */ + if (pf_max_frame > ETH_FRAME_LEN) + break; + fallthrough; + default: + /* + * If the PF or VF are running w/ jumbo frames enabled + * we need to shut down the VF Rx path as we cannot + * support jumbo frames on legacy VFs + */ + if ((pf_max_frame > ETH_FRAME_LEN) || + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) + err = -EINVAL; + break; + } + + /* determine VF receive enable location */ + vf_shift = vf; + + /* enable or disable receive depending on error */ + vfre = rd32(hw, 
NGBE_RDM_POOL_RE); + if (err) + vfre &= ~(1 << vf_shift); + else + vfre |= 1 << vf_shift; + wr32(hw, NGBE_RDM_POOL_RE, vfre); + + if (err) { + e_err(drv, "VF max_frame %d out of range\n", max_frame); + return err; + } + + /* pull current max frame size from hardware */ + max_frs = DIV_ROUND_UP(max_frame, 1024); + reg_val = rd32(hw, NGBE_MAC_WDG_TIMEOUT) & + NGBE_MAC_WDG_TIMEOUT_WTO_MASK; + if (max_frs > (reg_val + NGBE_MAC_WDG_TIMEOUT_WTO_DELTA)) { + wr32(hw, NGBE_MAC_WDG_TIMEOUT, + max_frs - NGBE_MAC_WDG_TIMEOUT_WTO_DELTA); + } + + e_info(hw, "VF requests change max MTU to %d\n", max_frame); + + return 0; +} + +void ngbe_set_vmolr(struct ngbe_hw *hw, u16 vf, bool aupe) +{ + u32 vmolr = rd32(hw, NGBE_PSR_VM_L2CTL(vf)); + vmolr |= NGBE_PSR_VM_L2CTL_BAM; + if (aupe) + vmolr |= NGBE_PSR_VM_L2CTL_AUPE; + else + vmolr &= ~NGBE_PSR_VM_L2CTL_AUPE; + wr32(hw, NGBE_PSR_VM_L2CTL(vf), vmolr); +} + +static void ngbe_set_vmvir(struct ngbe_adapter *adapter, + u16 vid, u16 qos, u16 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | + NGBE_TDM_VLAN_INS_VLANA_DEFAULT; + + wr32(hw, NGBE_TDM_VLAN_INS(vf), vmvir); +} + +static void ngbe_clear_vmvir(struct ngbe_adapter *adapter, u32 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + + wr32(hw, NGBE_TDM_VLAN_INS(vf), 0); +} + +static inline void ngbe_vf_reset_event(struct ngbe_adapter *adapter, u16 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + u8 num_tcs = netdev_get_num_tc(adapter->netdev); + + /* add PF assigned VLAN or VLAN 0 */ + ngbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); + + /* reset offloads to defaults */ + ngbe_set_vmolr(hw, vf, !vfinfo->pf_vlan); + + /* set outgoing tags for VFs */ + if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { + ngbe_clear_vmvir(adapter, vf); + } else { + if (vfinfo->pf_qos || !num_tcs) + ngbe_set_vmvir(adapter, vfinfo->pf_vlan, + vfinfo->pf_qos, vf); + else + ngbe_set_vmvir(adapter, 
vfinfo->pf_vlan, + adapter->default_up, vf); + + if (vfinfo->spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + } + + /* reset multicast table array for vf */ + adapter->vfinfo[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + ngbe_set_rx_mode(adapter->netdev); + + ngbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + + /* reset VF api back to unknown */ + adapter->vfinfo[vf].vf_api = ngbe_mbox_api_10; +} + +int ngbe_set_vf_mac(struct ngbe_adapter *adapter, + u16 vf, unsigned char *mac_addr) +{ + s32 retval = 0; + ngbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + retval = ngbe_add_mac_filter(adapter, mac_addr, vf); + if (retval >= 0) + memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN); + else + memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN); + + return retval; +} + +static int ngbe_negotiate_vf_api(struct ngbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + int api = msgbuf[1]; + + switch (api) { + case ngbe_mbox_api_10: + case ngbe_mbox_api_11: + case ngbe_mbox_api_12: + case ngbe_mbox_api_13: + adapter->vfinfo[vf].vf_api = api; + return 0; + default: + break; + } + + e_info(drv, "VF %d requested invalid api version %u\n", vf, api); + + return -1; +} + +static int ngbe_get_vf_queues(struct ngbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct net_device *dev = adapter->netdev; + unsigned int default_tc = 0; + u8 num_tcs = netdev_get_num_tc(dev); + + /* verify the PF is supporting the correct APIs */ + switch (adapter->vfinfo[vf].vf_api) { + case ngbe_mbox_api_20: + case ngbe_mbox_api_11: + break; + default: + return -1; + } + + /* only allow 1 Tx queue for bandwidth limiting */ + msgbuf[NGBE_VF_TX_QUEUES] = 1; + msgbuf[NGBE_VF_RX_QUEUES] = 1; + + /* notify VF of need for VLAN tag stripping, and correct queue */ + if (num_tcs) + msgbuf[NGBE_VF_TRANS_VLAN] = num_tcs; + else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos) + 
msgbuf[NGBE_VF_TRANS_VLAN] = 1; + else + msgbuf[NGBE_VF_TRANS_VLAN] = 0; + + /* notify VF of default queue */ + msgbuf[NGBE_VF_DEF_QUEUE] = default_tc; + + return 0; +} + +static int ngbe_get_vf_link_status(struct ngbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + /* verify the PF is supporting the correct APIs */ + switch (adapter->vfinfo[vf].vf_api) { + case ngbe_mbox_api_11: + case ngbe_mbox_api_12: + case ngbe_mbox_api_13: + break; + default: + return -1; + } + + if (adapter->link_up) + msgbuf[1] = NGBE_VF_STATUS_LINKUP; + else + msgbuf[1] = 0; + + return 0; +} + +static int ngbe_set_vf_macvlan(struct ngbe_adapter *adapter, + u16 vf, int index, unsigned char *mac_addr) +{ + struct list_head *pos; + struct vf_macvlans *entry; + s32 retval = 0; + + if (index <= 1) { + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->vf == vf) { + entry->vf = -1; + entry->free = true; + entry->is_macvlan = false; + ngbe_del_mac_filter(adapter, + entry->vf_macvlan, vf); + } + } + } + + /* + * If index was zero then we were asked to clear the uc list + * for the VF. We're done. + */ + if (!index) + return 0; + + entry = NULL; + + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->free) + break; + } + + /* + * If we traversed the entire list and didn't find a free entry + * then we're out of space on the RAR table. Also entry may + * be NULL because the original memory allocation for the list + * failed, which is not fatal but does mean we can't support + * VF requests for MACVLAN because we couldn't allocate + * memory for the list management required. 
+ */ + if (!entry || !entry->free) + return -ENOSPC; + + retval = ngbe_add_mac_filter(adapter, mac_addr, vf); + if (retval >= 0) { + entry->free = false; + entry->is_macvlan = true; + entry->vf = vf; + memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); + } + + return retval; +} + +#ifdef CONFIG_PCI_IOV +int ngbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) +{ + unsigned char vf_mac_addr[6]; + struct ngbe_adapter *adapter = pci_get_drvdata(pdev); + unsigned int vfn = (event_mask & 0x7); + bool enable = ((event_mask & 0x10000000U) != 0); + + if (enable) { + memset(vf_mac_addr, 0, ETH_ALEN); + memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); + } + + return 0; +} +#endif /* CONFIG_PCI_IOV */ + +static inline void ngbe_write_qde(struct ngbe_adapter *adapter, u32 vf, + u32 qde) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 q_per_pool = 1; + u32 reg = 0; + u32 i = vf * q_per_pool; + + reg = rd32(hw, NGBE_RDM_PF_QDE); + reg |= qde << i; + + wr32(hw, NGBE_RDM_PF_QDE, reg); + +} + +static inline void ngbe_write_hide_vlan(struct ngbe_adapter *adapter, u32 vf, + u32 hide_vlan) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 q_per_pool = 1; + u32 reg = 0; + u32 i = vf * q_per_pool; + reg = rd32(hw, NGBE_RDM_PF_HIDE); + + if (hide_vlan == 1) + reg |= hide_vlan << i; + else + reg &= hide_vlan << i; + + wr32(hw, NGBE_RDM_PF_HIDE, reg); +} + +static int ngbe_vf_reset_msg(struct ngbe_adapter *adapter, u16 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; + u32 reg, vf_shift; + u32 msgbuf[4] = {0, 0, 0, 0}; + u8 *addr = (u8 *)(&msgbuf[1]); + struct net_device *dev = adapter->netdev; + int pf_max_frame; + + e_info(probe, "VF Reset msg received from vf %d\n", vf); + + /* reset the filters for the device */ + ngbe_vf_reset_event(adapter, vf); + + /* set vf mac address */ + if (!is_zero_ether_addr(vf_mac)) + ngbe_set_vf_mac(adapter, vf, vf_mac); + + vf_shift = vf; + + /* enable transmit for vf */ + 
wr32m(hw, NGBE_TDM_POOL_TE, + 1 << vf, 1 << vf); + + /* force drop enable for all VF Rx queues */ + ngbe_write_qde(adapter, vf, 1); + + /* enable receive for vf */ + reg = rd32(hw, NGBE_RDM_POOL_RE); + reg |= 1 << vf_shift; + + pf_max_frame = dev->mtu + ETH_HLEN; + + if (pf_max_frame > ETH_FRAME_LEN) + reg &= ~(1 << vf_shift); + wr32(hw, NGBE_RDM_POOL_RE, reg); + + /* enable VF mailbox for further messages */ + adapter->vfinfo[vf].clear_to_send = true; + + /* reply to reset with ack and vf mac address */ + msgbuf[0] = NGBE_VF_RESET; + if (!is_zero_ether_addr(vf_mac)) { + msgbuf[0] |= NGBE_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] |= NGBE_VT_MSGTYPE_NACK; + dev_warn(pci_dev_to_dev(adapter->pdev), + "VF %d has no MAC address assigned, you may have to " + "assign one manually\n", vf); + } + + /* + * Piggyback the multicast filter type so VF can compute the + * correct vectors + */ + msgbuf[3] = hw->mac.mc_filter_type; + ngbe_write_mbx(hw, msgbuf, NGBE_VF_PERMADDR_MSG_LEN, vf); + + return 0; +} + +static int ngbe_set_vf_mac_addr(struct ngbe_adapter *adapter, + u32 *msgbuf, u16 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } + + if (adapter->vfinfo[vf].pf_set_mac && + memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac, + ETH_ALEN)) { + u8 *pm = adapter->vfinfo[vf].vf_mac_addresses; + e_warn(drv, + "VF %d attempted to set a new MAC address but it already " + "has an administratively set MAC address " + "%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", + vf, pm[0], pm[1], pm[2], pm[3], pm[4], pm[5]); + e_warn(drv, "Check the VF driver and if it is not using the " + "correct MAC address you may need to reload the VF " + "driver\n"); + return -1; + } + return ngbe_set_vf_mac(adapter, vf, new_mac) < 0; +} + +#ifdef CONFIG_PCI_IOV +static int ngbe_find_vlvf_entry(struct ngbe_hw *hw, u32 vlan) +{ + u32 vlvf; + s32 regindex; + + /* short cut 
the special case */ + if (vlan == 0) + return 0; + + /* Search for the vlan id in the VLVF entries */ + for (regindex = 1; regindex < NGBE_PSR_VLAN_SWC_ENTRIES; regindex++) { + wr32(hw, NGBE_PSR_VLAN_SWC_IDX, regindex); + vlvf = rd32(hw, NGBE_PSR_VLAN_SWC); + if ((vlvf & VLAN_VID_MASK) == vlan) + break; + } + + /* Return a negative value if not found */ + if (regindex >= NGBE_PSR_VLAN_SWC_ENTRIES) + regindex = -1; + + return regindex; +} +#endif /* CONFIG_PCI_IOV */ + +static int ngbe_set_vf_vlan_msg(struct ngbe_adapter *adapter, + u32 *msgbuf, u16 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + int add = (msgbuf[0] & NGBE_VT_MSGINFO_MASK) >> NGBE_VT_MSGINFO_SHIFT; + int vid = (msgbuf[1] & NGBE_PSR_VLAN_SWC_VLANID_MASK); + int err; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (adapter->vfinfo[vf].pf_vlan || tcs) { + e_warn(drv, + "VF %d attempted to override administratively set VLAN " + "configuration\n" + "Reload the VF driver to resume operations\n", + vf); + return -1; + } + + if (add) + adapter->vfinfo[vf].vlan_count++; + else if (adapter->vfinfo[vf].vlan_count) + adapter->vfinfo[vf].vlan_count--; + + /* in case of promiscuous mode any VLAN filter set for a VF must + * also have the PF pool added to it. + */ + if (add && adapter->netdev->flags & IFF_PROMISC) + err = ngbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + + err = ngbe_set_vf_vlan(adapter, add, vid, vf); + if (!err && adapter->vfinfo[vf].spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + +#ifdef CONFIG_PCI_IOV + /* Go through all the checks to see if the VLAN filter should + * be wiped completely. + */ + if (!add && adapter->netdev->flags & IFF_PROMISC) { + u32 bits = 0, vlvf; + s32 reg_ndx; + + reg_ndx = ngbe_find_vlvf_entry(hw, vid); + if (reg_ndx < 0) + goto out; + wr32(hw, NGBE_PSR_VLAN_SWC_IDX, reg_ndx); + vlvf = rd32(hw, NGBE_PSR_VLAN_SWC); + /* See if any other pools are set for this VLAN filter + * entry other than the PF. 
+ */ + if (VMDQ_P(0) < 32) { + bits = rd32(hw, NGBE_PSR_VLAN_SWC_VM_L); + bits &= ~(1 << VMDQ_P(0)); + } else { + bits &= ~(1 << (VMDQ_P(0) - 32)); + bits |= rd32(hw, NGBE_PSR_VLAN_SWC_VM_L); + } + + /* If the filter was removed then ensure PF pool bit + * is cleared if the PF only added itself to the pool + * because the PF is in promiscuous mode. + */ + if ((vlvf & VLAN_VID_MASK) == vid && +#ifndef HAVE_VLAN_RX_REGISTER + !test_bit(vid, adapter->active_vlans) && +#endif + !bits) + ngbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + } + +out: +#endif + return err; +} + +static int ngbe_set_vf_macvlan_msg(struct ngbe_adapter *adapter, + u32 *msgbuf, u16 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + int index = (msgbuf[0] & NGBE_VT_MSGINFO_MASK) >> + NGBE_VT_MSGINFO_SHIFT; + int err; + + if (adapter->vfinfo[vf].pf_set_mac && index > 0) { + e_warn(drv, + "VF %d requested MACVLAN filter but is administratively denied\n", + vf); + return -1; + } + + /* An non-zero index indicates the VF is setting a filter */ + if (index) { + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } +#if defined(IFLA_VF_MAX) && defined(HAVE_VF_SPOOFCHK_CONFIGURE) + /* + * If the VF is allowed to set MAC filters then turn off + * anti-spoofing to avoid false positives. 
+ */ + if (adapter->vfinfo[vf].spoofchk_enabled) + ngbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false); +#endif /* defined(IFLA_VF_MAX) && defined(HAVE_VF_SPOOFCHK_CONFIGURE) */ + } + + err = ngbe_set_vf_macvlan(adapter, vf, index, new_mac); + if (err == -ENOSPC) + e_warn(drv, + "VF %d has requested a MACVLAN filter but there is no " + "space for it\n", + vf); + + return err < 0; +} + +static int ngbe_update_vf_xcast_mode(struct ngbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + int xcast_mode = msgbuf[1]; + u32 vmolr, disable, enable; + + /* verify the PF is supporting the correct APIs */ + switch (adapter->vfinfo[vf].vf_api) { + case ngbe_mbox_api_12: + /* promisc introduced in 1.3 version */ + if (xcast_mode == NGBEVF_XCAST_MODE_PROMISC) + return -EOPNOTSUPP; + /* fall through */ + case ngbe_mbox_api_13: + break; + default: + return -EOPNOTSUPP; + } +#if 0 /* trust all vf */ + if (xcast_mode > NGBEVF_XCAST_MODE_MULTI && + !adapter->vfinfo[vf].trusted) { + xcast_mode = NGBEVF_XCAST_MODE_MULTI; + } +#endif /* trust all vf */ + if (adapter->vfinfo[vf].xcast_mode == xcast_mode) + goto out; + + switch (xcast_mode) { + case NGBEVF_XCAST_MODE_NONE: + disable = NGBE_PSR_VM_L2CTL_BAM | + NGBE_PSR_VM_L2CTL_ROMPE | + NGBE_PSR_VM_L2CTL_MPE | + NGBE_PSR_VM_L2CTL_UPE | + NGBE_PSR_VM_L2CTL_VPE; + enable = 0; + break; + case NGBEVF_XCAST_MODE_MULTI: + disable = NGBE_PSR_VM_L2CTL_MPE | + NGBE_PSR_VM_L2CTL_UPE | + NGBE_PSR_VM_L2CTL_VPE; + enable = NGBE_PSR_VM_L2CTL_BAM | + NGBE_PSR_VM_L2CTL_ROMPE; + break; + case NGBEVF_XCAST_MODE_ALLMULTI: + disable = NGBE_PSR_VM_L2CTL_UPE | + NGBE_PSR_VM_L2CTL_VPE; + enable = NGBE_PSR_VM_L2CTL_BAM | + NGBE_PSR_VM_L2CTL_ROMPE | + NGBE_PSR_VM_L2CTL_MPE; + break; + case NGBEVF_XCAST_MODE_PROMISC: + disable = 0; + enable = NGBE_PSR_VM_L2CTL_BAM | + NGBE_PSR_VM_L2CTL_ROMPE | + NGBE_PSR_VM_L2CTL_MPE | + NGBE_PSR_VM_L2CTL_UPE | + NGBE_PSR_VM_L2CTL_VPE; + break; + default: + return -EOPNOTSUPP; + } + + vmolr = 
rd32(hw, NGBE_PSR_VM_L2CTL(vf)); + vmolr &= ~disable; + vmolr |= enable; + wr32(hw, NGBE_PSR_VM_L2CTL(vf), vmolr); + + adapter->vfinfo[vf].xcast_mode = xcast_mode; + +out: + msgbuf[1] = xcast_mode; + + return 0; +} + +static int ngbe_rcv_msg_from_vf(struct ngbe_adapter *adapter, u16 vf) +{ + u16 mbx_size = NGBE_VXMAILBOX_SIZE; + u32 msgbuf[NGBE_VXMAILBOX_SIZE]; + struct ngbe_hw *hw = &adapter->hw; + s32 retval; + + retval = ngbe_read_mbx(hw, msgbuf, mbx_size, vf); + + if (retval) { + pr_err("Error receiving message from VF\n"); + return retval; + } + + /* this is a message we already processed, do nothing */ + if (msgbuf[0] & (NGBE_VT_MSGTYPE_ACK | NGBE_VT_MSGTYPE_NACK)) + return retval; + + /* flush the ack before we write any messages back */ + NGBE_WRITE_FLUSH(hw); + + if (msgbuf[0] == NGBE_VF_RESET) + return ngbe_vf_reset_msg(adapter, vf); + + /* + * until the vf completes a virtual function reset it should not be + * allowed to start any configuration. + */ + + if (!adapter->vfinfo[vf].clear_to_send) { + msgbuf[0] |= NGBE_VT_MSGTYPE_NACK; + ngbe_write_mbx(hw, msgbuf, 1, vf); + return retval; + } + + switch ((msgbuf[0] & 0xFFFF)) { + case NGBE_VF_SET_MAC_ADDR: + retval = ngbe_set_vf_mac_addr(adapter, msgbuf, vf); + break; + case NGBE_VF_SET_MULTICAST: + retval = ngbe_set_vf_multicasts(adapter, msgbuf, vf); + break; + case NGBE_VF_SET_VLAN: + retval = ngbe_set_vf_vlan_msg(adapter, msgbuf, vf); + break; + case NGBE_VF_SET_LPE: + if (msgbuf[1] > NGBE_MAX_JUMBO_FRAME_SIZE) { + e_err(drv, "VF max_frame %d exceed MAX_JUMBO_FRAME_SIZE\n", msgbuf[1]); + return -EINVAL; + } + retval = ngbe_set_vf_lpe(adapter, msgbuf[1], vf); + break; + case NGBE_VF_SET_MACVLAN: + retval = ngbe_set_vf_macvlan_msg(adapter, msgbuf, vf); + break; + case NGBE_VF_API_NEGOTIATE: + retval = ngbe_negotiate_vf_api(adapter, msgbuf, vf); + break; + case NGBE_VF_GET_QUEUES: + retval = ngbe_get_vf_queues(adapter, msgbuf, vf); + break; + case NGBE_VF_UPDATE_XCAST_MODE: + retval = 
ngbe_update_vf_xcast_mode(adapter, msgbuf, vf); + break; + case NGBE_VF_GET_LINK_STATUS: + retval = ngbe_get_vf_link_status(adapter, msgbuf, vf); + break; + case NGBE_VF_BACKUP: + break; + default: + e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); + retval = NGBE_ERR_MBX; + break; + } + + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= NGBE_VT_MSGTYPE_NACK; + else + msgbuf[0] |= NGBE_VT_MSGTYPE_ACK; + + msgbuf[0] |= NGBE_VT_MSGTYPE_CTS; + + ngbe_write_mbx(hw, msgbuf, mbx_size, vf); + + return retval; +} + +static void ngbe_rcv_ack_from_vf(struct ngbe_adapter *adapter, u16 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 msg = NGBE_VT_MSGTYPE_NACK; + + /* if device isn't clear to send it shouldn't be reading either */ + if (!adapter->vfinfo[vf].clear_to_send) + ngbe_write_mbx(hw, &msg, 1, vf); +} + +void ngbe_msg_task(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u16 vf; + + for (vf = 0; vf < adapter->num_vfs; vf++) { + /* process any reset requests */ + if (!ngbe_check_for_rst(hw, vf)) + ngbe_vf_reset_event(adapter, vf); + + /* process any messages pending */ + if (!ngbe_check_for_msg(hw, vf)) + ngbe_rcv_msg_from_vf(adapter, vf); + + /* process any acks */ + if (!ngbe_check_for_ack(hw, vf)) + ngbe_rcv_ack_from_vf(adapter, vf); + } +} + +void ngbe_disable_tx_rx(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + + /* disable transmit and receive for all vfs */ + wr32(hw, NGBE_TDM_POOL_TE, 0); + wr32(hw, NGBE_RDM_POOL_RE, 0); +} + +#ifdef HAVE_NDO_SET_VF_TRUST +static inline void ngbe_ping_vf(struct ngbe_adapter *adapter, int vf) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 ping; + + ping = NGBE_PF_CONTROL_MSG; + if (adapter->vfinfo[vf].clear_to_send) + ping |= NGBE_VT_MSGTYPE_CTS; + ngbe_write_mbx(hw, &ping, 1, vf); +} +#endif + +void ngbe_ping_all_vfs(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 ping; + u16 i; + + for (i = 0 ; i < adapter->num_vfs; i++) 
{ + ping = NGBE_PF_CONTROL_MSG; + if (adapter->vfinfo[i].clear_to_send) + ping |= NGBE_VT_MSGTYPE_CTS; + ngbe_write_mbx(hw, &ping, 1, i); + } +} + +void ngbe_ping_all_vfs_with_link_status(struct ngbe_adapter *adapter, bool link_up) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 msgbuf[2]; + u16 i; + u32 link_speed = adapter->link_speed; + + msgbuf[0] = NGBE_NOFITY_VF_LINK_STATUS | NGBE_PF_CONTROL_MSG; + msgbuf[1] = (link_speed << 1) | link_up; + for (i = 0 ; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[i].clear_to_send) + msgbuf[0] |= NGBE_VT_MSGTYPE_CTS; + ngbe_write_mbx(hw, msgbuf, 2, i); + } +} + +#ifdef HAVE_NDO_SET_VF_TRUST +int ngbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->num_vfs) + return -EINVAL; + + /* nothing to do */ + if (adapter->vfinfo[vf].trusted == setting) + return 0; + + adapter->vfinfo[vf].trusted = setting; + + /* reset VF to reconfigure features */ + adapter->vfinfo[vf].clear_to_send = false; + ngbe_ping_vf(adapter, vf); + + e_info(drv, "VF %u is %strusted\n", vf, setting ? 
"" : "not "); + + return 0; +} +#endif + +#ifdef CONFIG_PCI_IOV +static int ngbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(dev); + int err = 0; + int i; + int pre_existing_vfs = pci_num_vf(dev); + + if (!(adapter->flags & NGBE_FLAG_SRIOV_CAPABLE)) { + e_dev_warn("SRIOV not supported on this device\n"); + return -EOPNOTSUPP; + } + + if (!(adapter->flags & NGBE_FLAG_MSIX_ENABLED)) { + e_dev_warn("SR-IOV already disabled\n"); + return -EOPNOTSUPP; + } + + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + err = ngbe_disable_sriov(adapter); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + goto out; + + if (err) + goto err_out; + + /* While the SR-IOV capability structure reports total VFs to be + * 8 we limit the actual number that can be allocated to 7 so + * that some transmit/receive resources can be reserved to the + * PF. The PCI bus driver already checks for other values out of + * range. + */ + if ((num_vfs + adapter->num_vmdqs) > NGBE_MAX_VF_FUNCTIONS) { + err = -EPERM; + goto err_out; + } + + adapter->num_vfs = num_vfs; + + err = __ngbe_enable_sriov(adapter); + if (err) + goto err_out; + + for (i = 0; i < adapter->num_vfs; i++) + ngbe_vf_configuration(dev, (i | 0x10000000)); + + err = pci_enable_sriov(dev, num_vfs); + if (err) { + e_dev_warn("Failed to enable PCI sriov: %d\n", err); + goto err_out; + } + ngbe_get_vfs(adapter); + msleep(100); + ngbe_sriov_reinit(adapter); +out: + return num_vfs; +err_out: + return err; +} + +static int ngbe_pci_sriov_disable(struct pci_dev *dev) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(dev); + int err; + u32 current_flags = adapter->flags; + + err = ngbe_disable_sriov(adapter); + + /* Only reinit if no error and state changed */ + if (!err && current_flags != adapter->flags) + ngbe_sriov_reinit(adapter); + + return err; +} +#endif + +int ngbe_pci_sriov_configure(struct pci_dev __maybe_unused *dev, + int __maybe_unused num_vfs) +{ +#ifdef 
CONFIG_PCI_IOV + if (num_vfs == 0) + return ngbe_pci_sriov_disable(dev); + else + return ngbe_pci_sriov_enable(dev, num_vfs); +#else + return 0; +#endif +} + +#ifdef IFLA_VF_MAX +int ngbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + s32 retval = 0; + struct ngbe_adapter *adapter = netdev_priv(netdev); + + if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs)) + return -EINVAL; + + dev_info(pci_dev_to_dev(adapter->pdev), + "setting MAC %pM on VF %d\n", mac, vf); + dev_info(pci_dev_to_dev(adapter->pdev), + "Reload the VF driver to make this change effective.\n"); + retval = ngbe_set_vf_mac(adapter, vf, mac); + if (retval >= 0) { + adapter->vfinfo[vf].pf_set_mac = true; + if (test_bit(__NGBE_DOWN, &adapter->state)) { + dev_warn(pci_dev_to_dev(adapter->pdev), + "The VF MAC address has been set, but the PF " + "device is not up.\n"); + dev_warn(pci_dev_to_dev(adapter->pdev), + "Bring the PF device up before attempting to " + "use the VF device.\n"); + } + } else { + dev_warn(pci_dev_to_dev(adapter->pdev), + "The VF MAC address was NOT set due to invalid or " + "duplicate MAC address.\n"); + } + + return retval; +} + +static int ngbe_enable_port_vlan(struct ngbe_adapter *adapter, + int vf, u16 vlan, u8 qos) +{ + struct ngbe_hw *hw = &adapter->hw; + int err; + + err = ngbe_set_vf_vlan(adapter, true, vlan, vf); + if (err) + goto out; + ngbe_set_vmvir(adapter, vlan, qos, vf); + ngbe_set_vmolr(hw, vf, false); + if (adapter->vfinfo[vf].spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + adapter->vfinfo[vf].vlan_count++; + /* enable hide vlan */ + ngbe_write_qde(adapter, vf, 1); + ngbe_write_hide_vlan(adapter, vf, 1); + adapter->vfinfo[vf].pf_vlan = vlan; + adapter->vfinfo[vf].pf_qos = qos; + dev_info(pci_dev_to_dev(adapter->pdev), + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__NGBE_DOWN, &adapter->state)) { + dev_warn(pci_dev_to_dev(adapter->pdev), + "The VF VLAN has been set, but the PF device is not " + 
"up.\n"); + dev_warn(pci_dev_to_dev(adapter->pdev), + "Bring the PF device up before attempting to use the VF " + "device.\n"); + } + +out: + return err; +} + +static int ngbe_disable_port_vlan(struct ngbe_adapter *adapter, int vf) +{ + struct ngbe_hw *hw = &adapter->hw; + int err; + + err = ngbe_set_vf_vlan(adapter, false, + adapter->vfinfo[vf].pf_vlan, vf); + ngbe_clear_vmvir(adapter, vf); + ngbe_set_vmolr(hw, vf, true); + hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); + if (adapter->vfinfo[vf].vlan_count) + adapter->vfinfo[vf].vlan_count--; + /* disable hide vlan */ + ngbe_write_hide_vlan(adapter, vf, 0); + adapter->vfinfo[vf].pf_vlan = 0; + adapter->vfinfo[vf].pf_qos = 0; + + return err; +} +#ifdef IFLA_VF_MAX +#ifdef IFLA_VF_VLAN_INFO_MAX +int ngbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, + u8 qos, __be16 vlan_proto) +#else +int ngbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) +#endif +{ + int err = 0; + struct ngbe_adapter *adapter = netdev_priv(netdev); + + /* VLAN IDs accepted range 0-4094 */ + if ((vf >= adapter->num_vfs) || (vlan > VLAN_VID_MASK-1) || (qos > 7)) + return -EINVAL; +#ifdef IFLA_VF_VLAN_INFO_MAX + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; +#endif + if (vlan || qos) { + /* + * Check if there is already a port VLAN set, if so + * we have to delete the old one first before we + * can set the new one. The usage model had + * previously assumed the user would delete the + * old port VLAN before setting a new one but this + * is not necessarily the case. 
+ */ + if (adapter->vfinfo[vf].pf_vlan) + err = ngbe_disable_port_vlan(adapter, vf); + if (err) + goto out; + err = ngbe_enable_port_vlan(adapter, vf, vlan, qos); + + } else { + err = ngbe_disable_port_vlan(adapter, vf); + } +out: + return err; +} +#endif /* IFLA_VF_MAX */ +#if 0 +static void ngbe_set_vf_rate_limit(struct ngbe_adapter *adapter, int vf) +{ + struct ngbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct ngbe_hw *hw = &adapter->hw; + u32 bcnrc_val; + u16 queue, queues_per_pool; + u16 max_tx_rate = adapter->vfinfo[vf].max_tx_rate; +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + u16 min_tx_rate = adapter->vfinfo[vf].min_tx_rate; +#endif + + /* determine how many queues per pool based on VMDq mask */ + queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + + max_tx_rate /= queues_per_pool; + bcnrc_val = NGBE_TDM_RP_RATE_MAX(max_tx_rate); +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + min_tx_rate /= queues_per_pool; + bcnrc_val |= NGBE_TDM_RP_RATE_MIN(min_tx_rate); +#endif + + /* + * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported + * and 0x004 otherwise. 
+ */ + wr32(hw, NGBE_TDM_MMW, 0x14); + + /* write value for all Tx queues belonging to VF */ + for (queue = 0; queue < queues_per_pool; queue++) { + unsigned int reg_idx = (vf * queues_per_pool) + queue; + + wr32(hw, NGBE_TDM_RP_IDX, reg_idx); + wr32(hw, NGBE_TDM_RP_RATE, bcnrc_val); + if (max_tx_rate) + wr32m(hw, NGBE_TDM_RP_CTL, + NGBE_TDM_RP_CTL_RLEN, NGBE_TDM_RP_CTL_RLEN); + else + wr32m(hw, NGBE_TDM_RP_CTL, + NGBE_TDM_RP_CTL_RLEN, 0); + } +} +#endif +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +int ngbe_ndo_set_vf_bw(struct net_device *netdev, + int vf, + int min_tx_rate, + int max_tx_rate) +#else +int ngbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int max_tx_rate) +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + /* verify VF is active */ + if (vf >= adapter->num_vfs) + return -EINVAL; + + /* verify link is up */ + if (!adapter->link_up) + return -EINVAL; + + /* verify we are linked at at least 1 Gbps */ + if (adapter->link_speed < NGBE_LINK_SPEED_1GB_FULL) + return -EINVAL; + + /* store values */ +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + adapter->vfinfo[vf].min_tx_rate = min_tx_rate; +#endif + adapter->vfinfo[vf].max_tx_rate = max_tx_rate; + + return 0; +} + +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE +int ngbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 regval; + + if (vf >= adapter->num_vfs) + return -EINVAL; + + adapter->vfinfo[vf].spoofchk_enabled = setting; + + if (vf < 32) { + regval = (setting << vf); + wr32m(hw, NGBE_TDM_MAC_AS_L, + regval | (1 << vf), regval); + + if (adapter->vfinfo[vf].vlan_count) { + wr32m(hw, NGBE_TDM_VLAN_AS_L, + regval | (1 << vf), regval); + } + } + + return 0; +} +#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */ +int ngbe_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + if 
(vf >= adapter->num_vfs) + return -EINVAL; + ivi->vf = vf; + memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + ivi->max_tx_rate = adapter->vfinfo[vf].max_tx_rate; + ivi->min_tx_rate = adapter->vfinfo[vf].min_tx_rate; +#else + ivi->tx_rate = adapter->vfinfo[vf].max_tx_rate; +#endif + + ivi->vlan = adapter->vfinfo[vf].pf_vlan; + ivi->qos = adapter->vfinfo[vf].pf_qos; +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; +#endif +#ifdef HAVE_NDO_SET_VF_TRUST + ivi->trusted = adapter->vfinfo[vf].trusted; +#endif + + return 0; +} +#endif /* IFLA_VF_MAX */ + diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_sriov.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_sriov.h new file mode 100644 index 000000000000..d283d82f61ed --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_sriov.h @@ -0,0 +1,76 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ */ + + +#ifndef _NGBE_SRIOV_H_ +#define _NGBE_SRIOV_H_ + +/* ngbe driver limit the max number of VFs could be enabled to + * 7 (NGBE_MAX_VF_FUNCTIONS - 1) + */ +#define NGBE_MAX_VFS_DRV_LIMIT (NGBE_MAX_VF_FUNCTIONS - 1) + +void ngbe_restore_vf_multicasts(struct ngbe_adapter *adapter); +int ngbe_set_vf_vlan(struct ngbe_adapter *adapter, int add, int vid, u16 vf); +void ngbe_set_vmolr(struct ngbe_hw *hw, u16 vf, bool aupe); +void ngbe_msg_task(struct ngbe_adapter *adapter); +int ngbe_set_vf_mac(struct ngbe_adapter *adapter, + u16 vf, unsigned char *mac_addr); +void ngbe_disable_tx_rx(struct ngbe_adapter *adapter); +void ngbe_ping_all_vfs(struct ngbe_adapter *adapter); +void ngbe_ping_all_vfs_with_link_status(struct ngbe_adapter *adapter, bool link_up); + +#ifdef IFLA_VF_MAX +int ngbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); +#ifdef IFLA_VF_VLAN_INFO_MAX +int ngbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, + u8 qos, __be16 vlan_proto); +#else +int ngbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, + u8 qos); +#endif +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +int ngbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, + int max_tx_rate); +#else +int ngbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE +int ngbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); +#endif +#ifdef HAVE_NDO_SET_VF_TRUST +int ngbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting); +#endif +int ngbe_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi); +#endif /* IFLA_VF_MAX */ +int ngbe_disable_sriov(struct ngbe_adapter *adapter); +#ifdef CONFIG_PCI_IOV +int ngbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); +void ngbe_enable_sriov(struct ngbe_adapter *adapter); +#endif +int ngbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs); + 
+#define NGBE_VF_STATUS_LINKUP 0x1 + +/* + * These are defined in ngbe_type.h on behalf of the VF driver + * but we need them here unwrapped for the PF driver. + */ +//#define NGBE_DEV_ID_SP_VF 0x1000 +#endif /* _NGBE_SRIOV_H_ */ + diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_sysfs.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_sysfs.c new file mode 100644 index 000000000000..8a53796f80b8 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_sysfs.c @@ -0,0 +1,226 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + */ + + +#include "ngbe.h" +#include "ngbe_hw.h" +#include "ngbe_type.h" + +#ifdef NGBE_SYSFS + +#include +#include +#include +#include +#include +#include +#include +#ifdef NGBE_HWMON +#include +#endif + +#ifdef NGBE_HWMON +/* hwmon callback functions */ +static ssize_t ngbe_hwmon_show_temp(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ngbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + struct ngbe_hw *hw = ngbe_attr->hw; + unsigned int value; + + /* reset the temp field */ + hw->mac.ops.get_thermal_sensor_data(hw); + + value = ngbe_attr->sensor->temp; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t ngbe_hwmon_show_alarmthresh(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ngbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = ngbe_attr->sensor->alarm_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t ngbe_hwmon_show_dalarmthresh(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ngbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = ngbe_attr->sensor->dalarm_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +/** + * ngbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. + * @adapter: pointer to the adapter structure + * @type: type of sensor data to display + * + * For each file we want in hwmon's sysfs interface we need a device_attribute + * This is included in our hwmon_attr struct that contains the references to + * the data structures we need to get the data to display. 
+ */ +static int ngbe_add_hwmon_attr(struct ngbe_adapter *adapter, int type) +{ + int rc; + unsigned int n_attr; + struct hwmon_attr *ngbe_attr; + + n_attr = adapter->ngbe_hwmon_buff.n_hwmon; + ngbe_attr = &adapter->ngbe_hwmon_buff.hwmon_list[n_attr]; + + switch (type) { + case NGBE_HWMON_TYPE_TEMP: + ngbe_attr->dev_attr.show = ngbe_hwmon_show_temp; + snprintf(ngbe_attr->name, sizeof(ngbe_attr->name), + "temp%u_input", 0); + break; + case NGBE_HWMON_TYPE_ALARMTHRESH: + ngbe_attr->dev_attr.show = ngbe_hwmon_show_alarmthresh; + snprintf(ngbe_attr->name, sizeof(ngbe_attr->name), + "temp%u_alarmthresh", 0); + break; + case NGBE_HWMON_TYPE_DALARMTHRESH: + ngbe_attr->dev_attr.show = ngbe_hwmon_show_dalarmthresh; + snprintf(ngbe_attr->name, sizeof(ngbe_attr->name), + "temp%u_dalarmthresh", 0); + break; + default: + rc = -EPERM; + return rc; + } + + /* These always the same regardless of type */ + ngbe_attr->sensor = + &adapter->hw.mac.thermal_sensor_data.sensor; + ngbe_attr->hw = &adapter->hw; + ngbe_attr->dev_attr.store = NULL; + ngbe_attr->dev_attr.attr.mode = S_IRUGO; + ngbe_attr->dev_attr.attr.name = ngbe_attr->name; + + rc = device_create_file(pci_dev_to_dev(adapter->pdev), + &ngbe_attr->dev_attr); + + if (rc == 0) + ++adapter->ngbe_hwmon_buff.n_hwmon; + + return rc; +} +#endif /* NGBE_HWMON */ + +static void ngbe_sysfs_del_adapter( + struct ngbe_adapter __maybe_unused *adapter) +{ +#ifdef NGBE_HWMON + int i; + + if (adapter == NULL) + return; + + for (i = 0; i < adapter->ngbe_hwmon_buff.n_hwmon; i++) { + device_remove_file(pci_dev_to_dev(adapter->pdev), + &adapter->ngbe_hwmon_buff.hwmon_list[i].dev_attr); + } + + kfree(adapter->ngbe_hwmon_buff.hwmon_list); + + if (adapter->ngbe_hwmon_buff.device) + hwmon_device_unregister(adapter->ngbe_hwmon_buff.device); +#endif /* NGBE_HWMON */ +} + +/* called from ngbe_main.c */ +void ngbe_sysfs_exit(struct ngbe_adapter *adapter) +{ + ngbe_sysfs_del_adapter(adapter); +} + +/* called from ngbe_main.c */ +int ngbe_sysfs_init(struct 
ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int rc = 0; +#ifdef NGBE_HWMON + struct hwmon_buff *ngbe_hwmon = &adapter->ngbe_hwmon_buff; + int n_attrs; + +#endif /* NGBE_HWMON */ + if (adapter == NULL) + goto err; + +#ifdef NGBE_HWMON + + /* Don't create thermal hwmon interface if no sensors present */ + if (hw->mac.ops.init_thermal_sensor_thresh(hw)) + goto no_thermal; + + /* + * Allocation space for max attributs + * max num sensors * values (temp, alamthresh, dalarmthresh) + */ + n_attrs = 3; + ngbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr), + GFP_KERNEL); + if (!ngbe_hwmon->hwmon_list) { + rc = -ENOMEM; + goto err; + } + + ngbe_hwmon->device = + hwmon_device_register(pci_dev_to_dev(adapter->pdev)); + if (IS_ERR(ngbe_hwmon->device)) { + rc = PTR_ERR(ngbe_hwmon->device); + goto err; + } + + + /* Bail if any hwmon attr struct fails to initialize */ + rc = ngbe_add_hwmon_attr(adapter, NGBE_HWMON_TYPE_TEMP); + rc |= ngbe_add_hwmon_attr(adapter, NGBE_HWMON_TYPE_ALARMTHRESH); + rc |= ngbe_add_hwmon_attr(adapter, NGBE_HWMON_TYPE_DALARMTHRESH); + if (rc) + goto err; + +no_thermal: +#endif /* NGBE_HWMON */ + goto exit; + +err: + ngbe_sysfs_del_adapter(adapter); +exit: + return rc; +} +#endif /* NGBE_SYSFS */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h new file mode 100644 index 000000000000..41034b0bf14e --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h @@ -0,0 +1,3030 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + + +#ifndef _NGBE_TYPE_H_ +#define _NGBE_TYPE_H_ + +/* + * The following is a brief description of the error categories used by the + * ERROR_REPORT* macros. + * + * - NGBE_ERROR_INVALID_STATE + * This category is for errors which represent a serious failure state that is + * unexpected, and could be potentially harmful to device operation. It should + * not be used for errors relating to issues that can be worked around or + * ignored. + * + * - NGBE_ERROR_POLLING + * This category is for errors related to polling/timeout issues and should be + * used in any case where the timeout occured, or a failure to obtain a lock, or + * failure to receive data within the time limit. + * + * - NGBE_ERROR_CAUTION + * This category should be used for reporting issues that may be the cause of + * other errors, such as temperature warnings. It should indicate an event which + * could be serious, but hasn't necessarily caused problems yet. + * + * - NGBE_ERROR_SOFTWARE + * This category is intended for errors due to software state preventing + * something. The category is not intended for errors due to bad arguments, or + * due to unsupported features. It should be used when a state occurs which + * prevents action but is not a serious issue. + * + * - NGBE_ERROR_ARGUMENT + * This category is for when a bad or invalid argument is passed. It should be + * used whenever a function is called and error checking has detected the + * argument is wrong or incorrect. + * + * - NGBE_ERROR_UNSUPPORTED + * This category is for errors which are due to unsupported circumstances or + * configuration issues. 
It should not be used when the issue is due to an + * invalid argument, but for when something has occurred that is unsupported + * (Ex: Flow control autonegotiation or an unsupported SFP+ module.) + */ + +#include "ngbe_osdep.h" + +#define NGBE_NO_LRO + + +/* Override this by setting IOMEM in your ngbe_osdep.h header */ +#ifndef IOMEM +#define IOMEM +#endif + +/* Little Endian defines */ +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 + +#endif +#ifndef __be16 +/* Big Endian defines */ +#define __be16 u16 +#define __be32 u32 +#define __be64 u64 + +#endif + +/************ ngbe_register.h ************/ +/* Vendor ID */ +#ifndef PCI_VENDOR_ID_TRUSTNETIC +#define PCI_VENDOR_ID_TRUSTNETIC 0x8088 +#endif + +/* Device IDs */ +/* copper */ +#define NGBE_DEV_ID_EM_TEST 0x0000 +#define NGBE_DEV_ID_EM_WX1860AL_W 0x0100 +#define NGBE_DEV_ID_EM_WX1860A2 0x0101 +#define NGBE_DEV_ID_EM_WX1860A2S 0x0102 +#define NGBE_DEV_ID_EM_WX1860A4 0x0103 +#define NGBE_DEV_ID_EM_WX1860A4S 0x0104 +#define NGBE_DEV_ID_EM_WX1860AL2 0x0105 +#define NGBE_DEV_ID_EM_WX1860AL2S 0x0106 +#define NGBE_DEV_ID_EM_WX1860AL4 0x0107 +#define NGBE_DEV_ID_EM_WX1860AL4S 0x0108 +#define NGBE_DEV_ID_EM_WX1860NCSI 0x0109 +#define NGBE_DEV_ID_EM_WX1860A1 0x010a +#define NGBE_DEV_ID_EM_WX1860A1L 0x010b + + + + +/* transfer units */ +#define NGBE_KB_TO_B 1024 + +/* Revision ID */ +#define NGBE_SP_MPW 1 + +/* Subsystem ID */ +#define NGBE_WX1860AL_INTERNAL 0x0410 +#define NGBE_WX1860AL_M88E1512_SFP 0x0403 +#define NGBE_WX1860AL_YT8521S_SFP 0x0460 + +#define NGBE_SUBSYSTEM_ID_EM_SF100F_LP 0x0103 +#define NGBE_SUBSYSTEM_ID_EM_SF100HF_LP 0x0103 +#define NGBE_SUBSYSTEM_ID_EM_SF200T 0x0201 +#define NGBE_SUBSYSTEM_ID_EM_SF200T_S 0x0210 +#define NGBE_SUBSYSTEM_ID_EM_SF400T 0x0401 +#define NGBE_SUBSYSTEM_ID_EM_SF400T_S 0x0410 +#define NGBE_SUBSYSTEM_ID_EM_SF200HT 0x0202 +#define NGBE_SUBSYSTEM_ID_EM_SF200HT_S 0x0220 +#define 
NGBE_SUBSYSTEM_ID_EM_SF400HT 0x0402 +#define NGBE_SUBSYSTEM_ID_EM_SF400HT_S 0x0420 +#define NGBE_SUBSYSTEM_ID_EM_SF200HXT 0x0230 +#define NGBE_SUBSYSTEM_ID_EM_SF400HXT 0x0430 +#define NGBE_SUBSYSTEM_ID_EM_SF400_OCP 0x0440 +#define NGBE_SUBSYSTEM_ID_EM_SF400_LY 0x0450 +#define NGBE_SUBSYSTEM_ID_EM_SF400_LY_YT 0x0470 + +#define M88E1512_SFP 0x0003 +#define OCP_CARD 0x0040 +#define LY_M88E1512_SFP 0x0050 +#define M88E1512_RJ45 0x0051 +#define M88E1512_MIX 0x0052 +#define YT8521S_SFP 0x0060 +#define LY_YT8521S_SFP 0x0070 +#define INTERNAL_YT8521S_SFP 0x0061 +#define YT8521S_SFP_GPIO 0x0062 +#define INTERNAL_YT8521S_SFP_GPIO 0x0064 +#define RGMII_FPGA 0x0080 + +#define OEM_MASK 0x00FF +#define INTERNAL_SFP_MASK 0x00FF + +#define NCSI_SUP 0x8000 +#define NCSI_SUP_MASK 0x8000 + +#define WOL_SUP 0x4000 +#define WOL_SUP_MASK 0x4000 + + +/* MDIO Manageable Devices (MMDs). */ +#define NGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 /* PMA and PMD */ +#define NGBE_MDIO_PCS_DEV_TYPE 0x3 /* Physical Coding Sublayer*/ +#define NGBE_MDIO_PHY_XS_DEV_TYPE 0x4 /* PHY Extender Sublayer */ +#define NGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 /* Auto-Negotiation */ +#define NGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Vendor specific 1 */ + +/* phy register definitions */ +/* VENDOR_SPECIFIC_1_DEV regs */ +#define NGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ +#define NGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ +#define NGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0-10G, 1-1G */ + +/* AUTO_NEG_DEV regs */ +#define NGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ +#define NGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ +#define NGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Reg */ +#define NGBE_MDIO_AUTO_NEG_LP_STATUS 0xE820 /* AUTO NEG RX LP Status + * Reg */ +#define NGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ +#define NGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ +#define NGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement 
*/ + + +#define NGBE_MDIO_AUTO_NEG_1000BASE_EEE_ADVT 0x4 +#define NGBE_MDIO_AUTO_NEG_100BASE_EEE_ADVT 0x2 +#define NGBE_MDIO_AUTO_NEG_LP_1000BASE_CAP 0x8000 + +#define NGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ +#define NGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ +#define NGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */ +#define NGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */ +#define NGBE_MII_RESTART 0x200 +#define NGBE_MII_AUTONEG_COMPLETE 0x20 +#define NGBE_MII_AUTONEG_LINK_UP 0x04 +#define NGBE_MII_AUTONEG_REG 0x0 + +/* PHY_XS_DEV regs */ +#define NGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */ +#define NGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ + +/* Media-dependent registers. */ +#define NGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ +#define NGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ +#define NGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */ +#define NGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */ + +#define NGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ +#define NGBE_MDIO_PHY_SPEED_100M 0x0020 /* 100M capable */ +#define NGBE_MDIO_PHY_SPEED_10M 0x0040 /* 10M capable */ + +#define NGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */ +#define NGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */ + +#define NGBE_PHY_REVISION_MASK 0xFFFFFFF0U +#define NGBE_MAX_PHY_ADDR 32 + +#define NGBE_MDIO_CLAUSE_SELECT 0x11220 + +/* INTERNAL PHY CONTROL */ +#define NGBE_INTERNAL_PHY_PAGE_SELECT_OFFSET 31 +#define NGBE_INTERNAL_PHY_OFFSET_MAX 32 +#define NGBE_INTERNAL_PHY_ID 0x000732 + +#define NGBE_INTPHY_LED0 0x0010 +#define NGBE_INTPHY_LED1 0x0040 +#define NGBE_INTPHY_LED2 0x2000 + +#define NGBE_INTPHY_INT_LSC 0x0010 +#define NGBE_INTPHY_INT_ANC 0x0008 + + + + +/* PHY MDI STANDARD CONFIG */ +#define NGBE_MDI_PHY_ID1_OFFSET 2 +#define NGBE_MDI_PHY_ID2_OFFSET 3 +#define NGBE_MDI_PHY_ID_MASK 0xFFFFFC00U +#define NGBE_MDI_PHY_SPEED_SELECT1 0x0040 +#define 
NGBE_MDI_PHY_DUPLEX 0x0100 +#define NGBE_MDI_PHY_RESTART_AN 0x0200 +#define NGBE_MDI_PHY_ANE 0x1000 +#define NGBE_MDI_PHY_SPEED_SELECT0 0x2000 +#define NGBE_MDI_PHY_RESET 0x8000 + +#define NGBE_PHY_RST_WAIT_PERIOD 50 + +#define NGBE_M88E1512_PHY_ID 0x005043 +/* reg 18_0 */ +#define NGBE_M88E1512_INT_LSC 0x0400 +#define NGBE_M88E1512_INT_ANC 0x0800 +/* reg 18_3 */ +#define NGBE_M88E1512_INT_EN 0x0080 +#define NGBE_M88E1512_INT_POL 0x0800 + +/* reg 21_2 */ +#define NGBE_M88E1512_RGM_TTC 0x0010 +#define NGBE_M88E1512_RGM_RTC 0x0020 + +/* LED control */ +#define NGBE_M88E1512_LED1_CONF 0x6 +#define NGBE_M88E1512_LED0_CONF 0x1 + +/* LED polarity */ +#define NGBE_M88E1512_LED1_POL 0x1 +#define NGBE_M88E1512_LED0_POL 0x1 + +/* reg 4_0 ADV REG*/ +#define NGBE_M88E1512_10BASET_HALF 0x0020 +#define NGBE_M88E1512_10BASET_FULL 0x0040 +#define NGBE_M88E1512_100BASET_HALF 0x0080 +#define NGBE_M88E1512_100BASET_FULL 0x0100 + +/* reg 9_0 ADV REG*/ +#define NGBE_M88E1512_1000BASET_HALF 0x0100 +#define NGBE_M88E1512_1000BASET_FULL 0x0200 + +/* reg 19_0 INT status*/ +#define NGBE_M88E1512_ANC 0x0800 +#define NGBE_M88E1512_LSC 0x0400 + +/* yt8521s reg */ +#define NGBE_YT8521S_PHY_ID 0x011a +#define NGBE_YT8531S_PHY_ID 0xe91a + +#define NGBE_YT8521S_SDS_LINK_UP 0x4 +#define NGBE_YT8521S_SDS_LINK_DOWN 0x8 +#define NGBE_YT8521S_UTP_LINK_UP 0x400 +#define NGBE_YT8521S_UTP_LINK_DOWN 0x800 + +#define NGBE_YT8521S_PHY_SPEED_SELECT1 0x0040 +#define NGBE_YT8521S_PHY_SPEED_SELECT0 0x2000 +#define NGBE_YT8521S_PHY_DUPLEX 0x0100 +#define NGBE_YT8521S_PHY_RESET 0x8000 + +/* PHY IDs*/ +#define TN1010_PHY_ID 0x00A19410U +#define QT2022_PHY_ID 0x0043A400U +#define ATH_PHY_ID 0x03429050U +/* PHY FW revision */ +#define TNX_FW_REV 0xB +#define AQ_FW_REV 0x20 + +/* ETH PHY Registers */ +#define NGBE_SR_XS_PCS_MMD_STATUS1 0x30001 +#define NGBE_SR_PCS_CTL2 0x30007 +#define NGBE_SR_PMA_MMD_CTL1 0x10000 +#define NGBE_SR_MII_MMD_CTL 0x1F0000 +#define NGBE_SR_MII_MMD_DIGI_CTL 0x1F8000 +#define 
NGBE_SR_MII_MMD_AN_CTL 0x1F8001 +#define NGBE_SR_MII_MMD_AN_ADV 0x1F0004 +#define NGBE_SR_MII_MMD_AN_ADV_PAUSE(_v) ((0x3 & (_v)) << 7) +#define NGBE_SR_MII_MMD_LP_BABL 0x1F0005 +#define NGBE_SR_AN_MMD_CTL 0x70000 +#define NGBE_SR_AN_MMD_ADV_REG1 0x70010 +#define NGBE_SR_AN_MMD_ADV_REG1_PAUSE(_v) ((0x3 & (_v)) << 10) +#define NGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM 0x400 +#define NGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM 0x800 +#define NGBE_SR_AN_MMD_ADV_REG2 0x70011 +#define NGBE_SR_AN_MMD_LP_ABL1 0x70013 +#define NGBE_VR_AN_KR_MODE_CL 0x78003 +#define NGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1 0x38000 +#define NGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS 0x38010 + +#define NGBE_PHY_MPLLA_CTL0 0x18071 +#define NGBE_PHY_MPLLA_CTL3 0x18077 +#define NGBE_PHY_MISC_CTL0 0x18090 +#define NGBE_PHY_VCO_CAL_LD0 0x18092 +#define NGBE_PHY_VCO_CAL_LD1 0x18093 +#define NGBE_PHY_VCO_CAL_LD2 0x18094 +#define NGBE_PHY_VCO_CAL_LD3 0x18095 +#define NGBE_PHY_VCO_CAL_REF0 0x18096 +#define NGBE_PHY_VCO_CAL_REF1 0x18097 +#define NGBE_PHY_RX_AD_ACK 0x18098 +#define NGBE_PHY_AFE_DFE_ENABLE 0x1805D +#define NGBE_PHY_DFE_TAP_CTL0 0x1805E +#define NGBE_PHY_RX_EQ_ATT_LVL0 0x18057 +#define NGBE_PHY_RX_EQ_CTL0 0x18058 +#define NGBE_PHY_RX_EQ_CTL 0x1805C +#define NGBE_PHY_TX_EQ_CTL0 0x18036 +#define NGBE_PHY_TX_EQ_CTL1 0x18037 +#define NGBE_PHY_TX_RATE_CTL 0x18034 +#define NGBE_PHY_RX_RATE_CTL 0x18054 +#define NGBE_PHY_TX_GEN_CTL2 0x18032 +#define NGBE_PHY_RX_GEN_CTL2 0x18052 +#define NGBE_PHY_RX_GEN_CTL3 0x18053 +#define NGBE_PHY_MPLLA_CTL2 0x18073 +#define NGBE_PHY_RX_POWER_ST_CTL 0x18055 +#define NGBE_PHY_TX_POWER_ST_CTL 0x18035 +#define NGBE_PHY_TX_GENCTRL1 0x18031 + +#define NGBE_SR_PCS_CTL2_PCS_TYPE_SEL_R 0x0 +#define NGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X 0x1 +#define NGBE_SR_PCS_CTL2_PCS_TYPE_SEL_MASK 0x3 +#define NGBE_SR_PMA_MMD_CTL1_SPEED_SEL_1G 0x0 +#define NGBE_SR_PMA_MMD_CTL1_SPEED_SEL_MASK 0x2000 +#define NGBE_SR_PMA_MMD_CTL1_LB_EN 0x1 +#define NGBE_SR_MII_MMD_CTL_AN_EN 0x1000 +#define NGBE_SR_MII_MMD_CTL_RESTART_AN 0x0200 
+#define NGBE_SR_AN_MMD_CTL_RESTART_AN 0x0200 +#define NGBE_SR_AN_MMD_CTL_ENABLE 0x1000 +#define NGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KX4 0x40 +#define NGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KX 0x20 +#define NGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KR 0x80 +#define NGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_MASK 0xFFFF +#define NGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_ENABLE 0x1000 +#define NGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST 0x8000 +#define NGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK 0x1C +#define NGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD 0x10 + +#define NGBE_PHY_MPLLA_CTL0_MULTIPLIER_1GBASEX_KX 32 +#define NGBE_PHY_MPLLA_CTL0_MULTIPLIER_OTHER 40 +#define NGBE_PHY_MPLLA_CTL0_MULTIPLIER_MASK 0xFF +#define NGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_1GBASEX_KX 0x46 +#define NGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_OTHER 0x56 +#define NGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_MASK 0x7FF +#define NGBE_PHY_MISC_CTL0_TX2RX_LB_EN_0 0x1 +#define NGBE_PHY_MISC_CTL0_TX2RX_LB_EN_3_1 0xE +#define NGBE_PHY_MISC_CTL0_RX_VREF_CTRL 0x1F00 +#define NGBE_PHY_VCO_CAL_LD0_1GBASEX_KX 1344 +#define NGBE_PHY_VCO_CAL_LD0_OTHER 1360 +#define NGBE_PHY_VCO_CAL_LD0_MASK 0x1000 +#define NGBE_PHY_VCO_CAL_REF0_LD0_1GBASEX_KX 42 +#define NGBE_PHY_VCO_CAL_REF0_LD0_OTHER 34 +#define NGBE_PHY_VCO_CAL_REF0_LD0_MASK 0x3F +#define NGBE_PHY_AFE_DFE_ENABLE_DFE_EN0 0x10 +#define NGBE_PHY_AFE_DFE_ENABLE_AFE_EN0 0x1 +#define NGBE_PHY_AFE_DFE_ENABLE_MASK 0xFF +#define NGBE_PHY_RX_EQ_CTL_CONT_ADAPT0 0x1 +#define NGBE_PHY_RX_EQ_CTL_CONT_ADAPT_MASK 0xF +#define NGBE_PHY_TX_RATE_CTL_TX0_RATE_RXAUI 0x1 +#define NGBE_PHY_TX_RATE_CTL_TX0_RATE_1GBASEX_KX 0x3 +#define NGBE_PHY_TX_RATE_CTL_TX0_RATE_OTHER 0x2 +#define NGBE_PHY_TX_RATE_CTL_TX1_RATE_OTHER 0x20 +#define NGBE_PHY_TX_RATE_CTL_TX2_RATE_OTHER 0x200 +#define NGBE_PHY_TX_RATE_CTL_TX3_RATE_OTHER 0x2000 +#define NGBE_PHY_TX_RATE_CTL_TX0_RATE_MASK 0x7 +#define NGBE_PHY_TX_RATE_CTL_TX1_RATE_MASK 0x70 +#define NGBE_PHY_TX_RATE_CTL_TX2_RATE_MASK 0x700 +#define NGBE_PHY_TX_RATE_CTL_TX3_RATE_MASK 0x7000 +#define 
NGBE_PHY_RX_RATE_CTL_RX0_RATE_RXAUI 0x1 +#define NGBE_PHY_RX_RATE_CTL_RX0_RATE_1GBASEX_KX 0x3 +#define NGBE_PHY_RX_RATE_CTL_RX0_RATE_OTHER 0x2 +#define NGBE_PHY_RX_RATE_CTL_RX1_RATE_OTHER 0x20 +#define NGBE_PHY_RX_RATE_CTL_RX2_RATE_OTHER 0x200 +#define NGBE_PHY_RX_RATE_CTL_RX3_RATE_OTHER 0x2000 +#define NGBE_PHY_RX_RATE_CTL_RX0_RATE_MASK 0x7 +#define NGBE_PHY_RX_RATE_CTL_RX1_RATE_MASK 0x70 +#define NGBE_PHY_RX_RATE_CTL_RX2_RATE_MASK 0x700 +#define NGBE_PHY_RX_RATE_CTL_RX3_RATE_MASK 0x7000 +#define NGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_OTHER 0x100 +#define NGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_MASK 0x300 +#define NGBE_PHY_TX_GEN_CTL2_TX1_WIDTH_OTHER 0x400 +#define NGBE_PHY_TX_GEN_CTL2_TX1_WIDTH_MASK 0xC00 +#define NGBE_PHY_TX_GEN_CTL2_TX2_WIDTH_OTHER 0x1000 +#define NGBE_PHY_TX_GEN_CTL2_TX2_WIDTH_MASK 0x3000 +#define NGBE_PHY_TX_GEN_CTL2_TX3_WIDTH_OTHER 0x4000 +#define NGBE_PHY_TX_GEN_CTL2_TX3_WIDTH_MASK 0xC000 +#define NGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_OTHER 0x100 +#define NGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_MASK 0x300 +#define NGBE_PHY_RX_GEN_CTL2_RX1_WIDTH_OTHER 0x400 +#define NGBE_PHY_RX_GEN_CTL2_RX1_WIDTH_MASK 0xC00 +#define NGBE_PHY_RX_GEN_CTL2_RX2_WIDTH_OTHER 0x1000 +#define NGBE_PHY_RX_GEN_CTL2_RX2_WIDTH_MASK 0x3000 +#define NGBE_PHY_RX_GEN_CTL2_RX3_WIDTH_OTHER 0x4000 +#define NGBE_PHY_RX_GEN_CTL2_RX3_WIDTH_MASK 0xC000 + +#define NGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_8 0x100 +#define NGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_10 0x200 +#define NGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_16P5 0x400 +#define NGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_MASK 0x700 + +#define NGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME 100 +#define NGBE_PHY_INIT_DONE_POLLING_TIME 100 + +/**************** Global Registers ****************************/ +/* chip control Registers */ +#define NGBE_MIS_RST 0x1000C +#define NGBE_MIS_PWR 0x10000 +#define NGBE_MIS_CTL 0x10004 +#define NGBE_MIS_PF_SM 0x10008 +#define NGBE_MIS_PRB_CTL 0x10010 +#define NGBE_MIS_ST 0x10028 +#define NGBE_MIS_SWSM 0x1002C +#define NGBE_MIS_RST_ST 0x10030 + +#define 
NGBE_MIS_RST_SW_RST 0x00000001U +#define NGBE_MIS_RST_LAN0_RST 0x00000002U +#define NGBE_MIS_RST_LAN1_RST 0x00000004U +#define NGBE_MIS_RST_LAN2_RST 0x00000008U +#define NGBE_MIS_RST_LAN3_RST 0x00000010U +#define NGBE_MIS_RST_FW_RST 0x00000020U + +#define NGBE_MIS_RST_LAN0_CHG_ETH_MODE 0x20000000U +#define NGBE_MIS_RST_LAN1_CHG_ETH_MODE 0x40000000U +#define NGBE_MIS_RST_GLOBAL_RST 0x80000000U + +#define NGBE_MIS_PWR_LAN_ID(_r) ((0xF0000000U & (_r)) >> 28) +#define NGBE_MIS_PWR_LAN_ID_0 (1) +#define NGBE_MIS_PWR_LAN_ID_1 (2) +#define NGBE_MIS_PWR_LAN_ID_2 (3) +#define NGBE_MIS_PWR_LAN_ID_3 (4) + +#define NGBE_MIS_ST_MNG_INIT_DN 0x00000001U +#define NGBE_MIS_ST_MNG_VETO 0x00000100U +#define NGBE_MIS_ST_LAN0_ECC 0x00010000U +#define NGBE_MIS_ST_LAN1_ECC 0x00020000U +#define NGBE_MIS_ST_LAN2_ECC 0x00040000U +#define NGBE_MIS_ST_LAN3_ECC 0x00080000U +#define NGBE_MIS_ST_MNG_ECC 0x00100000U +#define NGBE_MIS_ST_PCORE_ECC 0x00200000U +#define NGBE_MIS_ST_PCIWRP_ECC 0x00400000U +#define NGBE_MIS_ST_PCIEPHY_ECC 0x00800000U +#define NGBE_MIS_ST_FMGR_ECC 0x01000000U +#define NGBE_MIS_ST_GPHY_IN_RST(_r) (0x00000200U << (_r)) + + +#define NGBE_MIS_SWSM_SMBI 1 +#define NGBE_MIS_RST_ST_DEV_RST_ST_DONE 0x00000000U +#define NGBE_MIS_RST_ST_DEV_RST_ST_REQ 0x00080000U +#define NGBE_MIS_RST_ST_DEV_RST_ST_INPROGRESS 0x00100000U +#define NGBE_MIS_RST_ST_DEV_RST_ST_MASK 0x00180000U +#define NGBE_MIS_RST_ST_DEV_RST_TYPE_MASK 0x00070000U +#define NGBE_MIS_RST_ST_DEV_RST_TYPE_SHIFT 16 +#define NGBE_MIS_RST_ST_DEV_RST_TYPE_SW_RST 0x3 +#define NGBE_MIS_RST_ST_DEV_RST_TYPE_GLOBAL_RST 0x5 +#define NGBE_MIS_RST_ST_RST_INIT 0x0000FF00U +#define NGBE_MIS_RST_ST_RST_INI_SHIFT 8 +#define NGBE_MIS_RST_ST_RST_TIM 0x000000FFU +#define NGBE_MIS_PF_SM_SM 1 +#define NGBE_MIS_PRB_CTL_LAN0_UP 0x8 +#define NGBE_MIS_PRB_CTL_LAN1_UP 0x4 +#define NGBE_MIS_PRB_CTL_LAN2_UP 0x2 +#define NGBE_MIS_PRB_CTL_LAN3_UP 0x1 + +/* Sensors for PVT(Process Voltage Temperature) */ +#define NGBE_TS_CTL 0x10300 +#define 
NGBE_TS_EN 0x10304 +#define NGBE_TS_ST 0x10308 +#define NGBE_TS_ALARM_THRE 0x1030C +#define NGBE_TS_DALARM_THRE 0x10310 +#define NGBE_TS_INT_EN 0x10314 +#define NGBE_TS_ALARM_ST 0x10318 +#define NGBE_TS_ALARM_ST_DALARM 0x00000002U +#define NGBE_TS_ALARM_ST_ALARM 0x00000001U + +#define NGBE_EFUSE_WDATA0 0x10320 +#define NGBE_EFUSE_WDATA1 0x10324 +#define NGBE_EFUSE_RDATA0 0x10328 +#define NGBE_EFUSE_RDATA1 0x1032C +#define NGBE_EFUSE_STATUS 0x10330 + + +#define NGBE_TS_CTL_CALI_DONE 0x80000000U +#define NGBE_TS_EN_ENA 0x00000001U +#define NGBE_TS_ST_DATA_OUT_MASK 0x000003FFU +#define NGBE_TS_ALARM_THRE_MASK 0x000003FFU +#define NGBE_TS_DALARM_THRE_MASK 0x000003FFU +#define NGBE_TS_INT_EN_DALARM_INT_EN 0x00000002U +#define NGBE_TS_INT_EN_ALARM_INT_EN 0x00000001U + +struct ngbe_thermal_diode_data { + s16 temp; + s16 alarm_thresh; + s16 dalarm_thresh; +}; + +struct ngbe_thermal_sensor_data { + struct ngbe_thermal_diode_data sensor; +}; + + +/* FMGR Registers */ +#define NGBE_SPI_ILDR_STATUS 0x10120 +#define NGBE_SPI_ILDR_STATUS_PERST 0x00000001U /* PCIE_PERST is done */ +#define NGBE_SPI_ILDR_STATUS_PWRRST 0x00000002U /* Power on reset done */ +#define NGBE_SPI_ILDR_STATUS_SW_RESET 0x00000800U /* software reset done */ +#define NGBE_SPI_ILDR_STATUS_LAN0_SW_RST 0x00002000U /* lan0 soft reset done */ +#define NGBE_SPI_ILDR_STATUS_LAN1_SW_RST 0x00004000U /* lan1 soft reset done */ +#define NGBE_SPI_ILDR_STATUS_LAN2_SW_RST 0x00008000U /* lan2 soft reset done */ +#define NGBE_SPI_ILDR_STATUS_LAN3_SW_RST 0x00010000U /* lan3 soft reset done */ + + +#define NGBE_MAX_FLASH_LOAD_POLL_TIME 10 + +#define NGBE_SPI_CMD 0x10104 +#define NGBE_SPI_CMD_CMD(_v) (((_v) & 0x7) << 28) +#define NGBE_SPI_CMD_CLK(_v) (((_v) & 0x7) << 25) +#define NGBE_SPI_CMD_ADDR(_v) (((_v) & 0x7FFFFF)) + +#define NGBE_SPI_DATA 0x10108 +#define NGBE_SPI_DATA_BYPASS ((0x1) << 31) +#define NGBE_SPI_DATA_STATUS(_v) (((_v) & 0xFF) << 16) +#define NGBE_SPI_DATA_OP_DONE ((0x1)) + +#define NGBE_SPI_STATUS 0x1010C 
+#define NGBE_SPI_STATUS_OPDONE ((0x1)) +#define NGBE_SPI_STATUS_FLASH_BYPASS ((0x1) << 31) + +#define NGBE_SPI_USR_CMD 0x10110 +#define NGBE_SPI_CMDCFG0 0x10114 +#define NGBE_SPI_CMDCFG1 0x10118 +#define NGBE_SPI_ILDR_SWPTR 0x10124 + +/************************* Port Registers ************************************/ + +/* port cfg Registers */ +#define NGBE_CFG_PORT_CTL 0x14400 +#define NGBE_CFG_PORT_ST 0x14404 +#define NGBE_CFG_EX_VTYPE 0x14408 +#define NGBE_CFG_LED_CTL 0x14424 + +/* internal phy reg_offset [0,31] */ +#define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4)) + +#define NGBE_CFG_TCP_TIME 0x14420 +#define NGBE_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4)) /* [0,3] */ +#define NGBE_CFG_LAN_SPEED 0x14440 + + + +/* port cfg bit */ +#define NGBE_CFG_PORT_CTL_PFRSTD 0x00004000U /* Phy Function Reset Done */ +#define NGBE_CFG_PORT_CTL_D_VLAN 0x00000001U /* double vlan*/ +#define NGBE_CFG_PORT_CTL_ETAG_ETYPE_VLD 0x00000002U +#define NGBE_CFG_PORT_CTL_QINQ 0x00000004U +#define NGBE_CFG_PORT_CTL_DRV_LOAD 0x00000008U +#define NGBE_CFG_PORT_CTL_NUM_VT_MASK 0x00001000U /* number of TVs */ +#define NGBE_CFG_PORT_CTL_NUM_VT_NONE 0x00000000U +#define NGBE_CFG_PORT_CTL_NUM_VT_8 0x00001000U +/* Status Bit */ +#define NGBE_CFG_PORT_ST_LINK_1000M 0x00000002U +#define NGBE_CFG_PORT_ST_LINK_100M 0x00000004U +#define NGBE_CFG_PORT_ST_LINK_10M 0x00000008U +#define NGBE_CFG_PORT_ST_LAN_ID(_r) ((0x00000300U & (_r)) >> 8) +#define NGBE_LINK_UP_TIME 90 + +/* LED CTL Bit */ + +#define NGBE_CFG_LED_CTL_LINK_10M_SEL 0x00000008U +#define NGBE_CFG_LED_CTL_LINK_100M_SEL 0x00000004U +#define NGBE_CFG_LED_CTL_LINK_1G_SEL 0x00000002U +#define NGBE_CFG_LED_CTL_LINK_OD_SHIFT 16 +/* LED modes */ +#define NGBE_LED_LINK_10M NGBE_CFG_LED_CTL_LINK_10M_SEL +#define NGBE_LED_LINK_1G NGBE_CFG_LED_CTL_LINK_1G_SEL +#define NGBE_LED_LINK_100M NGBE_CFG_LED_CTL_LINK_100M_SEL + +/* GPIO Registers */ +#define NGBE_GPIO_DR 0x14800 +#define NGBE_GPIO_DDR 0x14804 +#define NGBE_GPIO_CTL 0x14808 +#define 
NGBE_GPIO_INTEN 0x14830 +#define NGBE_GPIO_INTMASK 0x14834 +#define NGBE_GPIO_INTTYPE_LEVEL 0x14838 +#define NGBE_GPIO_POLARITY 0x1483C +#define NGBE_GPIO_INTSTATUS 0x14840 +#define NGBE_GPIO_EOI 0x1484C +/*GPIO bit */ +#define NGBE_GPIO_DR_0 0x00000001U /* SDP0 Data Value */ +#define NGBE_GPIO_DR_1 0x00000002U /* SDP1 Data Value */ +#define NGBE_GPIO_DDR_0 0x00000001U /* SDP0 IO direction */ +#define NGBE_GPIO_DDR_1 0x00000002U /* SDP1 IO direction */ +#define NGBE_GPIO_CTL_SW_MODE 0x00000000U /* SDP software mode */ +#define NGBE_GPIO_INTEN_1 0x00000002U /* SDP1 interrupt enable */ +#define NGBE_GPIO_INTEN_2 0x00000004U /* SDP2 interrupt enable */ +#define NGBE_GPIO_INTEN_3 0x00000008U /* SDP3 interrupt enable */ +#define NGBE_GPIO_INTEN_5 0x00000020U /* SDP5 interrupt enable */ +#define NGBE_GPIO_INTEN_6 0x00000040U /* SDP6 interrupt enable */ +#define NGBE_GPIO_INTTYPE_LEVEL_2 0x00000004U /* SDP2 interrupt type level */ +#define NGBE_GPIO_INTTYPE_LEVEL_3 0x00000008U /* SDP3 interrupt type level */ +#define NGBE_GPIO_INTTYPE_LEVEL_5 0x00000020U /* SDP5 interrupt type level */ +#define NGBE_GPIO_INTTYPE_LEVEL_6 0x00000040U /* SDP6 interrupt type level */ +#define NGBE_GPIO_INTSTATUS_1 0x00000002U /* SDP1 interrupt status */ +#define NGBE_GPIO_INTSTATUS_2 0x00000004U /* SDP2 interrupt status */ +#define NGBE_GPIO_INTSTATUS_3 0x00000008U /* SDP3 interrupt status */ +#define NGBE_GPIO_INTSTATUS_5 0x00000020U /* SDP5 interrupt status */ +#define NGBE_GPIO_INTSTATUS_6 0x00000040U /* SDP6 interrupt status */ +#define NGBE_GPIO_EOI_2 0x00000004U /* SDP2 interrupt clear */ +#define NGBE_GPIO_EOI_3 0x00000008U /* SDP3 interrupt clear */ +#define NGBE_GPIO_EOI_5 0x00000020U /* SDP5 interrupt clear */ +#define NGBE_GPIO_EOI_6 0x00000040U /* SDP6 interrupt clear */ + +/* TPH registers */ +#define NGBE_CFG_TPH_TDESC 0x14F00 /* TPH conf for Tx desc write back */ +#define NGBE_CFG_TPH_RDESC 0x14F04 /* TPH conf for Rx desc write back */ +#define NGBE_CFG_TPH_RHDR 0x14F08 /* TPH 
conf for writing Rx pkt header */ +#define NGBE_CFG_TPH_RPL 0x14F0C /* TPH conf for payload write access */ +/* TPH bit */ +#define NGBE_CFG_TPH_TDESC_EN 0x80000000U +#define NGBE_CFG_TPH_TDESC_PH_SHIFT 29 +#define NGBE_CFG_TPH_TDESC_ST_SHIFT 16 +#define NGBE_CFG_TPH_RDESC_EN 0x80000000U +#define NGBE_CFG_TPH_RDESC_PH_SHIFT 29 +#define NGBE_CFG_TPH_RDESC_ST_SHIFT 16 +#define NGBE_CFG_TPH_RHDR_EN 0x00008000U +#define NGBE_CFG_TPH_RHDR_PH_SHIFT 13 +#define NGBE_CFG_TPH_RHDR_ST_SHIFT 0 +#define NGBE_CFG_TPH_RPL_EN 0x80000000U +#define NGBE_CFG_TPH_RPL_PH_SHIFT 29 +#define NGBE_CFG_TPH_RPL_ST_SHIFT 16 + +/*********************** Transmit DMA registers **************************/ +/* transmit global control */ +#define NGBE_TDM_CTL 0x18000 +#define NGBE_TDM_POOL_TE 0x18004 +#define NGBE_TDM_PB_THRE 0x18020 + + + +#define NGBE_TDM_LLQ 0x18040 +#define NGBE_TDM_ETYPE_LB_L 0x18050 + +#define NGBE_TDM_ETYPE_AS_L 0x18058 +#define NGBE_TDM_MAC_AS_L 0x18060 + +#define NGBE_TDM_VLAN_AS_L 0x18070 + +#define NGBE_TDM_TCP_FLG_L 0x18078 +#define NGBE_TDM_TCP_FLG_H 0x1807C +#define NGBE_TDM_DESC_FATAL 0x180D0 +#define NGBE_TDM_VLAN_INS(_i) (0x18100 + ((_i) * 4)) /* 8 of these 0 - 7 */ +/* TDM CTL BIT */ +#define NGBE_TDM_CTL_TE 0x1 /* Transmit Enable */ +#define NGBE_TDM_CTL_PADDING 0x2 /* Padding byte number for ipsec ESP */ +#define NGBE_TDM_CTL_VT_SHIFT 16 /* VLAN EtherType */ +/* Per VF Port VLAN insertion rules */ +#define NGBE_TDM_VLAN_INS_VLANA_DEFAULT 0x40000000U /*Always use default VLAN*/ +#define NGBE_TDM_VLAN_INS_VLANA_NEVER 0x80000000U /* Never insert VLAN tag */ + +#define NGBE_TDM_RP_CTL_RST ((0x1) << 0) +#define NGBE_TDM_RP_CTL_RPEN ((0x1) << 2) +#define NGBE_TDM_RP_CTL_RLEN ((0x1) << 3) +#define NGBE_TDM_RP_RATE_MIN(v) ((0x3FFF & (v))) +#define NGBE_TDM_RP_RATE_MAX(v) ((0x3FFF & (v)) << 16) + +/* qos */ +#define NGBE_TDM_PBWARB_CTL 0x18200 +#define NGBE_TDM_VM_CREDIT_VAL(v) (0x3FF & (v)) + +/* etag */ +#define NGBE_TDM_ETAG_INS(_i) (0x18700 + ((_i) * 4)) /* 8 of 
these 0 - 7 */ +/* statistic */ +#define NGBE_TDM_DRP_CNT 0x18300 +#define NGBE_TDM_SEC_DRP 0x18304 +#define NGBE_TDM_PKT_CNT 0x18308 +#define NGBE_TDM_BYTE_CNT_L 0x1830C +#define NGBE_TDM_BYTE_CNT_H 0x18310 +#define NGBE_TDM_OS2BMC_CNT 0x18314 + +/**************************** Receive DMA registers **************************/ +/* receive control */ +#define NGBE_RDM_ARB_CTL 0x12000 +#define NGBE_RDM_POOL_RE 0x12004 + +#define NGBE_RDM_PF_QDE 0x12080 +#define NGBE_RDM_PF_HIDE 0x12090 +/* VFRE bitmask */ +#define NGBE_RDM_POOL_RE_ENABLE_ALL 0xFFFFFFFFU + +/* statistic */ +#define NGBE_RDM_DRP_PKT 0x12500 +#define NGBE_RDM_PKT_CNT 0x12504 +#define NGBE_RDM_BYTE_CNT_L 0x12508 +#define NGBE_RDM_BYTE_CNT_H 0x1250C +#define NGBE_RDM_BMC2OS_CNT 0x12510 + +/***************************** RDB registers *********************************/ +/* Flow Control Registers */ +#define NGBE_RDB_RFCV 0x19200 +#define NGBE_RDB_RFCL 0x19220 +#define NGBE_RDB_RFCH 0x19260 +#define NGBE_RDB_RFCRT 0x192A0 +#define NGBE_RDB_RFCC 0x192A4 +/* receive packet buffer */ +#define NGBE_RDB_PB_WRAP 0x19004 +#define NGBE_RDB_PB_SZ 0x19020 + +#define NGBE_RDB_PB_CTL 0x19000 +#define NGBE_RDB_PB_SZ_SHIFT 10 +#define NGBE_RDB_PB_SZ_MASK 0x000FFC00U +/* lli interrupt */ +#define NGBE_RDB_LLI_THRE 0x19080 +#define NGBE_RDB_LLI_THRE_SZ(_v) ((0xFFF & (_v))) +#define NGBE_RDB_LLI_THRE_UP(_v) ((0x7 & (_v)) << 16) +#define NGBE_RDB_LLI_THRE_UP_SHIFT 16 + +/* ring assignment */ +#define NGBE_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4)) /* [0,7] */ +#define NGBE_RDB_RSSTBL(_i) (0x19400 + ((_i) * 4)) /* [0,31] */ +#define NGBE_RDB_RSSRK(_i) (0x19480 + ((_i) * 4)) /* [0,9] */ +#define NGBE_RDB_RA_CTL 0x194F4 +#define NGBE_RDB_5T_SDP(_i) (0x19A00 + ((_i) * 4)) /*Src Dst Addr Q Filter*/ +#define NGBE_RDB_5T_CTL0(_i) (0x19C00 + ((_i) * 4)) /* Five Tuple Q Filter */ +#define NGBE_RDB_ETYPE_CLS(_i) (0x19100 + ((_i) * 4)) /* EType Q Select */ +#define NGBE_RDB_SYN_CLS 0x19130 +#define NGBE_RDB_5T_CTL1(_i) (0x19E00 + ((_i) * 4)) 
/*8 of these (0-7)*/ +/* VM RSS */ +#define NGBE_RDB_VMRSSRK(_i, _p) (0x1A000 + ((_i) * 4) + ((_p) * 0x40)) +#define NGBE_RDB_VMRSSTBL(_i, _p) (0x1B000 + ((_i) * 4) + ((_p) * 0x40)) +/* statistic */ +#define NGBE_RDB_MPCNT 0x19040 +#define NGBE_RDB_PKT_CNT 0x19060 +#define NGBE_RDB_REPLI_CNT 0x19064 +#define NGBE_RDB_DRP_CNT 0x19068 +#define NGBE_RDB_LXONTXC 0x1921C +#define NGBE_RDB_LXOFFTXC 0x19218 +#define NGBE_RDB_PFCMACDAL 0x19210 +#define NGBE_RDB_PFCMACDAH 0x19214 +#define NGBE_RDB_TXSWERR 0x1906C +#define NGBE_RDB_TXSWERR_TB_FREE 0x3FF +/* rdb_pl_cfg reg mask */ +#define NGBE_RDB_PL_CFG_L4HDR 0x2 +#define NGBE_RDB_PL_CFG_L3HDR 0x4 +#define NGBE_RDB_PL_CFG_L2HDR 0x8 +#define NGBE_RDB_PL_CFG_TUN_OUTER_L2HDR 0x20 +#define NGBE_RDB_PL_CFG_TUN_TUNHDR 0x10 +/* RQTC Bit Masks and Shifts */ +#define NGBE_RDB_RSS_TC_SHIFT_TC(_i) ((_i) * 4) +#define NGBE_RDB_RSS_TC_TC0_MASK (0x7 << 0) +#define NGBE_RDB_RSS_TC_TC1_MASK (0x7 << 4) +#define NGBE_RDB_RSS_TC_TC2_MASK (0x7 << 8) +#define NGBE_RDB_RSS_TC_TC3_MASK (0x7 << 12) +#define NGBE_RDB_RSS_TC_TC4_MASK (0x7 << 16) +#define NGBE_RDB_RSS_TC_TC5_MASK (0x7 << 20) +#define NGBE_RDB_RSS_TC_TC6_MASK (0x7 << 24) +#define NGBE_RDB_RSS_TC_TC7_MASK (0x7 << 28) +/* Packet Buffer Initialization */ +#define NGBE_MAX_PACKET_BUFFERS 8 +#define NGBE_RDB_PB_SZ_48KB 0x00000030U /* 48KB Packet Buffer */ +#define NGBE_RDB_PB_SZ_64KB 0x00000040U /* 64KB Packet Buffer */ +#define NGBE_RDB_PB_SZ_80KB 0x00000050U /* 80KB Packet Buffer */ +#define NGBE_RDB_PB_SZ_128KB 0x00000080U /* 128KB Packet Buffer */ +#define NGBE_RDB_PB_SZ_MAX 0x00000200U /* 512KB Packet Buffer */ + + +/* Packet buffer allocation strategies */ +enum { + PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */ +#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL + PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */ +#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED +}; + + +/* FCRTL Bit Masks */ +#define NGBE_RDB_RFCL_XONE 0x80000000U /* XON enable */ +#define 
NGBE_RDB_RFCH_XOFFE 0x80000000U /* Packet buffer fc enable */ +/* FCCFG Bit Masks */ +#define NGBE_RDB_RFCC_RFCE_802_3X 0x00000008U /* Tx link FC enable */ + +/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ +#define NGBE_RDB_5T_CTL1_SIZE_BP 0x00001000U /* Packet size bypass */ +#define NGBE_RDB_5T_CTL1_LLI 0x00100000U /* Enables low latency Int */ +#define NGBE_RDB_LLI_THRE_PRIORITY_MASK 0x00070000U /* VLAN priority mask */ +#define NGBE_RDB_LLI_THRE_PRIORITY_EN 0x00080000U /* VLAN priority enable */ + +#define NGBE_MAX_RDB_5T_CTL0_FILTERS 128 +#define NGBE_RDB_5T_CTL0_PROTOCOL_MASK 0x00000003U +#define NGBE_RDB_5T_CTL0_PROTOCOL_TCP 0x00000000U +#define NGBE_RDB_5T_CTL0_PROTOCOL_UDP 0x00000001U +#define NGBE_RDB_5T_CTL0_PROTOCOL_SCTP 2 +#define NGBE_RDB_5T_CTL0_PRIORITY_MASK 0x00000007U +#define NGBE_RDB_5T_CTL0_PRIORITY_SHIFT 2 +#define NGBE_RDB_5T_CTL0_POOL_MASK 0x0000003FU +#define NGBE_RDB_5T_CTL0_POOL_SHIFT 8 +#define NGBE_RDB_5T_CTL0_5TUPLE_MASK_MASK 0x00000007U +#define NGBE_RDB_5T_CTL0_5TUPLE_MASK_SHIFT 27 +#define NGBE_RDB_5T_CTL0_SOURCE_PORT_MASK 0x1B +#define NGBE_RDB_5T_CTL0_DEST_PORT_MASK 0x05 +#define NGBE_RDB_5T_CTL0_PROTOCOL_COMP_MASK 0x0F +#define NGBE_RDB_5T_CTL0_POOL_MASK_EN 0x40000000U +#define NGBE_RDB_5T_CTL0_QUEUE_ENABLE 0x80000000U + +#define NGBE_RDB_ETYPE_CLS_RX_QUEUE 0x007F0000U /* bits 22:16 */ +#define NGBE_RDB_ETYPE_CLS_RX_QUEUE_SHIFT 16 +#define NGBE_RDB_ETYPE_CLS_LLI 0x20000000U /* bit 29 */ +#define NGBE_RDB_ETYPE_CLS_QUEUE_EN 0x80000000U /* bit 31 */ + +/* Receive Config masks */ +#define NGBE_RDB_PB_CTL_PBEN (0x80000000) /* Enable Receiver */ +#define NGBE_RDB_PB_CTL_DISABLED 0x1 + +#define NGBE_RDB_RA_CTL_RSS_EN 0x00000004U /* RSS Enable */ +#define NGBE_RDB_RA_CTL_RSS_MASK 0xFFFF0000U +#define NGBE_RDB_RA_CTL_RSS_IPV4_TCP 0x00010000U +#define NGBE_RDB_RA_CTL_RSS_IPV4 0x00020000U +#define NGBE_RDB_RA_CTL_RSS_IPV6 0x00100000U +#define NGBE_RDB_RA_CTL_RSS_IPV6_TCP 0x00200000U +#define NGBE_RDB_RA_CTL_RSS_IPV4_UDP 
0x00400000U +#define NGBE_RDB_RA_CTL_RSS_IPV6_UDP 0x00800000U + +/******************************* PSR Registers *******************************/ +/* psr control */ +#define NGBE_PSR_CTL 0x15000 +#define NGBE_PSR_VLAN_CTL 0x15088 +#define NGBE_PSR_VM_CTL 0x151B0 +#define NGBE_PSR_PKT_CNT 0x151B8 +#define NGBE_PSR_MNG_PKT_CNT 0x151BC +#define NGBE_PSR_DBG_DOP_CNT 0x151C0 +#define NGBE_PSR_MNG_DOP_CNT 0x151C4 +#define NGBE_PSR_VM_FLP_L 0x151C8 + +/* Header split receive */ +#define NGBE_PSR_CTL_SW_EN 0x00040000U +#define NGBE_PSR_CTL_PCSD 0x00002000U +#define NGBE_PSR_CTL_IPPCSE 0x00001000U +#define NGBE_PSR_CTL_BAM 0x00000400U +#define NGBE_PSR_CTL_UPE 0x00000200U +#define NGBE_PSR_CTL_MPE 0x00000100U +#define NGBE_PSR_CTL_MFE 0x00000080U +#define NGBE_PSR_CTL_MO 0x00000060U +#define NGBE_PSR_CTL_TPE 0x00000010U +#define NGBE_PSR_CTL_MO_SHIFT 5 +/* VT_CTL bitmasks */ +#define NGBE_PSR_VM_CTL_DIS_DEFPL 0x20000000U /* disable default pool */ +#define NGBE_PSR_VM_CTL_REPLEN 0x40000000U /* replication enabled */ +#define NGBE_PSR_VM_CTL_POOL_SHIFT 7 +#define NGBE_PSR_VM_CTL_POOL_MASK (0x7 << NGBE_PSR_VM_CTL_POOL_SHIFT) +/* VLAN Control Bit Masks */ +#define NGBE_PSR_VLAN_CTL_VET 0x0000FFFFU /* bits 0-15 */ +#define NGBE_PSR_VLAN_CTL_CFI 0x10000000U /* bit 28 */ +#define NGBE_PSR_VLAN_CTL_CFIEN 0x20000000U /* bit 29 */ +#define NGBE_PSR_VLAN_CTL_VFE 0x40000000U /* bit 30 */ + +/* vm L2 control */ +#define NGBE_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4)) +/* VMOLR bitmasks */ +#define NGBE_PSR_VM_L2CTL_LBDIS 0x00000002U /* disable loopback */ +#define NGBE_PSR_VM_L2CTL_LLB 0x00000004U /* local pool loopback */ +#define NGBE_PSR_VM_L2CTL_UPE 0x00000010U /* unicast promiscuous */ +#define NGBE_PSR_VM_L2CTL_TPE 0x00000020U /* ETAG promiscuous */ +#define NGBE_PSR_VM_L2CTL_VACC 0x00000040U /* accept nomatched vlan */ +#define NGBE_PSR_VM_L2CTL_VPE 0x00000080U /* vlan promiscuous mode */ +#define NGBE_PSR_VM_L2CTL_AUPE 0x00000100U /* accept untagged packets */ +#define 
NGBE_PSR_VM_L2CTL_ROMPE 0x00000200U /*accept packets in MTA tbl*/ +#define NGBE_PSR_VM_L2CTL_ROPE 0x00000400U /* accept packets in UC tbl*/ +#define NGBE_PSR_VM_L2CTL_BAM 0x00000800U /* accept broadcast packets*/ +#define NGBE_PSR_VM_L2CTL_MPE 0x00001000U /* multicast promiscuous */ + +/* etype switcher 1st stage */ +#define NGBE_PSR_ETYPE_SWC(_i) (0x15128 + ((_i) * 4)) /* EType Queue Filter */ +/* ETYPE Queue Filter/Select Bit Masks */ +#define NGBE_MAX_PSR_ETYPE_SWC_FILTERS 8 +#define NGBE_PSR_ETYPE_SWC_FCOE 0x08000000U /* bit 27 */ +#define NGBE_PSR_ETYPE_SWC_TX_ANTISPOOF 0x20000000U /* bit 29 */ +#define NGBE_PSR_ETYPE_SWC_1588 0x40000000U /* bit 30 */ +#define NGBE_PSR_ETYPE_SWC_FILTER_EN 0x80000000U /* bit 31 */ +#define NGBE_PSR_ETYPE_SWC_POOL_ENABLE (1 << 26) /* bit 26 */ +#define NGBE_PSR_ETYPE_SWC_POOL_SHIFT 20 +/* + * ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters + * here!! + * + * Current filters: + * EAPOL 802.1x (0x888e): Filter 0 + * FCoE (0x8906): Filter 2 + * 1588 (0x88f7): Filter 3 + * FIP (0x8914): Filter 4 + * LLDP (0x88CC): Filter 5 + * LACP (0x8809): Filter 6 + * FC (0x8808): Filter 7 + */ +#define NGBE_PSR_ETYPE_SWC_FILTER_EAPOL 0 +#define NGBE_PSR_ETYPE_SWC_FILTER_FCOE 2 +#define NGBE_PSR_ETYPE_SWC_FILTER_1588 3 +#define NGBE_PSR_ETYPE_SWC_FILTER_FIP 4 +#define NGBE_PSR_ETYPE_SWC_FILTER_LLDP 5 +#define NGBE_PSR_ETYPE_SWC_FILTER_LACP 6 +#define NGBE_PSR_ETYPE_SWC_FILTER_FC 7 + +/* mcast/ucast overflow tbl */ +#define NGBE_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4)) +#define NGBE_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4)) + +/* vlan tbl */ +#define NGBE_PSR_VLAN_TBL(_i) (0x16000 + ((_i) * 4)) + +/* mac switcher */ +#define NGBE_PSR_MAC_SWC_AD_L 0x16200 +#define NGBE_PSR_MAC_SWC_AD_H 0x16204 +#define NGBE_PSR_MAC_SWC_VM 0x16208 +#define NGBE_PSR_MAC_SWC_IDX 0x16210 +/* RAH */ +#define NGBE_PSR_MAC_SWC_AD_H_AD(v) (((v) & 0xFFFF)) +#define NGBE_PSR_MAC_SWC_AD_H_ADTYPE(v) (((v) & 0x1) 
<< 30) +#define NGBE_PSR_MAC_SWC_AD_H_AV 0x80000000U +#define NGBE_CLEAR_VMDQ_ALL 0xFFFFFFFFU + +/* vlan switch */ +#define NGBE_PSR_VLAN_SWC 0x16220 +#define NGBE_PSR_VLAN_SWC_VM_L 0x16224 +#define NGBE_PSR_VLAN_SWC_IDX 0x16230 /* 32 vlan entries */ +/* VLAN pool filtering masks */ +#define NGBE_PSR_VLAN_SWC_VIEN 0x80000000U /* filter is valid */ +#define NGBE_PSR_VLAN_SWC_ENTRIES 32 +#define NGBE_PSR_VLAN_SWC_VLANID_MASK 0x00000FFFU +#define NGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ + +/* Management */ +#define NGBE_PSR_MNG_FIT_CTL 0x15820 +/* Management Bit Fields and Masks */ +#define NGBE_PSR_MNG_FIT_CTL_MPROXYE 0x40000000U /* Management Proxy Enable*/ +#define NGBE_PSR_MNG_FIT_CTL_RCV_TCO_EN 0x00020000U /* Rcv TCO packet enable */ +#define NGBE_PSR_MNG_FIT_CTL_EN_BMC2OS 0x10000000U /* Ena BMC2OS and OS2BMC + *traffic */ +#define NGBE_PSR_MNG_FIT_CTL_EN_BMC2OS_SHIFT 28 + +#define NGBE_PSR_MNG_FLEX_SEL 0x1582C +#define NGBE_PSR_MNG_FLEX_DW_L(_i) (0x15A00 + ((_i) * 16)) /* [0,15] */ +#define NGBE_PSR_MNG_FLEX_DW_H(_i) (0x15A04 + ((_i) * 16)) +#define NGBE_PSR_MNG_FLEX_MSK(_i) (0x15A08 + ((_i) * 16)) + +/* mirror */ +#define NGBE_PSR_MR_CTL(_i) (0x15B00 + ((_i) * 4)) /* [0,3] */ +#define NGBE_PSR_MR_VLAN_L(_i) (0x15B10 + ((_i) * 8)) +#define NGBE_PSR_MR_VM_L(_i) (0x15B30 + ((_i) * 8)) + +/* 1588 */ +#define NGBE_PSR_1588_CTL 0x15188 /* Rx Time Sync Control register - RW */ +#define NGBE_PSR_1588_STMPL 0x151E8 /* Rx timestamp Low - RO */ +#define NGBE_PSR_1588_STMPH 0x151A4 /* Rx timestamp High - RO */ +#define NGBE_PSR_1588_ATTRL 0x151A0 /* Rx timestamp attribute low - RO */ +#define NGBE_PSR_1588_ATTRH 0x151A8 /* Rx timestamp attribute high - RO */ +#define NGBE_PSR_1588_MSGTYPE 0x15120 /* RX message type register low - RW */ +/* 1588 CTL Bit */ +#define NGBE_PSR_1588_CTL_VALID 0x00000001U /* Rx timestamp valid */ +#define NGBE_PSR_1588_CTL_TYPE_MASK 0x0000000EU /* Rx type mask */ +#define NGBE_PSR_1588_CTL_TYPE_L2_V2 0x00 +#define 
NGBE_PSR_1588_CTL_TYPE_L4_V1 0x02 +#define NGBE_PSR_1588_CTL_TYPE_L2_L4_V2 0x04 +#define NGBE_PSR_1588_CTL_TYPE_EVENT_V2 0x0A +#define NGBE_PSR_1588_CTL_ENABLED 0x00000010U /* Rx Timestamp enabled*/ +/* 1588 msg type bit */ +#define NGBE_PSR_1588_MSGTYPE_V1_CTRLT_MASK 0x000000FFU +#define NGBE_PSR_1588_MSGTYPE_V1_SYNC_MSG 0x00 +#define NGBE_PSR_1588_MSGTYPE_V1_DELAY_REQ_MSG 0x01 +#define NGBE_PSR_1588_MSGTYPE_V1_FOLLOWUP_MSG 0x02 +#define NGBE_PSR_1588_MSGTYPE_V1_DELAY_RESP_MSG 0x03 +#define NGBE_PSR_1588_MSGTYPE_V1_MGMT_MSG 0x04 +#define NGBE_PSR_1588_MSGTYPE_V2_MSGID_MASK 0x0000FF00U +#define NGBE_PSR_1588_MSGTYPE_V2_SYNC_MSG 0x0000 +#define NGBE_PSR_1588_MSGTYPE_V2_DELAY_REQ_MSG 0x0100 +#define NGBE_PSR_1588_MSGTYPE_V2_PDELAY_REQ_MSG 0x0200 +#define NGBE_PSR_1588_MSGTYPE_V2_PDELAY_RESP_MSG 0x0300 +#define NGBE_PSR_1588_MSGTYPE_V2_FOLLOWUP_MSG 0x0800 +#define NGBE_PSR_1588_MSGTYPE_V2_DELAY_RESP_MSG 0x0900 +#define NGBE_PSR_1588_MSGTYPE_V2_PDELAY_FOLLOWUP_MSG 0x0A00 +#define NGBE_PSR_1588_MSGTYPE_V2_ANNOUNCE_MSG 0x0B00 +#define NGBE_PSR_1588_MSGTYPE_V2_SIGNALLING_MSG 0x0C00 +#define NGBE_PSR_1588_MSGTYPE_V2_MGMT_MSG 0x0D00 + +/* Wake up registers */ +#define NGBE_PSR_WKUP_CTL 0x15B80 +#define NGBE_PSR_WKUP_IPV 0x15B84 +#define NGBE_PSR_LAN_FLEX_SEL 0x15B8C +#define NGBE_PSR_WKUP_IP4TBL(_i) (0x15BC0 + ((_i) * 4)) /* [0,3] */ +#define NGBE_PSR_WKUP_IP6TBL(_i) (0x15BE0 + ((_i) * 4)) +#define NGBE_PSR_LAN_FLEX_DW_L(_i) (0x15C00 + ((_i) * 16)) /* [0,15] */ +#define NGBE_PSR_LAN_FLEX_DW_H(_i) (0x15C04 + ((_i) * 16)) +#define NGBE_PSR_LAN_FLEX_MSK(_i) (0x15C08 + ((_i) * 16)) +#define NGBE_PSR_LAN_FLEX_CTL 0x15CFC +/* Wake Up Filter Control Bit */ +#define NGBE_PSR_WKUP_CTL_LNKC 0x00000001U /* Link Status Change Wakeup Enable*/ +#define NGBE_PSR_WKUP_CTL_MAG 0x00000002U /* Magic Packet Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_EX 0x00000004U /* Directed Exact Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_MC 0x00000008U /* Directed Multicast Wakeup Enable*/ +#define 
NGBE_PSR_WKUP_CTL_BC 0x00000010U /* Broadcast Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_ARP 0x00000020U /* ARP Request Packet Wakeup Enable*/ +#define NGBE_PSR_WKUP_CTL_IPV4 0x00000040U /* Directed IPv4 Pkt Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_IPV6 0x00000080U /* Directed IPv6 Pkt Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_IGNORE_TCO 0x00008000U /* Ignore WakeOn TCO pkts */ +#define NGBE_PSR_WKUP_CTL_FLX0 0x00010000U /* Flexible Filter 0 Ena */ +#define NGBE_PSR_WKUP_CTL_FLX1 0x00020000U /* Flexible Filter 1 Ena */ +#define NGBE_PSR_WKUP_CTL_FLX2 0x00040000U /* Flexible Filter 2 Ena */ +#define NGBE_PSR_WKUP_CTL_FLX3 0x00080000U /* Flexible Filter 3 Ena */ +#define NGBE_PSR_WKUP_CTL_FLX4 0x00100000U /* Flexible Filter 4 Ena */ +#define NGBE_PSR_WKUP_CTL_FLX5 0x00200000U /* Flexible Filter 5 Ena */ +#define NGBE_PSR_WKUP_CTL_FLX_FILTERS 0x000F0000U /* Mask for 4 flex filters */ +#define NGBE_PSR_WKUP_CTL_FLX_FILTERS_6 0x003F0000U /* Mask for 6 flex filters*/ +#define NGBE_PSR_WKUP_CTL_FLX_FILTERS_8 0x00FF0000U /* Mask for 8 flex filters*/ +#define NGBE_PSR_WKUP_CTL_FW_RST_WK 0x80000000U /* Ena wake on FW reset + * assertion */ +/* Mask for Ext. 
flex filters */ +#define NGBE_PSR_WKUP_CTL_EXT_FLX_FILTERS 0x00300000U +#define NGBE_PSR_WKUP_CTL_ALL_FILTERS 0x000F00FFU /* Mask all 4 flex filters*/ +#define NGBE_PSR_WKUP_CTL_ALL_FILTERS_6 0x003F00FFU /* Mask all 6 flex filters*/ +#define NGBE_PSR_WKUP_CTL_ALL_FILTERS_8 0x00FF00FFU /* Mask all 8 flex filters*/ +#define NGBE_PSR_WKUP_CTL_FLX_OFFSET 16 /* Offset to the Flex Filters bits*/ + +#define NGBE_PSR_MAX_SZ 0x15020 + +/****************************** TDB ******************************************/ +#define NGBE_TDB_TFCS 0x1CE00 +#define NGBE_TDB_PB_SZ 0x1CC00 + +#define NGBE_TDB_PRB_CTL 0x17010 +#define NGBE_TDB_PBRARB_CTL 0x1CD00 + +#define NGBE_TDB_PB_SZ_MAX 0x00005000U /* 20KB Packet Buffer */ +#define NGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ +#define NGBE_MAX_PB 8 +/* statistic */ +#define NGBE_TDB_OUT_PKT_CNT 0x1CF00 +#define NGBE_TDB_MNG_PKT_CNT 0x1CF04 +#define NGBE_TDB_LB_PKT_CNT 0x1CF08 +#define NGBE_TDB_MNG_LARGE_DOP_CNT 0x1CF0C + +/****************************** TSEC *****************************************/ +/* Security Control Registers */ +#define NGBE_TSEC_CTL 0x1D000 +#define NGBE_TSEC_ST 0x1D004 +#define NGBE_TSEC_BUF_AF 0x1D008 +#define NGBE_TSEC_BUF_AE 0x1D00C +#define NGBE_TSEC_MIN_IFG 0x1D020 + +/* 1588 */ +#define NGBE_TSEC_1588_CTL 0x11F00 /* Tx Time Sync Control reg */ +#define NGBE_TSEC_1588_STMPL 0x11F04 /* Tx timestamp value Low */ +#define NGBE_TSEC_1588_STMPH 0x11F08 /* Tx timestamp value High */ +#define NGBE_TSEC_1588_SYSTIML 0x11F0C /* System time register Low */ +#define NGBE_TSEC_1588_SYSTIMH 0x11F10 /* System time register High */ +#define NGBE_TSEC_1588_INC 0x11F14 /* Increment attributes reg */ +#define NGBE_TSEC_1588_INC_IV(v) ((v) & 0x7FFFFFF) + +#define NGBE_TSEC_1588_ADJL 0x11F18 /* Time Adjustment Offset reg Low */ +#define NGBE_TSEC_1588_ADJH 0x11F1C /* Time Adjustment Offset reg High*/ + +#define NGBE_TSEC_1588_INT_ST 0x11F20 +#define NGBE_TSEC_1588_INT_EN 0x11F24 + +/* 1588 fields */ +#define 
NGBE_TSEC_1588_CTL_VALID 0x00000001U /* Tx timestamp valid */ +#define NGBE_TSEC_1588_CTL_ENABLED 0x00000010U /* Tx timestamping enabled */ + +#define NGBE_TSEC_1588_AUX_CTL 0x11F28 +#define NGBE_TSEC_1588_TRGT_L(i) (0x11F2C + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_TRGT_H(i) (0x11F30 + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_FREQ_CLK_L(i) (0x11F3C + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_FREQ_CLK_H(i) (0x11F40 + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_AUX_STMP_L(i) (0x11F4C + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_AUX_STMP_H(i) (0x11F50 + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_SDP(n) (0x11F5C + ((n) * 4)) /* [0,3] */ + + + +/********************************* RSEC **************************************/ +/* general rsec */ +#define NGBE_RSEC_CTL 0x17000 +#define NGBE_RSEC_ST 0x17004 +/* general rsec fields */ +#define NGBE_RSEC_CTL_SECRX_DIS 0x00000001U +#define NGBE_RSEC_CTL_RX_DIS 0x00000002U +#define NGBE_RSEC_CTL_CRC_STRIP 0x00000004U +#define NGBE_RSEC_CTL_SAVE_MAC_ERR 0x00000040U +#define NGBE_RSEC_ST_RSEC_RDY 0x00000001U +#define NGBE_RSEC_ST_RSEC_OFLD_DIS 0x00000002U +#define NGBE_RSEC_ST_ECC_RXERR 0x00000004U + +/* link sec */ +#define NGBE_RSEC_LSEC_CAP 0x17200 +#define NGBE_RSEC_LSEC_CTL 0x17204 +#define NGBE_RSEC_LSEC_SCI_L 0x17208 +#define NGBE_RSEC_LSEC_SCI_H 0x1720C +#define NGBE_RSEC_LSEC_SA0 0x17210 +#define NGBE_RSEC_LSEC_SA1 0x17214 +#define NGBE_RSEC_LSEC_PKNUM0 0x17218 +#define NGBE_RSEC_LSEC_PKNUM1 0x1721C +#define NGBE_RSEC_LSEC_KEY0(_n) 0x17220 +#define NGBE_RSEC_LSEC_KEY1(_n) 0x17230 +#define NGBE_RSEC_LSEC_UNTAG_PKT 0x17240 +#define NGBE_RSEC_LSEC_DEC_OCTET 0x17244 +#define NGBE_RSEC_LSEC_VLD_OCTET 0x17248 +#define NGBE_RSEC_LSEC_BAD_PKT 0x1724C +#define NGBE_RSEC_LSEC_NOSCI_PKT 0x17250 +#define NGBE_RSEC_LSEC_UNSCI_PKT 0x17254 +#define NGBE_RSEC_LSEC_UNCHK_PKT 0x17258 +#define NGBE_RSEC_LSEC_DLY_PKT 0x1725C +#define NGBE_RSEC_LSEC_LATE_PKT 0x17260 +#define NGBE_RSEC_LSEC_OK_PKT(_n) 0x17264 
+#define NGBE_RSEC_LSEC_INV_PKT(_n) 0x17274 +#define NGBE_RSEC_LSEC_BADSA_PKT 0x1727C +#define NGBE_RSEC_LSEC_INVSA_PKT 0x17280 + +/* ipsec */ +#define NGBE_RSEC_IPS_IDX 0x17100 +#define NGBE_RSEC_IPS_IDX_WT 0x80000000U +#define NGBE_RSEC_IPS_IDX_RD 0x40000000U +#define NGBE_RSEC_IPS_IDX_TB_IDX 0x0U /* */ +#define NGBE_RSEC_IPS_IDX_TB_IP 0x00000002U +#define NGBE_RSEC_IPS_IDX_TB_SPI 0x00000004U +#define NGBE_RSEC_IPS_IDX_TB_KEY 0x00000006U +#define NGBE_RSEC_IPS_IDX_EN 0x00000001U +#define NGBE_RSEC_IPS_IP(i) (0x17104 + ((i) * 4)) +#define NGBE_RSEC_IPS_SPI 0x17114 +#define NGBE_RSEC_IPS_IP_IDX 0x17118 +#define NGBE_RSEC_IPS_KEY(i) (0x1711C + ((i) * 4)) +#define NGBE_RSEC_IPS_SALT 0x1712C +#define NGBE_RSEC_IPS_MODE 0x17130 +#define NGBE_RSEC_IPS_MODE_IPV6 0x00000010 +#define NGBE_RSEC_IPS_MODE_DEC 0x00000008 +#define NGBE_RSEC_IPS_MODE_ESP 0x00000004 +#define NGBE_RSEC_IPS_MODE_AH 0x00000002 +#define NGBE_RSEC_IPS_MODE_VALID 0x00000001 + +/************************************** ETH PHY ******************************/ +#define NGBE_XPCS_IDA_ADDR 0x13000 +#define NGBE_XPCS_IDA_DATA 0x13004 +#define NGBE_ETHPHY_IDA_ADDR 0x13008 +#define NGBE_ETHPHY_IDA_DATA 0x1300C + +/************************************** MNG ********************************/ +#define NGBE_MNG_FW_SM 0x1E000 +#define NGBE_MNG_SWFW_SYNC 0x1E008 +#define NGBE_MNG_MBOX 0x1E100 +#define NGBE_MNG_MBOX_CTL 0x1E044 + + +#define NGBE_MNG_OS2BMC_CNT 0x1E094 +#define NGBE_MNG_BMC2OS_CNT 0x1E090 + +/* Firmware Semaphore Register */ +#define NGBE_MNG_FW_SM_MODE_MASK 0xE +#define NGBE_MNG_FW_SM_TS_ENABLED 0x1 + +/* SW_FW_SYNC definitions */ +#define NGBE_MNG_SWFW_SYNC_SW_PHY 0x0001 +#define NGBE_MNG_SWFW_SYNC_SW_FLASH 0x0008 +#define NGBE_MNG_SWFW_SYNC_SW_MB 0x0004 + +#define NGBE_MNG_MBOX_CTL_SWRDY 0x1 +#define NGBE_MNG_MBOX_CTL_SWACK 0x2 +#define NGBE_MNG_MBOX_CTL_FWRDY 0x4 +#define NGBE_MNG_MBOX_CTL_FWACK 0x8 + +/************************************* ETH MAC *****************************/ +#define 
NGBE_MAC_TX_CFG 0x11000 +#define NGBE_MAC_RX_CFG 0x11004 +#define NGBE_MAC_PKT_FLT 0x11008 +#define NGBE_MAC_PKT_FLT_PR (0x1) /* promiscuous mode */ +#define NGBE_MAC_PKT_FLT_RA (0x80000000) /* receive all */ +#define NGBE_MAC_WDG_TIMEOUT 0x1100C +#define NGBE_MAC_TX_FLOW_CTRL 0x11070 +#define NGBE_MAC_RX_FLOW_CTRL 0x11090 +#define NGBE_MAC_INT_ST 0x110B0 +#define NGBE_MAC_INT_EN 0x110B4 +#define NGBE_MAC_ADDRESS0_HIGH 0x11300 +#define NGBE_MAC_ADDRESS0_LOW 0x11304 + +#define NGBE_MAC_TX_CFG_TE 0x00000001U +#define NGBE_MAC_TX_CFG_SPEED_MASK 0x60000000U +#define NGBE_MAC_TX_CFG_SPEED_1G 0x60000000U +#define NGBE_MAC_RX_CFG_RE 0x00000001U +#define NGBE_MAC_RX_CFG_JE 0x00000100U +#define NGBE_MAC_RX_CFG_LM 0x00000400U +#define NGBE_MAC_WDG_TIMEOUT_PWE 0x00000100U +#define NGBE_MAC_WDG_TIMEOUT_WTO_MASK 0x0000000FU +#define NGBE_MAC_WDG_TIMEOUT_WTO_DELTA 2 + +#define NGBE_MAC_RX_FLOW_CTRL_RFE 0x00000001U /* receive fc enable */ + +#define NGBE_MSCA 0x11200 +#define NGBE_MSCA_RA(v) ((0xFFFF & (v))) +#define NGBE_MSCA_PA(v) ((0x1F & (v)) << 16) +#define NGBE_MSCA_DA(v) ((0x1F & (v)) << 21) +#define NGBE_MSCC 0x11204 +#define NGBE_MSCC_DATA(v) ((0xFFFF & (v))) +#define NGBE_MSCC_CMD(v) ((0x3 & (v)) << 16) +enum NGBE_MSCA_CMD_value { + NGBE_MSCA_CMD_RSV = 0, + NGBE_MSCA_CMD_WRITE, + NGBE_MSCA_CMD_POST_READ, + NGBE_MSCA_CMD_READ, +}; +#define NGBE_MSCC_SADDR ((0x1U) << 18) +#define NGBE_MSCC_CR(v) ((0x8U & (v)) << 19) +#define NGBE_MSCC_BUSY ((0x1U) << 22) +#define NGBE_MDIO_CLK(v) ((0x7 & (v)) << 19) + + +/* EEE registers */ + +/* statistic */ +#define NGBE_MAC_LXOFFRXC 0x11988 +#define NGBE_MAC_PXOFFRXC 0x119DC +#define NGBE_RX_BC_FRAMES_GOOD_LOW 0x11918 +#define NGBE_RX_CRC_ERROR_FRAMES_LOW 0x11928 +#define NGBE_RX_LEN_ERROR_FRAMES_LOW 0x11978 +#define NGBE_RX_UNDERSIZE_FRAMES_GOOD 0x11938 +#define NGBE_RX_OVERSIZE_FRAMES_GOOD 0x1193C +#define NGBE_RX_FRAME_CNT_GOOD_BAD_LOW 0x11900 +#define NGBE_TX_FRAME_CNT_GOOD_BAD_LOW 0x1181C +#define NGBE_TX_MC_FRAMES_GOOD_LOW 
0x1182C +#define NGBE_TX_BC_FRAMES_GOOD_LOW 0x11824 +#define NGBE_MMC_CONTROL 0x11800 +#define NGBE_MMC_CONTROL_RSTONRD 0x4 /* reset on read */ +#define NGBE_MMC_CONTROL_UP 0x700 + + +/********************************* BAR registers ***************************/ +/* Interrupt Registers */ +#define NGBE_BME_CTL 0x12020 +#define NGBE_PX_MISC_IC 0x100 +#define NGBE_PX_MISC_ICS 0x104 +#define NGBE_PX_MISC_IEN 0x108 +#define NGBE_PX_MISC_IVAR 0x4FC +#define NGBE_PX_GPIE 0x118 +#define NGBE_PX_ISB_ADDR_L 0x160 +#define NGBE_PX_ISB_ADDR_H 0x164 +#define NGBE_PX_TCP_TIMER 0x170 +#define NGBE_PX_ITRSEL 0x180 +#define NGBE_PX_IC 0x120 +#define NGBE_PX_ICS 0x130 +#define NGBE_PX_IMS 0x140 +#define NGBE_PX_IMC 0x150 +#define NGBE_PX_IVAR(_i) (0x500 + (_i) * 4) /* [0,3] */ +#define NGBE_PX_ITR(_i) (0x200 + (_i) * 4) /* [0,8] */ +#define NGBE_PX_TRANSACTION_PENDING 0x168 +#define NGBE_PX_INTA 0x110 + +/* Interrupt register bitmasks */ +/* Extended Interrupt Cause Read */ +#define NGBE_PX_MISC_IC_DEV_RST 0x00000400U /* device reset event */ +#define NGBE_PX_MISC_IC_TIMESYNC 0x00000800U /* time sync */ +#define NGBE_PX_MISC_IC_STALL 0x00001000U /* trans or recv path is + * stalled */ +#define NGBE_PX_MISC_IC_LINKSEC 0x00002000U /* Tx LinkSec require key + * exchange */ +#define NGBE_PX_MISC_IC_RX_MISS 0x00004000U /* Packet Buffer Overrun */ +#define NGBE_PX_MISC_IC_I2C 0x00010000U /* I2C interrupt */ +#define NGBE_PX_MISC_IC_ETH_EVENT 0x00020000U /* err reported by MAC except + * eth link down */ +#define NGBE_PX_MISC_IC_PHY 0x00040000U /* link up */ +#define NGBE_PX_MISC_IC_INT_ERR 0x00100000U /* integrity error */ +#define NGBE_PX_MISC_IC_SPI 0x00200000U /* SPI interface */ +#define NGBE_PX_MISC_IC_VF_MBOX 0x00800000U /* VF-PF message box */ +#define NGBE_PX_MISC_IC_GPIO 0x04000000U /* GPIO interrupt */ +#define NGBE_PX_MISC_IC_PCIE_REQ_ERR 0x08000000U /* pcie request error int */ +#define NGBE_PX_MISC_IC_OVER_HEAT 0x10000000U /* overheat detection */ +#define 
NGBE_PX_MISC_IC_PROBE_MATCH 0x20000000U /* probe match */ +#define NGBE_PX_MISC_IC_MNG_HOST_MBOX 0x40000000U /* mng mailbox */ +#define NGBE_PX_MISC_IC_TIMER 0x80000000U /* tcp timer */ + +/* Extended Interrupt Cause Set */ +#define NGBE_PX_MISC_ICS_ETH_LKDN 0x00000100U +#define NGBE_PX_MISC_ICS_DEV_RST 0x00000400U +#define NGBE_PX_MISC_ICS_TIMESYNC 0x00000800U +#define NGBE_PX_MISC_ICS_STALL 0x00001000U +#define NGBE_PX_MISC_ICS_LINKSEC 0x00002000U +#define NGBE_PX_MISC_ICS_RX_MISS 0x00004000U +#define NGBE_PX_MISC_ICS_FLOW_DIR 0x00008000U +#define NGBE_PX_MISC_ICS_I2C 0x00010000U +#define NGBE_PX_MISC_ICS_ETH_EVENT 0x00020000U +#define NGBE_PX_MISC_ICS_ETH_LK 0x00040000U +#define NGBE_PX_MISC_ICS_ETH_AN 0x00080000U +#define NGBE_PX_MISC_ICS_INT_ERR 0x00100000U +#define NGBE_PX_MISC_ICS_SPI 0x00200000U +#define NGBE_PX_MISC_ICS_VF_MBOX 0x00800000U +#define NGBE_PX_MISC_ICS_GPIO 0x04000000U +#define NGBE_PX_MISC_ICS_PCIE_REQ_ERR 0x08000000U +#define NGBE_PX_MISC_ICS_OVER_HEAT 0x10000000U +#define NGBE_PX_MISC_ICS_PROBE_MATCH 0x20000000U +#define NGBE_PX_MISC_ICS_MNG_HOST_MBOX 0x40000000U +#define NGBE_PX_MISC_ICS_TIMER 0x80000000U + +/* Extended Interrupt Enable Set */ +#define NGBE_PX_MISC_IEN_ETH_LKDN 0x00000100U +#define NGBE_PX_MISC_IEN_DEV_RST 0x00000400U +#define NGBE_PX_MISC_IEN_TIMESYNC 0x00000800U +#define NGBE_PX_MISC_IEN_STALL 0x00001000U +#define NGBE_PX_MISC_IEN_LINKSEC 0x00002000U +#define NGBE_PX_MISC_IEN_RX_MISS 0x00004000U +#define NGBE_PX_MISC_IEN_I2C 0x00010000U +#define NGBE_PX_MISC_IEN_ETH_EVENT 0x00020000U +#define NGBE_PX_MISC_IEN_ETH_LK 0x00040000U +#define NGBE_PX_MISC_IEN_ETH_AN 0x00080000U +#define NGBE_PX_MISC_IEN_INT_ERR 0x00100000U +#define NGBE_PX_MISC_IEN_SPI 0x00200000U +#define NGBE_PX_MISC_IEN_VF_MBOX 0x00800000U +#define NGBE_PX_MISC_IEN_GPIO 0x04000000U +#define NGBE_PX_MISC_IEN_PCIE_REQ_ERR 0x08000000U +#define NGBE_PX_MISC_IEN_OVER_HEAT 0x10000000U +#define NGBE_PX_MISC_IEN_PROBE_MATCH 0x20000000U +#define 
NGBE_PX_MISC_IEN_MNG_HOST_MBOX 0x40000000U +#define NGBE_PX_MISC_IEN_TIMER 0x80000000U + +#define NGBE_PX_MISC_IEN_MASK ( \ + NGBE_PX_MISC_IEN_ETH_LKDN| \ + NGBE_PX_MISC_IEN_DEV_RST | \ + NGBE_PX_MISC_IEN_ETH_EVENT | \ + NGBE_PX_MISC_IEN_ETH_LK | \ + NGBE_PX_MISC_IEN_ETH_AN | \ + NGBE_PX_MISC_IEN_INT_ERR | \ + NGBE_PX_MISC_IEN_VF_MBOX | \ + NGBE_PX_MISC_IEN_GPIO | \ + NGBE_PX_MISC_IEN_MNG_HOST_MBOX | \ + NGBE_PX_MISC_IEN_STALL | \ + NGBE_PX_MISC_IEN_PCIE_REQ_ERR | \ + NGBE_PX_MISC_IEN_TIMER) + +/* General purpose Interrupt Enable */ +#define NGBE_PX_GPIE_MODEL 0x00000001U +#define NGBE_PX_GPIE_IMEN 0x00000002U +#define NGBE_PX_GPIE_LL_INTERVAL 0x000000F0U + +/* Interrupt Vector Allocation Registers */ +#define NGBE_PX_IVAR_REG_NUM 64 +#define NGBE_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ + +#define NGBE_MAX_INT_RATE 500000 +#define NGBE_MIN_INT_RATE 980 +#define NGBE_MAX_EITR 0x00007FFCU +#define NGBE_MIN_EITR 4 +#define NGBE_PX_ITR_ITR_INT_MASK 0x00000FF8U +#define NGBE_PX_ITR_LLI_CREDIT 0x001f0000U +#define NGBE_PX_ITR_LLI_MOD 0x00008000U +#define NGBE_PX_ITR_CNT_WDIS 0x80000000U +#define NGBE_PX_ITR_ITR_CNT 0x0FE00000U + +/* transmit DMA Registers */ +#define NGBE_PX_TR_BAL(_i) (0x03000 + ((_i) * 0x40)) /* [0, 7] */ +#define NGBE_PX_TR_BAH(_i) (0x03004 + ((_i) * 0x40)) +#define NGBE_PX_TR_WP(_i) (0x03008 + ((_i) * 0x40)) +#define NGBE_PX_TR_RP(_i) (0x0300C + ((_i) * 0x40)) +#define NGBE_PX_TR_CFG(_i) (0x03010 + ((_i) * 0x40)) +/* Transmit Config masks */ +#define NGBE_PX_TR_CFG_ENABLE (1) /* Ena specific Tx Queue */ +#define NGBE_PX_TR_CFG_TR_SIZE_SHIFT 1 /* tx desc number per ring */ +#define NGBE_PX_TR_CFG_SWFLSH (1 << 26) /* Tx Desc. 
wr-bk flushing */ +#define NGBE_PX_TR_CFG_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ +#define NGBE_PX_TR_CFG_THRE_SHIFT 8 + +#define NGBE_PX_TR_RPn(q_per_pool, vf_number, vf_q_index) \ + (NGBE_PX_TR_RP((q_per_pool)*(vf_number) + (vf_q_index))) + +#define NGBE_PX_TR_WPn(q_per_pool, vf_number, vf_q_index) \ + (NGBE_PX_TR_WP((q_per_pool)*(vf_number) + (vf_q_index))) + +/* Receive DMA Registers */ +#define NGBE_PX_RR_BAL(_i) (0x01000 + ((_i) * 0x40)) /* [0, 7] */ +#define NGBE_PX_RR_BAH(_i) (0x01004 + ((_i) * 0x40)) +#define NGBE_PX_RR_WP(_i) (0x01008 + ((_i) * 0x40)) +#define NGBE_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40)) +#define NGBE_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40)) +/* PX_RR_CFG bit definitions */ +#define NGBE_PX_RR_CFG_RR_SIZE_SHIFT 1 +#define NGBE_PX_RR_CFG_BSIZEPKT_SHIFT 2 /* so many KBs */ +#define NGBE_PX_RR_CFG_BSIZEHDRSIZE_SHIFT 6 /* 64byte resolution (>> 6) + * + at bit 8 offset (<< 12) + * = (<< 6) + */ +#define NGBE_PX_RR_CFG_DROP_EN 0x40000000U +#define NGBE_PX_RR_CFG_VLAN 0x80000000U +#define NGBE_PX_RR_CFG_RSC 0x20000000U +#define NGBE_PX_RR_CFG_CNTAG 0x10000000U +#define NGBE_PX_RR_CFG_RSC_CNT_MD 0x08000000U +#define NGBE_PX_RR_CFG_SPLIT_MODE 0x04000000U +#define NGBE_PX_RR_CFG_STALL 0x02000000U +#define NGBE_PX_RR_CFG_MAX_RSCBUF_1 0x00000000U +#define NGBE_PX_RR_CFG_MAX_RSCBUF_4 0x00800000U +#define NGBE_PX_RR_CFG_MAX_RSCBUF_8 0x01000000U +#define NGBE_PX_RR_CFG_MAX_RSCBUF_16 0x01800000U +#define NGBE_PX_RR_CFG_RR_THER 0x00070000U +#define NGBE_PX_RR_CFG_RR_THER_SHIFT 16 + +#define NGBE_PX_RR_CFG_RR_HDR_SZ 0x0000F000U +#define NGBE_PX_RR_CFG_RR_BUF_SZ 0x00000F00U +#define NGBE_PX_RR_CFG_RR_SZ 0x0000007EU +#define NGBE_PX_RR_CFG_RR_EN 0x00000001U + +/* statistic */ +#define NGBE_PX_MPRC(_i) (0x1020 + ((_i) * 64)) /* [0,7] */ +#define NGBE_PX_BPRC(_i) (0x1024 + ((_i) * 64)) + + +#define NGBE_PX_MPTC(_i) (0x3020 + ((_i) * 64)) /* [0,7] */ +#define NGBE_PX_BPTC(_i) (0x3024 + ((_i) * 64)) + +#define NGBE_VX_GPRC 0x01014 +#define NGBE_VX_GORC_LSB 
0x01018 +#define NGBE_VX_GORC_MSB 0x0101C +#define NGBE_VX_MPRC 0x01020 +#define NGBE_VX_BPRC 0x01024 + +#define NGBE_VX_GPTC 0x03014 +#define NGBE_VX_GOTC_LSB 0x03018 +#define NGBE_VX_GOTC_MSB 0x0301C +#define NGBE_VX_MPTC 0x03020 +#define NGBE_VX_BPTC 0x03024 + + + +#define NGBE_PX_GPRC 0x12504 + +#define NGBE_PX_GPTC 0x18308 + +#define NGBE_PX_GORC_LSB 0x12508 +#define NGBE_PX_GORC_MSB 0x1250C + +#define NGBE_PX_GOTC_LSB 0x1830C +#define NGBE_PX_GOTC_MSB 0x18310 + +/*************************** Flash region definition *************************/ +/* EEC Register */ +#define NGBE_EEC_SK 0x00000001U /* EEPROM Clock */ +#define NGBE_EEC_CS 0x00000002U /* EEPROM Chip Select */ +#define NGBE_EEC_DI 0x00000004U /* EEPROM Data In */ +#define NGBE_EEC_DO 0x00000008U /* EEPROM Data Out */ +#define NGBE_EEC_FWE_MASK 0x00000030U /* FLASH Write Enable */ +#define NGBE_EEC_FWE_DIS 0x00000010U /* Disable FLASH writes */ +#define NGBE_EEC_FWE_EN 0x00000020U /* Enable FLASH writes */ +#define NGBE_EEC_FWE_SHIFT 4 +#define NGBE_EEC_REQ 0x00000040U /* EEPROM Access Request */ +#define NGBE_EEC_GNT 0x00000080U /* EEPROM Access Grant */ +#define NGBE_EEC_PRES 0x00000100U /* EEPROM Present */ +#define NGBE_EEC_ARD 0x00000200U /* EEPROM Auto Read Done */ +#define NGBE_EEC_FLUP 0x00800000U /* Flash update command */ +#define NGBE_EEC_SEC1VAL 0x02000000U /* Sector 1 Valid */ +#define NGBE_EEC_FLUDONE 0x04000000U /* Flash update done */ +/* EEPROM Addressing bits based on type (0-small, 1-large) */ +#define NGBE_EEC_ADDR_SIZE 0x00000400U +#define NGBE_EEC_SIZE 0x00007800U /* EEPROM Size */ +#define NGBE_EERD_MAX_ADDR 0x00003FFFU /* EERD allows 14 bits for addr. 
*/ + +#define NGBE_EEC_SIZE_SHIFT 11 +#define NGBE_EEPROM_WORD_SIZE_SHIFT 6 +#define NGBE_EEPROM_OPCODE_BITS 8 + +/* FLA Register */ +#define NGBE_FLA_LOCKED 0x00000040U + +/* Part Number String Length */ +#define NGBE_PBANUM_LENGTH 32 + +/* Checksum and EEPROM pointers */ +#define NGBE_PBANUM_PTR_GUARD 0xFAFA +#define NGBE_CHECKSUM_CAP_ST_PASS 0x80658383 +#define NGBE_CHECKSUM_CAP_ST_FAIL 0x70657376 +#define NGBE_ERR_ST 0xffffffff +#define NGBE_EEPROM_CHECKSUM 0x2F +#define NGBE_EEPROM_SUM 0xBABA +#define NGBE_OPTION_ROM_PTR 0x05 +#define NGBE_SHADOW_RAM_SIZE 0x4000 +#define NGBE_PCIE_CONFIG_SIZE 0x08 +#define NGBE_EEPROM_LAST_WORD 0x800 +#define NGBE_FW_PTR 0x0F +#define NGBE_SW_REGION_PTR 0x28 + +#define NGBE_CALSUM_COMMAND 0xE9 +#define NGBE_CALSUM_CAP_STATUS 0x10224 +#define NGBE_EEPROM_VERSION_STORE_REG 0x1022C +#define NGBE_SAN_MAC_ADDR_PTR 0x18 +#define NGBE_DEVICE_CAPS 0x1C +#define NGBE_EEPROM_VERSION_L 0x1D +#define NGBE_EEPROM_VERSION_H 0x1E + +#define NGBE_MAX_MSIX_VECTORS_EMERALD 0x09 + +/* MSI-X capability fields masks */ +#define NGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF + +/* EEPROM Commands - SPI */ +#define NGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ +#define NGBE_EEPROM_STATUS_RDY_SPI 0x01 +#define NGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define NGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define NGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ +#define NGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ +/* EEPROM reset Write Enable latch */ +#define NGBE_EEPROM_WRDI_OPCODE_SPI 0x04 +#define NGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ +#define NGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ +#define NGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define NGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define NGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Read Register */ +#define 
NGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */ +#define NGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */ +#define NGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */ +#define NGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define NGBE_NVM_POLL_WRITE 1 /* Flag for polling for wr complete */ +#define NGBE_NVM_POLL_READ 0 /* Flag for polling for rd complete */ + +#define NVM_INIT_CTRL_3 0x38 +#define NVM_INIT_CTRL_3_LPLU 0x8 + +#define NGBE_ETH_LENGTH_OF_ADDRESS 6 + +#define NGBE_EEPROM_PAGE_SIZE_MAX 128 +#define NGBE_EEPROM_RD_BUFFER_MAX_COUNT 256 /* words rd in burst */ +#define NGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* words wr in burst */ +#define NGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */ +#define NGBE_EEPROM_CCD_BIT 2 + +#ifndef NGBE_EEPROM_GRANT_ATTEMPTS +#define NGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM attempts to gain grant */ +#endif + +#ifndef NGBE_EERD_EEWR_ATTEMPTS +/* Number of 5 microseconds we wait for EERD read and + * EERW write to complete */ +#define NGBE_EERD_EEWR_ATTEMPTS 100000 +#endif + +#ifndef NGBE_FLUDONE_ATTEMPTS +/* # attempts we wait for flush update to complete */ +#define NGBE_FLUDONE_ATTEMPTS 20000 +#endif + +#define NGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */ +#define NGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */ +#define NGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */ +#define NGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */ + +#define NGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 +#define NGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 +#define NGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 +#define NGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 +#define NGBE_FW_LESM_PARAMETERS_PTR 0x2 +#define NGBE_FW_LESM_STATE_1 0x1 +#define NGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ +#define NGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 +#define NGBE_FW_PATCH_VERSION_4 0x7 +#define NGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ +#define NGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ +#define 
NGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */ +#define NGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ +#define NGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ +#define NGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x17 /* Alt. SAN MAC block */ +#define NGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt SAN MAC capability */ +#define NGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt SAN MAC 0 offset */ +#define NGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt SAN MAC 1 offset */ +#define NGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt WWNN prefix offset */ +#define NGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt WWPN prefix offset */ +#define NGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt SAN MAC exists */ +#define NGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt WWN base exists */ +#define NGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */ +#define NGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */ +#define NGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */ + +/******************************** PCI Bus Info *******************************/ +#define NGBE_PCI_DEVICE_STATUS 0xAA +#define NGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 +#define NGBE_PCI_LINK_STATUS 0xB2 +#define NGBE_PCI_DEVICE_CONTROL2 0xC8 +#define NGBE_PCI_LINK_WIDTH 0x3F0 +#define NGBE_PCI_LINK_WIDTH_1 0x10 +#define NGBE_PCI_LINK_WIDTH_2 0x20 +#define NGBE_PCI_LINK_WIDTH_4 0x40 +#define NGBE_PCI_LINK_WIDTH_8 0x80 +#define NGBE_PCI_LINK_SPEED 0xF +#define NGBE_PCI_LINK_SPEED_2500 0x1 +#define NGBE_PCI_LINK_SPEED_5000 0x2 +#define NGBE_PCI_LINK_SPEED_8000 0x3 +#define NGBE_PCI_HEADER_TYPE_REGISTER 0x0E +#define NGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define NGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 + +#define NGBE_PCIDEVCTRL2_RELAX_ORDER_OFFSET 4 +#define NGBE_PCIDEVCTRL2_RELAX_ORDER_MASK \ + (0x0001 << NGBE_PCIDEVCTRL2_RELAX_ORDER_OFFSET) +#define NGBE_PCIDEVCTRL2_RELAX_ORDER_ENABLE \ + (0x01 << NGBE_PCIDEVCTRL2_RELAX_ORDER_OFFSET) + +#define NGBE_PCIDEVCTRL2_TIMEO_MASK 0xf +#define 
NGBE_PCIDEVCTRL2_16_32ms_def 0x0 +#define NGBE_PCIDEVCTRL2_50_100us 0x1 +#define NGBE_PCIDEVCTRL2_1_2ms 0x2 +#define NGBE_PCIDEVCTRL2_16_32ms 0x5 +#define NGBE_PCIDEVCTRL2_65_130ms 0x6 +#define NGBE_PCIDEVCTRL2_260_520ms 0x9 +#define NGBE_PCIDEVCTRL2_1_2s 0xa +#define NGBE_PCIDEVCTRL2_4_8s 0xd +#define NGBE_PCIDEVCTRL2_17_34s 0xe + + +/******************* Receive Descriptor bit definitions **********************/ +#define NGBE_RXD_IPSEC_STATUS_SECP 0x00020000U +#define NGBE_RXD_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000U +#define NGBE_RXD_IPSEC_ERROR_INVALID_LENGTH 0x10000000U +#define NGBE_RXD_IPSEC_ERROR_AUTH_FAILED 0x18000000U +#define NGBE_RXD_IPSEC_ERROR_BIT_MASK 0x18000000U + +#define NGBE_RXD_NEXTP_MASK 0x000FFFF0U /* Next Descriptor Index */ +#define NGBE_RXD_NEXTP_SHIFT 0x00000004U +#define NGBE_RXD_STAT_MASK 0x000fffffU /* Stat/NEXTP: bit 0-19 */ +#define NGBE_RXD_STAT_DD 0x00000001U /* Done */ +#define NGBE_RXD_STAT_EOP 0x00000002U /* End of Packet */ +#define NGBE_RXD_STAT_CLASS_ID_MASK 0x0000001CU +#define NGBE_RXD_STAT_CLASS_ID_TC_RSS 0x00000000U +#define NGBE_RXD_STAT_CLASS_ID_SYN 0x00000008U +#define NGBE_RXD_STAT_CLASS_ID_5_TUPLE 0x0000000CU +#define NGBE_RXD_STAT_CLASS_ID_L2_ETYPE 0x00000010U +#define NGBE_RXD_STAT_VP 0x00000020U /* IEEE VLAN Pkt */ +#define NGBE_RXD_STAT_UDPCS 0x00000040U /* UDP xsum calculated */ +#define NGBE_RXD_STAT_L4CS 0x00000080U /* L4 xsum calculated */ +#define NGBE_RXD_STAT_IPCS 0x00000100U /* IP xsum calculated */ +#define NGBE_RXD_STAT_PIF 0x00000200U /* passed in-exact filter */ +#define NGBE_RXD_STAT_OUTERIPCS 0x00000400U /* Cloud IP xsum calculated*/ +#define NGBE_RXD_STAT_VEXT 0x00000800U /* 1st VLAN found */ +#define NGBE_RXD_STAT_LLINT 0x00002000U /* Pkt caused Low Latency + * Int */ +#define NGBE_RXD_STAT_TS 0x00004000U /* IEEE1588 Time Stamp */ +#define NGBE_RXD_STAT_SECP 0x00008000U /* Security Processing */ +#define NGBE_RXD_STAT_LB 0x00010000U /* Loopback Status */ +#define NGBE_RXD_STAT_FCEOFS 0x00020000U /* 
FCoE EOF/SOF Stat */ +#define NGBE_RXD_STAT_FCSTAT 0x000C0000U /* FCoE Pkt Stat */ +#define NGBE_RXD_STAT_FCSTAT_NOMTCH 0x00000000U /* 00: No Ctxt Match */ +#define NGBE_RXD_STAT_FCSTAT_NODDP 0x00040000U /* 01: Ctxt w/o DDP */ +#define NGBE_RXD_STAT_FCSTAT_FCPRSP 0x00080000U /* 10: Recv. FCP_RSP */ +#define NGBE_RXD_STAT_FCSTAT_DDP 0x000C0000U /* 11: Ctxt w/ DDP */ + +#define NGBE_RXD_ERR_MASK 0xfff00000U /* RDESC.ERRORS mask */ +#define NGBE_RXD_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define NGBE_RXD_ERR_FCEOFE 0x80000000U /* FCEOFe/IPE */ +#define NGBE_RXD_ERR_HBO 0x00800000U /*Header Buffer Overflow */ +#define NGBE_RXD_ERR_OUTERIPER 0x04000000U /* CRC IP Header error */ +#define NGBE_RXD_ERR_SECERR_MASK 0x18000000U +#define NGBE_RXD_ERR_RXE 0x20000000U /* Any MAC Error */ +#define NGBE_RXD_ERR_TCPE 0x40000000U /* TCP/UDP Checksum Error */ +#define NGBE_RXD_ERR_IPE 0x80000000U /* IP Checksum Error */ + +#define NGBE_RXDPS_HDRSTAT_HDRSP 0x00008000U +#define NGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FFU + +#define NGBE_RXD_RSSTYPE_MASK 0x0000000FU +#define NGBE_RXD_TPID_MASK 0x000001C0U +#define NGBE_RXD_TPID_SHIFT 6 +#define NGBE_RXD_HDRBUFLEN_MASK 0x00007FE0U +#define NGBE_RXD_RSCCNT_MASK 0x001E0000U +#define NGBE_RXD_RSCCNT_SHIFT 17 +#define NGBE_RXD_HDRBUFLEN_SHIFT 5 +#define NGBE_RXD_SPLITHEADER_EN 0x00001000U +#define NGBE_RXD_SPH 0x8000 + +/* RSS Hash results */ +#define NGBE_RXD_RSSTYPE_NONE 0x00000000U +#define NGBE_RXD_RSSTYPE_IPV4_TCP 0x00000001U +#define NGBE_RXD_RSSTYPE_IPV4 0x00000002U +#define NGBE_RXD_RSSTYPE_IPV6_TCP 0x00000003U +#define NGBE_RXD_RSSTYPE_IPV4_SCTP 0x00000004U +#define NGBE_RXD_RSSTYPE_IPV6 0x00000005U +#define NGBE_RXD_RSSTYPE_IPV6_SCTP 0x00000006U +#define NGBE_RXD_RSSTYPE_IPV4_UDP 0x00000007U +#define NGBE_RXD_RSSTYPE_IPV6_UDP 0x00000008U + +/** + * receive packet type + * PTYPE:8 = TUN:2 + PKT:2 + TYP:4 + **/ +/* TUN */ +#define NGBE_PTYPE_TUN_IPV4 (0x80) +#define NGBE_PTYPE_TUN_IPV6 (0xC0) + +/* PKT for TUN */ +#define 
NGBE_PTYPE_PKT_IPIP (0x00) /* IP+IP */ +#define NGBE_PTYPE_PKT_IG (0x10) /* IP+GRE */ +#define NGBE_PTYPE_PKT_IGM (0x20) /* IP+GRE+MAC */ +#define NGBE_PTYPE_PKT_IGMV (0x30) /* IP+GRE+MAC+VLAN */ +/* PKT for !TUN */ +#define NGBE_PTYPE_PKT_MAC (0x10) +#define NGBE_PTYPE_PKT_IP (0x20) +#define NGBE_PTYPE_PKT_FCOE (0x30) + +/* TYP for PKT=mac */ +#define NGBE_PTYPE_TYP_MAC (0x01) +#define NGBE_PTYPE_TYP_TS (0x02) /* time sync */ +#define NGBE_PTYPE_TYP_FIP (0x03) +#define NGBE_PTYPE_TYP_LLDP (0x04) +#define NGBE_PTYPE_TYP_CNM (0x05) +#define NGBE_PTYPE_TYP_EAPOL (0x06) +#define NGBE_PTYPE_TYP_ARP (0x07) +/* TYP for PKT=ip */ +#define NGBE_PTYPE_PKT_IPV6 (0x08) +#define NGBE_PTYPE_TYP_IPFRAG (0x01) +#define NGBE_PTYPE_TYP_IP (0x02) +#define NGBE_PTYPE_TYP_UDP (0x03) +#define NGBE_PTYPE_TYP_TCP (0x04) +#define NGBE_PTYPE_TYP_SCTP (0x05) +/* TYP for PKT=fcoe */ +#define NGBE_PTYPE_PKT_VFT (0x08) +#define NGBE_PTYPE_TYP_FCOE (0x00) +#define NGBE_PTYPE_TYP_FCDATA (0x01) +#define NGBE_PTYPE_TYP_FCRDY (0x02) +#define NGBE_PTYPE_TYP_FCRSP (0x03) +#define NGBE_PTYPE_TYP_FCOTHER (0x04) + +/* Packet type non-ip values */ +enum ngbe_l2_ptypes { + NGBE_PTYPE_L2_ABORTED = (NGBE_PTYPE_PKT_MAC), + NGBE_PTYPE_L2_MAC = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_MAC), + NGBE_PTYPE_L2_TS = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_TS), + NGBE_PTYPE_L2_FIP = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_FIP), + NGBE_PTYPE_L2_LLDP = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_LLDP), + NGBE_PTYPE_L2_CNM = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_CNM), + NGBE_PTYPE_L2_EAPOL = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_EAPOL), + NGBE_PTYPE_L2_ARP = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_ARP), + + NGBE_PTYPE_L2_IPV4_FRAG = (NGBE_PTYPE_PKT_IP | + NGBE_PTYPE_TYP_IPFRAG), + NGBE_PTYPE_L2_IPV4 = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_TYP_IP), + NGBE_PTYPE_L2_IPV4_UDP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_TYP_UDP), + NGBE_PTYPE_L2_IPV4_TCP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_TYP_TCP), + NGBE_PTYPE_L2_IPV4_SCTP = (NGBE_PTYPE_PKT_IP | 
NGBE_PTYPE_TYP_SCTP), + NGBE_PTYPE_L2_IPV6_FRAG = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6 | + NGBE_PTYPE_TYP_IPFRAG), + NGBE_PTYPE_L2_IPV6 = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6 | + NGBE_PTYPE_TYP_IP), + NGBE_PTYPE_L2_IPV6_UDP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6 | + NGBE_PTYPE_TYP_UDP), + NGBE_PTYPE_L2_IPV6_TCP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6 | + NGBE_PTYPE_TYP_TCP), + NGBE_PTYPE_L2_IPV6_SCTP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6 | + NGBE_PTYPE_TYP_SCTP), + + NGBE_PTYPE_L2_FCOE = (NGBE_PTYPE_PKT_FCOE | NGBE_PTYPE_TYP_FCOE), + NGBE_PTYPE_L2_FCOE_FCDATA = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_TYP_FCDATA), + NGBE_PTYPE_L2_FCOE_FCRDY = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_TYP_FCRDY), + NGBE_PTYPE_L2_FCOE_FCRSP = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_TYP_FCRSP), + NGBE_PTYPE_L2_FCOE_FCOTHER = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_TYP_FCOTHER), + NGBE_PTYPE_L2_FCOE_VFT = (NGBE_PTYPE_PKT_FCOE | NGBE_PTYPE_PKT_VFT), + NGBE_PTYPE_L2_FCOE_VFT_FCDATA = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_PKT_VFT | NGBE_PTYPE_TYP_FCDATA), + NGBE_PTYPE_L2_FCOE_VFT_FCRDY = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_PKT_VFT | NGBE_PTYPE_TYP_FCRDY), + NGBE_PTYPE_L2_FCOE_VFT_FCRSP = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_PKT_VFT | NGBE_PTYPE_TYP_FCRSP), + NGBE_PTYPE_L2_FCOE_VFT_FCOTHER = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_PKT_VFT | NGBE_PTYPE_TYP_FCOTHER), + + NGBE_PTYPE_L2_TUN4_MAC = (NGBE_PTYPE_TUN_IPV4 | NGBE_PTYPE_PKT_IGM), + NGBE_PTYPE_L2_TUN6_MAC = (NGBE_PTYPE_TUN_IPV6 | NGBE_PTYPE_PKT_IGM), +}; + +#define NGBE_RXD_PKTTYPE(_rxd) \ + ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF) +#define NGBE_PTYPE_TUN(_pt) ((_pt) & 0xC0) +#define NGBE_PTYPE_PKT(_pt) ((_pt) & 0x30) +#define NGBE_PTYPE_TYP(_pt) ((_pt) & 0x0F) +#define NGBE_PTYPE_TYPL4(_pt) ((_pt) & 0x07) + +#define NGBE_RXD_IPV6EX(_rxd) \ + ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 6) & 0x1) + +/* Security Processing bit Indication */ +#define NGBE_RXD_LNKSEC_STATUS_SECP 0x00020000U +#define 
NGBE_RXD_LNKSEC_ERROR_NO_SA_MATCH 0x08000000U +#define NGBE_RXD_LNKSEC_ERROR_REPLAY_ERROR 0x10000000U +#define NGBE_RXD_LNKSEC_ERROR_BIT_MASK 0x18000000U +#define NGBE_RXD_LNKSEC_ERROR_BAD_SIG 0x18000000U + +/* Masks to determine if packets should be dropped due to frame errors */ +#define NGBE_RXD_ERR_FRAME_ERR_MASK NGBE_RXD_ERR_RXE + +/*********************** Adv Transmit Descriptor Config Masks ****************/ +#define NGBE_TXD_DTALEN_MASK 0x0000FFFFU /* Data buf length(bytes) */ +#define NGBE_TXD_MAC_LINKSEC 0x00040000U /* Insert LinkSec */ +#define NGBE_TXD_MAC_TSTAMP 0x00080000U /* IEEE1588 time stamp */ +#define NGBE_TXD_IPSEC_SA_INDEX_MASK 0x000003FFU /* IPSec SA index */ +#define NGBE_TXD_IPSEC_ESP_LEN_MASK 0x000001FFU /* IPSec ESP length */ +#define NGBE_TXD_DTYP_MASK 0x00F00000U /* DTYP mask */ +#define NGBE_TXD_DTYP_CTXT 0x00100000U /* Adv Context Desc */ +#define NGBE_TXD_DTYP_DATA 0x00000000U /* Adv Data Descriptor */ +#define NGBE_TXD_EOP 0x01000000U /* End of Packet */ +#define NGBE_TXD_IFCS 0x02000000U /* Insert FCS */ +#define NGBE_TXD_LINKSEC 0x04000000U /* enable linksec */ +#define NGBE_TXD_RS 0x08000000U /* Report Status */ +#define NGBE_TXD_ECU 0x10000000U /* DDP hdr type or iSCSI */ +#define NGBE_TXD_QCN 0x20000000U /* cntag insertion enable */ +#define NGBE_TXD_VLE 0x40000000U /* VLAN pkt enable */ +#define NGBE_TXD_TSE 0x80000000U /* TCP Seg enable */ +#define NGBE_TXD_STAT_DD 0x00000001U /* Descriptor Done */ +#define NGBE_TXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define NGBE_TXD_CC 0x00000080U /* Check Context */ +#define NGBE_TXD_IPSEC 0x00000100U /* enable ipsec esp */ +#define NGBE_TXD_IIPCS 0x00000400U +#define NGBE_TXD_EIPCS 0x00000800U +#define NGBE_TXD_L4CS 0x00000200U +#define NGBE_TXD_PAYLEN_SHIFT 13 /* Adv desc PAYLEN shift */ +#define NGBE_TXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define NGBE_TXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define NGBE_TXD_TAG_TPID_SEL_SHIFT 11 +#define 
NGBE_TXD_IPSEC_TYPE_SHIFT 14 +#define NGBE_TXD_ENC_SHIFT 15 + +#define NGBE_TXD_TUCMD_IPSEC_TYPE_ESP 0x00004000U /* IPSec Type ESP */ +#define NGBE_TXD_TUCMD_IPSEC_ENCRYPT_EN 0x00008000/* ESP Encrypt Enable */ +#define NGBE_TXD_TUCMD_FCOE 0x00010000U /* FCoE Frame Type */ +#define NGBE_TXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ +#define NGBE_TXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ +#define NGBE_TXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ +#define NGBE_TXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation End */ +#define NGBE_TXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation Start */ +#define NGBE_TXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ +#define NGBE_TXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ +#define NGBE_TXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ +#define NGBE_TXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ +#define NGBE_TXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define NGBE_TXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +#define NGBE_TXD_OUTER_IPLEN_SHIFT 12 /* Adv ctxt OUTERIPLEN shift */ +#define NGBE_TXD_TUNNEL_LEN_SHIFT 21 /* Adv ctxt TUNNELLEN shift */ +#define NGBE_TXD_TUNNEL_TYPE_SHIFT 11 /* Adv Tx Desc Tunnel Type shift */ +#define NGBE_TXD_TUNNEL_DECTTL_SHIFT 27 /* Adv ctxt DECTTL shift */ +#define NGBE_TXD_TUNNEL_UDP (0x0ULL << NGBE_TXD_TUNNEL_TYPE_SHIFT) +#define NGBE_TXD_TUNNEL_GRE (0x1ULL << NGBE_TXD_TUNNEL_TYPE_SHIFT) + + +/************ ngbe_type.h ************/ +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define NGBE_REQ_TX_DESCRIPTOR_MULTIPLE 128 +#define NGBE_REQ_RX_DESCRIPTOR_MULTIPLE 128 +#define NGBE_REQ_TX_BUFFER_GRANULARITY 1024 + +/* Vlan-specific macros */ +#define NGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ +#define NGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */ +#define NGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ +#define NGBE_TX_DESC_SPECIAL_PRI_SHIFT NGBE_RX_DESC_SPECIAL_PRI_SHIFT + +/* Transmit Descriptor */ 
+union ngbe_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Receive Descriptor */ +union ngbe_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen */ + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Context descriptors */ +struct ngbe_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/************************* Flow Directory HASH *******************************/ +/* Software ATR hash keys */ +#define NGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 +#define NGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 + +/* Software ATR input stream values and masks */ +#define NGBE_ATR_HASH_MASK 0x7fff +#define NGBE_ATR_L4TYPE_MASK 0x3 +#define NGBE_ATR_L4TYPE_UDP 0x1 +#define NGBE_ATR_L4TYPE_TCP 0x2 +#define NGBE_ATR_L4TYPE_SCTP 0x3 +#define NGBE_ATR_L4TYPE_IPV6_MASK 0x4 +#define NGBE_ATR_L4TYPE_TUNNEL_MASK 0x10 +enum ngbe_atr_flow_type { + NGBE_ATR_FLOW_TYPE_IPV4 = 0x0, + NGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, + NGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, + NGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, + NGBE_ATR_FLOW_TYPE_IPV6 = 0x4, + NGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, + NGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, + NGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, + NGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10, + NGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11, + NGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12, + 
NGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13, + NGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14, + NGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15, + NGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16, + NGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17, +}; + +/* Flow Director ATR input struct. */ +union ngbe_atr_input { + /* + * Byte layout in order, all values with MSB first: + * + * vm_pool - 1 byte + * flow_type - 1 byte + * vlan_id - 2 bytes + * src_ip - 16 bytes + * inner_mac - 6 bytes + * cloud_mode - 2 bytes + * tni_vni - 4 bytes + * dst_ip - 16 bytes + * src_port - 2 bytes + * dst_port - 2 bytes + * flex_bytes - 2 bytes + * bkt_hash - 2 bytes + */ + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + __be32 dst_ip[4]; + __be32 src_ip[4]; + __be16 src_port; + __be16 dst_port; + __be16 flex_bytes; + __be16 bkt_hash; + } formatted; + __be32 dword_stream[11]; +}; + +/* Flow Director compressed ATR hash input struct */ +union ngbe_atr_hash_dword { + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + } formatted; + __be32 ip; + struct { + __be16 src; + __be16 dst; + } port; + __be16 flex_bytes; + __be32 dword; +}; + + +/****************** Manageablility Host Interface defines ********************/ +#define NGBE_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */ +#define NGBE_HI_MAX_BLOCK_DWORD_LENGTH 64 /* Num of dwords in range */ +#define NGBE_HI_COMMAND_TIMEOUT 5000 /* Process HI command limit */ +#define NGBE_HI_FLASH_ERASE_TIMEOUT 5000 /* Process Erase command limit */ +#define NGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */ +#define NGBE_HI_FLASH_VERIFY_TIMEOUT 60000 /* Process Apply command limit */ +#define NGBE_HI_PHY_MGMT_REQ_TIMEOUT 2000 /* Wait up to 2 seconds */ + +/* CEM Support */ +#define FW_CEM_HDR_LEN 0x4 +#define FW_CEM_CMD_DRIVER_INFO 0xDD +#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 +#define FW_CEM_CMD_RESERVED 0X0 +#define FW_CEM_UNUSED_VER 0x0 +#define FW_CEM_MAX_RETRIES 3 +#define FW_CEM_RESP_STATUS_SUCCESS 0x1 +#define 
FW_READ_SHADOW_RAM_CMD 0x31 +#define FW_READ_SHADOW_RAM_LEN 0x6 +#define FW_WRITE_SHADOW_RAM_CMD 0x33 +#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */ +#define FW_SHADOW_RAM_DUMP_CMD 0x36 +#define FW_SHADOW_RAM_DUMP_LEN 0 +#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */ +#define FW_NVM_DATA_OFFSET 3 +#define FW_MAX_READ_BUFFER_SIZE 244 +#define FW_DISABLE_RXEN_CMD 0xDE +#define FW_DISABLE_RXEN_LEN 0x1 +#define FW_PHY_MGMT_REQ_CMD 0x20 +#define FW_RESET_CMD 0xDF +#define FW_RESET_LEN 0x2 +#define FW_SETUP_MAC_LINK_CMD 0xE0 +#define FW_SETUP_MAC_LINK_LEN 0x2 +#define FW_FLASH_UPGRADE_START_CMD 0xE3 +#define FW_FLASH_UPGRADE_START_LEN 0x1 +#define FW_FLASH_UPGRADE_WRITE_CMD 0xE4 +#define FW_FLASH_UPGRADE_VERIFY_CMD 0xE5 +#define FW_FLASH_UPGRADE_VERIFY_LEN 0x4 +#define FW_EEPROM_CHECK_STATUS 0xE9 +#define FW_PHY_LED_CONF 0xF1 +#define FW_PHY_SIGNAL 0xF0 + + +/* Host Interface Command Structures */ +struct ngbe_hic_hdr { + u8 cmd; + u8 buf_len; + union { + u8 cmd_resv; + u8 ret_status; + } cmd_or_resp; + u8 checksum; +}; + +struct ngbe_hic_hdr2_req { + u8 cmd; + u8 buf_lenh; + u8 buf_lenl; + u8 checksum; +}; + +struct ngbe_hic_hdr2_rsp { + u8 cmd; + u8 buf_lenl; + u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ + u8 checksum; +}; + +union ngbe_hic_hdr2 { + struct ngbe_hic_hdr2_req req; + struct ngbe_hic_hdr2_rsp rsp; +}; + +struct ngbe_hic_drv_info { + struct ngbe_hic_hdr hdr; + u8 port_num; + u8 ver_sub; + u8 ver_build; + u8 ver_min; + u8 ver_maj; + u8 pad; /* end spacing to ensure length is mult. of dword */ + u16 pad2; /* end spacing to ensure length is mult. 
of dword2 */ +}; + +/* These need to be dword aligned */ +struct ngbe_hic_read_shadow_ram { + union ngbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ngbe_hic_write_shadow_ram { + union ngbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ngbe_hic_disable_rxen { + struct ngbe_hic_hdr hdr; + u8 port_number; + u8 pad2; + u16 pad3; +}; + +struct ngbe_hic_reset { + struct ngbe_hic_hdr hdr; + u16 lan_id; + u16 reset_type; +}; + +struct ngbe_hic_phy_cfg { + struct ngbe_hic_hdr hdr; + u8 lan_id; + u8 phy_mode; + u16 phy_speed; +}; + +enum ngbe_module_id { + NGBE_MODULE_EEPROM = 0, + NGBE_MODULE_FIRMWARE, + NGBE_MODULE_HARDWARE, + NGBE_MODULE_PCIE +}; + +struct ngbe_hic_upg_start { + struct ngbe_hic_hdr hdr; + u8 module_id; + u8 pad2; + u16 pad3; +}; + +struct ngbe_hic_upg_write { + struct ngbe_hic_hdr hdr; + u8 data_len; + u8 eof_flag; + u16 check_sum; + u32 data[62]; +}; + +enum ngbe_upg_flag { + NGBE_RESET_NONE = 0, + NGBE_RESET_FIRMWARE, + NGBE_RELOAD_EEPROM, + NGBE_RESET_LAN +}; + +struct ngbe_hic_upg_verify { + struct ngbe_hic_hdr hdr; + u32 action_flag; +}; + +struct ngbe_hic_write_lldp{ + struct ngbe_hic_hdr hdr; + u8 func; + u8 pad2; + u16 pad3; +}; + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define NGBE_PCI_MASTER_DISABLE_TIMEOUT 800 + +/* Check whether address is multicast. This is little-endian specific check.*/ +#define NGBE_IS_MULTICAST(Address) \ + (bool)(((u8 *)(Address))[0] & ((u8)0x01)) + +/* Check whether an address is broadcast. 
*/ +#define NGBE_IS_BROADCAST(Address) \ + ((((u8 *)(Address))[0] == ((u8)0xff)) && \ + (((u8 *)(Address))[1] == ((u8)0xff))) + +/* DCB registers */ +#define NGBE_DCB_MAX_TRAFFIC_CLASS 8 + +/* Power Manangbeent */ +/* DMA Coalescing configuration */ +struct ngbe_dmac_config { + u16 watchdog_timer; /* usec units */ + bool fcoe_en; + u32 link_speed; + u8 fcoe_tc; + u8 num_tcs; +}; + + +/* Autonegotiation advertised speeds */ +typedef u32 ngbe_autoneg_advertised; +/* Link speed */ +#define NGBE_LINK_SPEED_UNKNOWN 0 +#define NGBE_LINK_SPEED_100_FULL 1 +#define NGBE_LINK_SPEED_1GB_FULL 2 +#define NGBE_LINK_SPEED_10_FULL 8 +#define NGBE_LINK_SPEED_AUTONEG (NGBE_LINK_SPEED_100_FULL | \ + NGBE_LINK_SPEED_1GB_FULL | \ + NGBE_LINK_SPEED_10_FULL) + +/* Physical layer type */ +typedef u32 ngbe_physical_layer; +#define NGBE_PHYSICAL_LAYER_UNKNOWN 0 +#define NGBE_PHYSICAL_LAYER_1000BASE_T 0x0002 +#define NGBE_PHYSICAL_LAYER_100BASE_TX 0x0004 +#define NGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008 +#define NGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200 +#define NGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400 +#define NGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000 +#define NGBE_PHYSICAL_LAYER_1000BASE_SX 0x4000 + + +/* Special PHY Init Routine */ +#define NGBE_PHY_INIT_OFFSET_NL 0x002B +#define NGBE_PHY_INIT_END_NL 0xFFFF +#define NGBE_CONTROL_MASK_NL 0xF000 +#define NGBE_DATA_MASK_NL 0x0FFF +#define NGBE_CONTROL_SHIFT_NL 12 +#define NGBE_DELAY_NL 0 +#define NGBE_DATA_NL 1 +#define NGBE_CONTROL_NL 0x000F +#define NGBE_CONTROL_EOL_NL 0x0FFF +#define NGBE_CONTROL_SOL_NL 0x0000 + +/* ethtool */ +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 + + +/* Flow Control Data Sheet defined values + * Calculation and defines taken from 802.1bb Annex O + */ + +/* BitTimes (BT) conversion */ +#define NGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024)) +#define NGBE_B2BT(BT) (BT * 8) + +/* Calculate Delay to respond to PFC */ +#define NGBE_PFC_D 672 + +/* Calculate Cable Delay */ +#define NGBE_CABLE_DC 
5556 /* Delay Copper */ +#define NGBE_CABLE_DO 5000 /* Delay Optical */ + +/* Calculate Interface Delay X540 */ +#define NGBE_PHY_DC 25600 /* Delay 10G BASET */ +#define NGBE_MAC_DC 8192 /* Delay Copper XAUI interface */ +#define NGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */ + +#define NGBE_ID_X540 (NGBE_MAC_DC + NGBE_XAUI_DC + NGBE_PHY_DC) + +/* Calculate Interface Delay */ +#define NGBE_PHY_D 12800 +#define NGBE_MAC_D 4096 +#define NGBE_XAUI_D (2 * 1024) + +#define NGBE_ID (NGBE_MAC_D + NGBE_XAUI_D + NGBE_PHY_D) + +/* Calculate Delay incurred from higher layer */ +#define NGBE_HD 6144 + +/* Calculate PCI Bus delay for low thresholds */ +#define NGBE_PCI_DELAY 10000 + +/* Calculate X540 delay value in bit times */ +#define NGBE_DV_X540(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (NGBE_B2BT(_max_frame_link) + \ + NGBE_PFC_D + \ + (2 * NGBE_CABLE_DC) + \ + (2 * NGBE_ID_X540) + \ + NGBE_HD) / 25 + 1) + \ + 2 * NGBE_B2BT(_max_frame_tc)) + + +/* Calculate delay value in bit times */ +#define NGBE_DV(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (NGBE_B2BT(_max_frame_link) + \ + NGBE_PFC_D + \ + (2 * NGBE_CABLE_DC) + \ + (2 * NGBE_ID) + \ + NGBE_HD) / 25 + 1) + \ + 2 * NGBE_B2BT(_max_frame_tc)) + +/* Calculate low threshold delay values */ +#define NGBE_LOW_DV_X540(_max_frame_tc) \ + (2 * NGBE_B2BT(_max_frame_tc) + \ + (36 * NGBE_PCI_DELAY / 25) + 1) + +#define NGBE_LOW_DV(_max_frame_tc) \ + (2 * NGBE_LOW_DV_X540(_max_frame_tc)) + + +/* + * Unavailable: The FCoE Boot Option ROM is not present in the flash. + * Disabled: Present; boot order is not set for any targets on the port. + * Enabled: Present; boot order is set for at least one target on the port. 
+ */ +enum ngbe_fcoe_boot_status { + ngbe_fcoe_bootstatus_disabled = 0, + ngbe_fcoe_bootstatus_enabled = 1, + ngbe_fcoe_bootstatus_unavailable = 0xFFFF +}; + +enum ngbe_eeprom_type { + ngbe_eeprom_uninitialized = 0, + ngbe_eeprom_spi, + ngbe_flash, + ngbe_eeprom_none /* No NVM support */ +}; + +enum ngbe_phy_type { + ngbe_phy_unknown = 0, + ngbe_phy_none, + ngbe_phy_internal, + ngbe_phy_m88e1512, + ngbe_phy_m88e1512_sfi, + ngbe_phy_m88e1512_unknown, + ngbe_phy_yt8521s, + ngbe_phy_yt8521s_sfi, + ngbe_phy_sfp_passive_tyco, + ngbe_phy_sfp_passive_unknown, + ngbe_phy_sfp_active_unknown, + ngbe_phy_sfp_avago, + ngbe_phy_sfp_ftl, + ngbe_phy_sfp_ftl_active, + ngbe_phy_sfp_unknown, + ngbe_phy_sfp_intel, + ngbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/ + ngbe_phy_internal_yt8521s_sfi, + ngbe_phy_generic +}; + +/* + * SFP+ module type IDs: + * + * ID Module Type + * ============= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CU_CORE0 + * 4 SFP_DA_CU_CORE1 + * 5 SFP_SR/LR_CORE0 + * 6 SFP_SR/LR_CORE1 + */ +enum ngbe_sfp_type { + ngbe_sfp_type_da_cu = 0, + ngbe_sfp_type_sr = 1, + ngbe_sfp_type_lr = 2, + ngbe_sfp_type_da_cu_core0 = 3, + ngbe_sfp_type_da_cu_core1 = 4, + ngbe_sfp_type_srlr_core0 = 5, + ngbe_sfp_type_srlr_core1 = 6, + ngbe_sfp_type_da_act_lmt_core0 = 7, + ngbe_sfp_type_da_act_lmt_core1 = 8, + ngbe_sfp_type_1g_cu_core0 = 9, + ngbe_sfp_type_1g_cu_core1 = 10, + ngbe_sfp_type_1g_sx_core0 = 11, + ngbe_sfp_type_1g_sx_core1 = 12, + ngbe_sfp_type_1g_lx_core0 = 13, + ngbe_sfp_type_1g_lx_core1 = 14, + ngbe_sfp_type_not_present = 0xFFFE, + ngbe_sfp_type_unknown = 0xFFFF +}; + +enum ngbe_media_type { + ngbe_media_type_unknown = 0, + ngbe_media_type_fiber, + ngbe_media_type_copper, + ngbe_media_type_virtual +}; + +/* Flow Control Settings */ +enum ngbe_fc_mode { + ngbe_fc_none = 0, + ngbe_fc_rx_pause, + ngbe_fc_tx_pause, + ngbe_fc_full, + ngbe_fc_default +}; + +/* Smart Speed Settings */ +#define NGBE_SMARTSPEED_MAX_RETRIES 3 +enum 
ngbe_smart_speed { + ngbe_smart_speed_auto = 0, + ngbe_smart_speed_on, + ngbe_smart_speed_off +}; + +/* PCI bus types */ +enum ngbe_bus_type { + ngbe_bus_type_unknown = 0, + ngbe_bus_type_pci, + ngbe_bus_type_pcix, + ngbe_bus_type_pci_express, + ngbe_bus_type_internal, + ngbe_bus_type_reserved +}; + +/* PCI bus speeds */ +enum ngbe_bus_speed { + ngbe_bus_speed_unknown = 0, + ngbe_bus_speed_33 = 33, + ngbe_bus_speed_66 = 66, + ngbe_bus_speed_100 = 100, + ngbe_bus_speed_120 = 120, + ngbe_bus_speed_133 = 133, + ngbe_bus_speed_2500 = 2500, + ngbe_bus_speed_5000 = 5000, + ngbe_bus_speed_8000 = 8000, + ngbe_bus_speed_reserved +}; + +/* PCI bus widths */ +enum ngbe_bus_width { + ngbe_bus_width_unknown = 0, + ngbe_bus_width_pcie_x1 = 1, + ngbe_bus_width_pcie_x2 = 2, + ngbe_bus_width_pcie_x4 = 4, + ngbe_bus_width_pcie_x8 = 8, + ngbe_bus_width_32 = 32, + ngbe_bus_width_64 = 64, + ngbe_bus_width_reserved +}; + +struct ngbe_addr_filter_info { + u32 num_mc_addrs; + u32 rar_used_count; + u32 mta_in_use; + u32 overflow_promisc; + bool user_set_promisc; +}; + +/* Bus parameters */ +struct ngbe_bus_info { + enum pci_bus_speed speed; + enum pcie_link_width width; + enum ngbe_bus_type type; + + u16 func; + u16 lan_id; +}; + +/* Flow control parameters */ +struct ngbe_fc_info { + u32 high_water; /* Flow Ctrl High-water */ + u32 low_water; /* Flow Ctrl Low-water */ + u16 pause_time; /* Flow Control Pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + bool disable_fc_autoneg; /* Do not autonegotiate FC */ + bool fc_was_autonegged; /* Is current_mode the result of autonegging? 
*/ + enum ngbe_fc_mode current_mode; /* FC mode in effect */ + enum ngbe_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +/* Statistics counters collected by the MAC */ +struct ngbe_hw_stats { + u64 crcerrs; + u64 illerrc; + u64 errbc; + u64 mspdc; + u64 mpctotal; + u64 mpc[8]; + u64 mlfc; + u64 mrfc; + u64 rlec; + u64 lxontxc; + u64 lxonrxc; + u64 lxofftxc; + u64 lxoffrxc; + u64 pxontxc[8]; + u64 pxonrxc[8]; + u64 pxofftxc[8]; + u64 pxoffrxc[8]; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc[8]; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mngprc; + u64 mngpdc; + u64 mngptc; + u64 tor; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 xec; + u64 qprc[16]; + u64 qptc[16]; + u64 qbrc[16]; + u64 qbtc[16]; + u64 qprdc[16]; + u64 pxon2offc[8]; + u64 fccrc; + u64 fclast; + u64 fcoerpdc; + u64 fcoeprc; + u64 fcoeptc; + u64 fcoedwrc; + u64 fcoedwtc; + u64 fcoe_noddp; + u64 fcoe_noddp_ext_buff; + u64 ldpcec; + u64 pcrc8ec; + u64 b2ospc; + u64 b2ogprc; + u64 o2bgptc; + u64 o2bspc; +}; + +/* forward declaration */ +struct ngbe_hw; + +/* iterator type for walking multicast address lists */ +typedef u8* (*ngbe_mc_addr_itr) (struct ngbe_hw *hw, u8 **mc_addr_ptr, + u32 *vmdq); + +/* Function pointer table */ +struct ngbe_eeprom_operations { + int (*init_params)(struct ngbe_hw *); + int (*read)(struct ngbe_hw *, u16, u16 *); + int (*read_buffer)(struct ngbe_hw *, u16, u16, u16 *); + int (*read32)(struct ngbe_hw *, u16, u32 *); + int (*write)(struct ngbe_hw *, u16, u16); + int (*write_buffer)(struct ngbe_hw *, u16, u16, u16 *); + int (*validate_checksum)(struct ngbe_hw *, u16 *); + int (*update_checksum)(struct ngbe_hw *); + int (*calc_checksum)(struct ngbe_hw *); + int (*eeprom_chksum_cap_st)(struct ngbe_hw *, u16, u32 *); + int (*phy_led_oem_chk)(struct 
ngbe_hw *, u32 *); + int (*phy_signal_set)(struct ngbe_hw *); +}; + +struct ngbe_flash_operations { + int (*init_params)(struct ngbe_hw *); + int (*read_buffer)(struct ngbe_hw *, u32, u32, u32 *); + int (*write_buffer)(struct ngbe_hw *, u32, u32, u32 *); + int (*check_led_oem)(struct ngbe_hw *); +}; + +struct ngbe_mac_operations { + int (*init_hw)(struct ngbe_hw *); + int (*reset_hw)(struct ngbe_hw *); + int (*start_hw)(struct ngbe_hw *); + int (*clear_hw_cntrs)(struct ngbe_hw *); + enum ngbe_media_type (*get_media_type)(struct ngbe_hw *); + int (*get_mac_addr)(struct ngbe_hw *, u8 *); + int (*get_device_caps)(struct ngbe_hw *, u16 *); + int (*stop_adapter)(struct ngbe_hw *); + int (*get_bus_info)(struct ngbe_hw *); + void (*set_lan_id)(struct ngbe_hw *); + int (*enable_rx_dma)(struct ngbe_hw *, u32); + int (*disable_sec_rx_path)(struct ngbe_hw *); + int (*enable_sec_rx_path)(struct ngbe_hw *); + int (*acquire_swfw_sync)(struct ngbe_hw *, u32); + void (*release_swfw_sync)(struct ngbe_hw *, u32); + + /* Link */ + void (*disable_tx_laser)(struct ngbe_hw *); + void (*enable_tx_laser)(struct ngbe_hw *); + void (*flap_tx_laser)(struct ngbe_hw *); + int (*setup_link)(struct ngbe_hw *, u32, bool); + int (*setup_mac_link)(struct ngbe_hw *, u32, bool); + int (*check_link)(struct ngbe_hw *, u32 *, bool *, bool); + int (*get_link_capabilities)(struct ngbe_hw *, u32 *, + bool *); + void (*set_rate_select_speed)(struct ngbe_hw *, u32); + + /* Packet Buffer manipulation */ + void (*setup_rxpba)(struct ngbe_hw *, int, u32, int); + + /* LED */ + int (*led_on)(struct ngbe_hw *, u32); + int (*led_off)(struct ngbe_hw *, u32); + + /* RAR, Multicast, VLAN */ + int (*set_rar)(struct ngbe_hw *, u32, u8 *, u64, u32); + int (*clear_rar)(struct ngbe_hw *, u32); + int (*insert_mac_addr)(struct ngbe_hw *, u8 *, u32); + int (*set_vmdq)(struct ngbe_hw *, u32, u32); + int (*set_vmdq_san_mac)(struct ngbe_hw *, u32); + int (*clear_vmdq)(struct ngbe_hw *, u32, u32); + int (*init_rx_addrs)(struct 
ngbe_hw *); + int (*update_uc_addr_list)(struct ngbe_hw *, u8 *, u32, + ngbe_mc_addr_itr); + int (*update_mc_addr_list)(struct ngbe_hw *, u8 *, u32, + ngbe_mc_addr_itr, bool clear); + int (*enable_mc)(struct ngbe_hw *); + int (*disable_mc)(struct ngbe_hw *); + int (*clear_vfta)(struct ngbe_hw *); + int (*set_vfta)(struct ngbe_hw *, u32, u32, bool); + int (*set_vlvf)(struct ngbe_hw *, u32, u32, bool, bool *); + int (*init_uta_tables)(struct ngbe_hw *); + void (*set_mac_anti_spoofing)(struct ngbe_hw *, bool, int); + void (*set_vlan_anti_spoofing)(struct ngbe_hw *, bool, int); + + /* Flow Control */ + int (*fc_enable)(struct ngbe_hw *); + int (*setup_fc)(struct ngbe_hw *); + + /* Manageability interface */ + int (*set_fw_drv_ver)(struct ngbe_hw *, u8, u8, u8, u8); + int (*get_thermal_sensor_data)(struct ngbe_hw *); + int (*init_thermal_sensor_thresh)(struct ngbe_hw *hw); + void (*get_rtrup2tc)(struct ngbe_hw *hw, u8 *map); + void (*disable_rx)(struct ngbe_hw *hw); + void (*enable_rx)(struct ngbe_hw *hw); + void (*set_source_address_pruning)(struct ngbe_hw *, bool, + unsigned int); + void (*set_ethertype_anti_spoofing)(struct ngbe_hw *, bool, int); + int (*dmac_config)(struct ngbe_hw *hw); + int (*setup_eee)(struct ngbe_hw *hw, bool enable_eee); +}; + +struct ngbe_phy_operations { + int (*identify)(struct ngbe_hw *); + int (*identify_sfp)(struct ngbe_hw *); + int (*init)(struct ngbe_hw *); + int (*reset)(struct ngbe_hw *); + int (*read)(struct ngbe_hw *, int, int); + int (*write)(struct ngbe_hw *, int, int, u16); + int (*read_reg)(struct ngbe_hw *, u32, u32, u16 *); + int (*write_reg)(struct ngbe_hw *, u32, u32, u16); + int (*read_reg_mdi)(struct ngbe_hw *, u32, u32, u16 *); + int (*write_reg_mdi)(struct ngbe_hw *, u32, u32, u16); + u32 (*setup_link)(struct ngbe_hw *, u32, bool); + int (*phy_suspend)(struct ngbe_hw *hw); + int (*phy_resume)(struct ngbe_hw *hw); + u32 (*phy_led_ctrl)(struct ngbe_hw *); + int (*setup_internal_link)(struct ngbe_hw *); + u32 
(*setup_link_speed)(struct ngbe_hw *, u32, bool); + int (*check_link)(struct ngbe_hw *, u32 *, bool *); + int (*check_overtemp)(struct ngbe_hw *); + int (*check_event)(struct ngbe_hw *); + int (*get_adv_pause)(struct ngbe_hw *, u8 *); + int (*get_lp_adv_pause)(struct ngbe_hw *, u8 *); + int (*set_adv_pause)(struct ngbe_hw *, u16); + int (*setup_once)(struct ngbe_hw *); +}; + +struct ngbe_eeprom_info { + struct ngbe_eeprom_operations ops; + enum ngbe_eeprom_type type; + u32 semaphore_delay; + u16 word_size; + u16 address_bits; + u16 word_page_size; + u16 ctrl_word_3; + u16 sw_region_offset; +}; + +struct ngbe_flash_info { + struct ngbe_flash_operations ops; + u32 semaphore_delay; + u32 dword_size; + u16 address_bits; +}; + +#define NGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 +struct ngbe_mac_info { + struct ngbe_mac_operations ops; + u8 addr[NGBE_ETH_LENGTH_OF_ADDRESS]; + u8 perm_addr[NGBE_ETH_LENGTH_OF_ADDRESS]; + u8 san_addr[NGBE_ETH_LENGTH_OF_ADDRESS]; + /* prefix for World Wide Node Name (WWNN) */ + u16 wwnn_prefix; + /* prefix for World Wide Port Name (WWPN) */ + u16 wwpn_prefix; +#define NGBE_MAX_MTA 128 +#define NGBE_MAX_VFTA_ENTRIES 128 + u32 mta_shadow[NGBE_MAX_MTA]; + int mc_filter_type; + u32 mcft_size; + u32 vft_shadow[NGBE_MAX_VFTA_ENTRIES]; + u32 vft_size; + u32 num_rar_entries; + u32 rar_highwater; + u32 rx_pb_size; + u32 max_tx_queues; + u32 max_rx_queues; + u32 orig_sr_pcs_ctl2; + u32 orig_sr_pma_mmd_ctl1; + u32 orig_sr_an_mmd_ctl; + u32 orig_sr_an_mmd_adv_reg2; + u32 orig_vr_xs_or_pcs_mmd_digi_ctl1; + u8 san_mac_rar_index; + bool get_link_status; + u16 max_msix_vectors; + bool arc_subsystem_valid; + bool orig_link_settings_stored; + bool autotry_restart; + u8 flags; + struct ngbe_thermal_sensor_data thermal_sensor_data; + bool thermal_sensor_enabled; + struct ngbe_dmac_config dmac_config; + bool set_lben; + bool autoneg; +}; + +struct ngbe_phy_info { + struct ngbe_phy_operations ops; + enum ngbe_phy_type type; + u32 addr; + u32 id; + enum ngbe_sfp_type 
sfp_type; + bool sfp_setup_needed; + u32 revision; + enum ngbe_media_type media_type; + u32 phy_semaphore_mask; + u8 lan_id; /* to be delete */ + ngbe_autoneg_advertised autoneg_advertised; + enum ngbe_smart_speed smart_speed; + bool smart_speed_active; + bool multispeed_fiber; + bool reset_if_overtemp; + ngbe_physical_layer link_mode; + u32 force_speed; +}; + +#include "ngbe_mbx.h" + +struct ngbe_mbx_operations { + void (*init_params)(struct ngbe_hw *hw); + int (*read)(struct ngbe_hw *, u32 *, u16, u16); + int (*write)(struct ngbe_hw *, u32 *, u16, u16); + int (*read_posted)(struct ngbe_hw *, u32 *, u16, u16); + int (*write_posted)(struct ngbe_hw *, u32 *, u16, u16); + int (*check_for_msg)(struct ngbe_hw *, u16); + int (*check_for_ack)(struct ngbe_hw *, u16); + int (*check_for_rst)(struct ngbe_hw *, u16); +}; + +struct ngbe_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct ngbe_mbx_info { + struct ngbe_mbx_operations ops; + struct ngbe_mbx_stats stats; + u32 timeout; + u32 udelay; + u32 v2p_mailbox; + u16 size; +}; + +enum ngbe_reset_type { + NGBE_LAN_RESET = 0, + NGBE_SW_RESET, + NGBE_GLOBAL_RESET +}; + +enum ngbe_link_status { + NGBE_LINK_STATUS_NONE = 0, + NGBE_LINK_STATUS_KX, + NGBE_LINK_STATUS_KX4 +}; + +enum em_mac_type { + em_mac_type_unknown = 0, + em_mac_type_mdi, + em_mac_type_rgmii +}; + +struct ngbe_hw { + u8 IOMEM *hw_addr; + void *back; + struct ngbe_mac_info mac; + struct ngbe_addr_filter_info addr_ctrl; + struct ngbe_fc_info fc; + struct ngbe_phy_info phy; + struct ngbe_eeprom_info eeprom; + struct ngbe_flash_info flash; + struct ngbe_bus_info bus; + struct ngbe_mbx_info mbx; + enum em_mac_type mac_type; + + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + bool adapter_stopped; + int api_version; + enum ngbe_reset_type reset_type; + bool force_full_reset; + bool allow_unsupported_sfp; + bool wol_enabled; + enum ngbe_link_status link_status; + u16 
tpid[8]; + bool gpio_ctl; + bool ncsi_enabled; + u8 restart_an; + u16 oem_ssid; + u16 oem_svid; + spinlock_t phy_lock; +}; + +/* Error Codes */ +#define NGBE_OK 0 +#define NGBE_ERR 100 +#define NGBE_NOT_IMPLEMENTED 0x7FFFFFFF +/* (-NGBE_ERR, NGBE_ERR): reserved for non-ngbe defined error code */ +#define NGBE_ERR_NOSUPP -(NGBE_ERR+0) +#define NGBE_ERR_EEPROM -(NGBE_ERR+1) +#define NGBE_ERR_EEPROM_CHECKSUM -(NGBE_ERR+2) +#define NGBE_ERR_PHY -(NGBE_ERR+3) +#define NGBE_ERR_CONFIG -(NGBE_ERR+4) +#define NGBE_ERR_PARAM -(NGBE_ERR+5) +#define NGBE_ERR_MAC_TYPE -(NGBE_ERR+6) +#define NGBE_ERR_UNKNOWN_PHY -(NGBE_ERR+7) +#define NGBE_ERR_LINK_SETUP -(NGBE_ERR+8) +#define NGBE_ERR_ADAPTER_STOPPED -(NGBE_ERR+9) +#define NGBE_ERR_INVALID_MAC_ADDR -(NGBE_ERR+10) +#define NGBE_ERR_DEVICE_NOT_SUPPORTED -(NGBE_ERR+11) +#define NGBE_ERR_MASTER_REQUESTS_PENDING -(NGBE_ERR+12) +#define NGBE_ERR_INVALID_LINK_SETTINGS -(NGBE_ERR+13) +#define NGBE_ERR_AUTONEG_NOT_COMPLETE -(NGBE_ERR+14) +#define NGBE_ERR_RESET_FAILED -(NGBE_ERR+15) +#define NGBE_ERR_SWFW_SYNC -(NGBE_ERR+16) +#define NGBE_ERR_PHY_ADDR_INVALID -(NGBE_ERR+17) +#define NGBE_ERR_I2C -(NGBE_ERR+18) +#define NGBE_ERR_SFP_NOT_SUPPORTED -(NGBE_ERR+19) +#define NGBE_ERR_SFP_NOT_PRESENT -(NGBE_ERR+20) +#define NGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -(NGBE_ERR+21) +#define NGBE_ERR_NO_SAN_ADDR_PTR -(NGBE_ERR+22) +#define NGBE_ERR_FDIR_REINIT_FAILED -(NGBE_ERR+23) +#define NGBE_ERR_EEPROM_VERSION -(NGBE_ERR+24) +#define NGBE_ERR_NO_SPACE -(NGBE_ERR+25) +#define NGBE_ERR_OVERTEMP -(NGBE_ERR+26) +#define NGBE_ERR_UNDERTEMP -(NGBE_ERR+27) +#define NGBE_ERR_FC_NOT_NEGOTIATED -(NGBE_ERR+28) +#define NGBE_ERR_FC_NOT_SUPPORTED -(NGBE_ERR+29) +#define NGBE_ERR_SFP_SETUP_NOT_COMPLETE -(NGBE_ERR+30) +#define NGBE_ERR_PBA_SECTION -(NGBE_ERR+31) +#define NGBE_ERR_INVALID_ARGUMENT -(NGBE_ERR+32) +#define NGBE_ERR_HOST_INTERFACE_COMMAND -(NGBE_ERR+33) +#define NGBE_ERR_OUT_OF_MEM -(NGBE_ERR+34) +#define NGBE_ERR_FEATURE_NOT_SUPPORTED -(NGBE_ERR+36) 
+#define NGBE_ERR_EEPROM_PROTECTED_REGION -(NGBE_ERR+37) +#define NGBE_ERR_FDIR_CMD_INCOMPLETE -(NGBE_ERR+38) +#define NGBE_ERR_FLASH_LOADING_FAILED -(NGBE_ERR+39) +#define NGBE_ERR_XPCS_POWER_UP_FAILED -(NGBE_ERR+40) +#define NGBE_ERR_FW_RESP_INVALID -(NGBE_ERR+41) +#define NGBE_ERR_PHY_INIT_NOT_DONE -(NGBE_ERR+42) +#define NGBE_ERR_TIMEOUT -(NGBE_ERR+43) +#define NGBE_ERR_TOKEN_RETRY -(NGBE_ERR+44) +#define NGBE_ERR_REGISTER -(NGBE_ERR+45) +#define NGBE_ERR_MBX -(NGBE_ERR+46) +#define NGBE_ERR_MNG_ACCESS_FAILED -(NGBE_ERR+47) +#define NGBE_ERR_PHY_TYPE -(NGBE_ERR+48) +#define NGBE_ERR_PHY_TIMEOUT -(NGBE_ERR+49) + +/** + * register operations + **/ +/* read register */ +#define NGBE_DEAD_READ_RETRIES 10 +#define NGBE_DEAD_READ_REG 0xdeadbeefU +#define NGBE_DEAD_READ_REG64 0xdeadbeefdeadbeefULL + +#define NGBE_FAILED_READ_REG 0xffffffffU +#define NGBE_FAILED_READ_REG64 0xffffffffffffffffULL + +#define NGBE_LLDP_REG 0xf1000 +#define NGBE_LLDP_ON 0x0000000f + +static inline bool NGBE_REMOVED(void __iomem *addr) +{ + return unlikely(!addr); +} + +static inline u32 +ngbe_rd32(u8 __iomem *base) +{ + return readl(base); +} + +static inline u32 +rd32(struct ngbe_hw *hw, u32 reg) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + u32 val = NGBE_FAILED_READ_REG; + + if (unlikely(!base)) + return val; + + val = ngbe_rd32(base + reg); + + return val; +} +#define rd32a(a, reg, offset) ( \ + rd32((a), (reg) + ((offset) << 2))) + +static inline u32 +rd32m(struct ngbe_hw *hw, u32 reg, u32 mask) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + u32 val = NGBE_FAILED_READ_REG; + + if (unlikely(!base)) + return val; + + val = ngbe_rd32(base + reg); + if (unlikely(val == NGBE_FAILED_READ_REG)) + return val; + + return val & mask; +} + +/* write register */ +static inline void +ngbe_wr32(u8 __iomem *base, u32 val) +{ + writel(val, base); +} + +static inline void +wr32(struct ngbe_hw *hw, u32 reg, u32 val) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + + if (unlikely(!base)) + 
return; + + ngbe_wr32(base + reg, val); +} +#define wr32a(a, reg, off, val) \ + wr32((a), (reg) + ((off) << 2), (val)) + +static inline void +wr32m(struct ngbe_hw *hw, u32 reg, u32 mask, u32 field) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + u32 val; + + if (unlikely(!base)) + return; + + val = ngbe_rd32(base + reg); + if (unlikely(val == NGBE_FAILED_READ_REG)) + return; + + val = ((val & ~mask) | (field & mask)); + ngbe_wr32(base + reg, val); +} + +/* poll register */ +#define NGBE_MDIO_TIMEOUT 1000 +#define NGBE_I2C_TIMEOUT 1000 +#define NGBE_SPI_TIMEOUT 1000 +static inline int +po32m(struct ngbe_hw *hw, u32 reg, + u32 mask, u32 field, int usecs, int count) +{ + int loop; + + loop = (count ? count : (usecs + 9) / 10); + usecs = (loop ? (usecs + loop - 1) / loop : 0); + + count = loop; + do { + u32 value = rd32(hw, reg); + if ((value & mask) == (field & mask)) { + break; + } + + if (loop-- <= 0) + break; + + usec_delay(usecs); + } while (true); + + return (count - loop <= count ? 0 : NGBE_ERR_TIMEOUT); +} + +#define NGBE_WRITE_FLUSH(H) rd32(H, NGBE_MIS_PWR) + +#endif /* _NGBE_TYPE_H_ */ -- Gitee From d76bcfc06306364ee36f98824e2572c7cef7b687 Mon Sep 17 00:00:00 2001 From: Duanqiang Wen Date: Sun, 7 Apr 2024 17:00:51 +0800 Subject: [PATCH 2/2] config: add ngbe mod default option in X86_64 or arm64 arch, set CONFIG_WANGXUN=y, and CONFIG_NGBE=m. 
Signed-off-by: Duanqiang Wen --- arch/arm64/configs/oc.config | 2 ++ arch/x86/configs/oc.config | 2 ++ 2 files changed, 4 insertions(+) diff --git a/arch/arm64/configs/oc.config b/arch/arm64/configs/oc.config index bb36691819ee..e454669c79fb 100644 --- a/arch/arm64/configs/oc.config +++ b/arch/arm64/configs/oc.config @@ -751,6 +751,8 @@ CONFIG_I40E=m CONFIG_I40EVF=m CONFIG_FM10K=m CONFIG_IGC=m +CONFIG_WANGXUN=y +CONFIG_NGBE=m # CONFIG_NET_VENDOR_MARVELL is not set CONFIG_MLX4_EN=m CONFIG_MLX5_CORE=m diff --git a/arch/x86/configs/oc.config b/arch/x86/configs/oc.config index 35673a97488e..d6a1e8d7c003 100644 --- a/arch/x86/configs/oc.config +++ b/arch/x86/configs/oc.config @@ -774,6 +774,8 @@ CONFIG_IXGBEVF=m CONFIG_I40E=m CONFIG_I40EVF=m CONFIG_IGC=m +CONFIG_WANGXUN=y +CONFIG_NGBE=m CONFIG_JME=m CONFIG_MVMDIO=m CONFIG_SKGE=m -- Gitee