diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 23b1abe2a5205cb339759fe8df5ee955ae605b89..d8caa9d4d7179efeffd4e5c4cd9f5092d80b215b 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -192,8 +192,8 @@ source "drivers/net/ethernet/toshiba/Kconfig"
 source "drivers/net/ethernet/tundra/Kconfig"
 source "drivers/net/ethernet/vertexcom/Kconfig"
 source "drivers/net/ethernet/via/Kconfig"
-source "drivers/net/ethernet/wangxun/Kconfig"
 source "drivers/net/ethernet/wiznet/Kconfig"
+source "drivers/net/ethernet/wangxun/Kconfig"
 source "drivers/net/ethernet/xilinx/Kconfig"
 source "drivers/net/ethernet/xircom/Kconfig"
 source "drivers/net/ethernet/bzwx/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 5d1bcbac04b03190de3b1b60e6f130314d23ffb3..018c1ebbfd7b5e1263d3ecb11a5f3a12cab512ef 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -102,8 +102,8 @@ obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/
 obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/
 obj-$(CONFIG_NET_VENDOR_VERTEXCOM) += vertexcom/
 obj-$(CONFIG_NET_VENDOR_VIA) += via/
-obj-$(CONFIG_NET_VENDOR_WANGXUN) += wangxun/
 obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
+obj-$(CONFIG_NET_VENDOR_WANGXUN) += wangxun/
 obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
 obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
 obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index 23cd610bd3766c2b2785e94f1faa9da54990ec00..232582fa384f5298c022db249f9b2f85b9559c3d 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -16,17 +16,9 @@ config NET_VENDOR_WANGXUN
 
 if NET_VENDOR_WANGXUN
 
-config LIBWX
-	tristate
-	select PAGE_POOL
-	help
-	  Common library for Wangxun(R) Ethernet drivers.
-
 config NGBE
 	tristate "Wangxun(R) GbE PCI Express adapters support"
 	depends on PCI
-	select LIBWX
-	select PHYLIB
 	help
 	  This driver supports Wangxun(R) GbE PCI Express family of
 	  adapters.
@@ -40,18 +32,7 @@ config NGBE
 config TXGBE
 	tristate "Wangxun(R) 10GbE PCI Express adapters support"
 	depends on PCI
-	depends on COMMON_CLK
-	select MARVELL_10G_PHY
-	select REGMAP
-	select I2C
-	select I2C_DESIGNWARE_PLATFORM
-	select PHYLINK
 	select HWMON if TXGBE=y
-	select SFP
-	select GPIOLIB
-	select GPIOLIB_IRQCHIP
-	select PCS_XPCS
-	select LIBWX
 	help
 	  This driver supports Wangxun(R) 10GbE PCI Express family of
 	  adapters.
diff --git a/drivers/net/ethernet/wangxun/Makefile b/drivers/net/ethernet/wangxun/Makefile
index ca19311dbe3892caf81816092d121227b50a0eb1..c27c5dc3a4c688f9dc305458cd86f21707178228 100644
--- a/drivers/net/ethernet/wangxun/Makefile
+++ b/drivers/net/ethernet/wangxun/Makefile
@@ -3,6 +3,5 @@
 # Makefile for the Wangxun network device drivers.
 #
 
-obj-$(CONFIG_LIBWX) += libwx/
-obj-$(CONFIG_TXGBE) += txgbe/
 obj-$(CONFIG_NGBE) += ngbe/
+obj-$(CONFIG_TXGBE) += txgbe/
\ No newline at end of file
diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile
deleted file mode 100644
index 42ccd6e4052e51a81a4fcdfa67f1648e0342e225..0000000000000000000000000000000000000000
--- a/drivers/net/ethernet/wangxun/libwx/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd.
-#
-
-obj-$(CONFIG_LIBWX) += libwx.o
-
-libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
deleted file mode 100644
index 93cb6f2294e72e235abbc90ed47aaaf91bfd2ff3..0000000000000000000000000000000000000000
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
+++ /dev/null
@@ -1,18 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
-
-#include
-#include
-
-#include "wx_type.h"
-#include "wx_ethtool.h"
-
-void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
-{
-	struct wx *wx = netdev_priv(netdev);
-
-	strscpy(info->driver, wx->driver_name, sizeof(info->driver));
-	strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version));
-	strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info));
-}
-EXPORT_SYMBOL(wx_get_drvinfo);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
deleted file mode 100644
index e85538c69454070bbccd60a5e1f549e75677442b..0000000000000000000000000000000000000000
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
-
-#ifndef _WX_ETHTOOL_H_
-#define _WX_ETHTOOL_H_
-
-void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info);
-#endif /* _WX_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
deleted file mode 100644
index d6bc2309d2a388c4ff1c0f213e8d09f39f6e795c..0000000000000000000000000000000000000000
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ /dev/null
@@ -1,1935 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
-
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "wx_type.h"
-#include "wx_lib.h"
-#include "wx_hw.h"
-
-static void wx_intr_disable(struct wx *wx, u64 qmask)
-{
-	u32 mask;
-
-	mask = (qmask & U32_MAX);
-	if (mask)
-		wr32(wx, WX_PX_IMS(0), mask);
-
-	if (wx->mac.type == wx_mac_sp) {
-		mask = (qmask >> 32);
-		if (mask)
-			wr32(wx, WX_PX_IMS(1), mask);
-	}
-}
-
-void wx_intr_enable(struct wx *wx, u64 qmask)
-{
-	u32 mask;
-
-	mask = (qmask & U32_MAX);
-	if (mask)
-		wr32(wx, WX_PX_IMC(0), mask);
-	if (wx->mac.type == wx_mac_sp) {
-		mask = (qmask >> 32);
-		if (mask)
-			wr32(wx, WX_PX_IMC(1), mask);
-	}
-}
-EXPORT_SYMBOL(wx_intr_enable);
-
-/**
- * wx_irq_disable - Mask off interrupt generation on the NIC
- * @wx: board private structure
- **/
-void wx_irq_disable(struct wx *wx)
-{
-	struct pci_dev *pdev = wx->pdev;
-
-	wr32(wx, WX_PX_MISC_IEN, 0);
-	wx_intr_disable(wx, WX_INTR_ALL);
-
-	if (pdev->msix_enabled) {
-		int vector;
-
-		for (vector = 0; vector < wx->num_q_vectors; vector++)
-			synchronize_irq(wx->msix_entries[vector].vector);
-
-		synchronize_irq(wx->msix_entries[vector].vector);
-	} else {
-		synchronize_irq(pdev->irq);
-	}
-}
-EXPORT_SYMBOL(wx_irq_disable);
-
-/* cmd_addr is used for some special command:
- * 1. to be sector address, when implemented erase sector command
- * 2.
to be flash address when implemented read, write flash address - */ -static int wx_fmgr_cmd_op(struct wx *wx, u32 cmd, u32 cmd_addr) -{ - u32 cmd_val = 0, val = 0; - - cmd_val = WX_SPI_CMD_CMD(cmd) | - WX_SPI_CMD_CLK(WX_SPI_CLK_DIV) | - cmd_addr; - wr32(wx, WX_SPI_CMD, cmd_val); - - return read_poll_timeout(rd32, val, (val & 0x1), 10, 100000, - false, wx, WX_SPI_STATUS); -} - -static int wx_flash_read_dword(struct wx *wx, u32 addr, u32 *data) -{ - int ret = 0; - - ret = wx_fmgr_cmd_op(wx, WX_SPI_CMD_READ_DWORD, addr); - if (ret < 0) - return ret; - - *data = rd32(wx, WX_SPI_DATA); - - return ret; -} - -int wx_check_flash_load(struct wx *hw, u32 check_bit) -{ - u32 reg = 0; - int err = 0; - - /* if there's flash existing */ - if (!(rd32(hw, WX_SPI_STATUS) & - WX_SPI_STATUS_FLASH_BYPASS)) { - /* wait hw load flash done */ - err = read_poll_timeout(rd32, reg, !(reg & check_bit), 20000, 2000000, - false, hw, WX_SPI_ILDR_STATUS); - if (err < 0) - wx_err(hw, "Check flash load timeout.\n"); - } - - return err; -} -EXPORT_SYMBOL(wx_check_flash_load); - -void wx_control_hw(struct wx *wx, bool drv) -{ - /* True : Let firmware know the driver has taken over - * False : Let firmware take over control of hw - */ - wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_DRV_LOAD, - drv ? WX_CFG_PORT_CTL_DRV_LOAD : 0); -} -EXPORT_SYMBOL(wx_control_hw); - -/** - * wx_mng_present - returns 0 when management capability is present - * @wx: pointer to hardware structure - */ -int wx_mng_present(struct wx *wx) -{ - u32 fwsm; - - fwsm = rd32(wx, WX_MIS_ST); - if (fwsm & WX_MIS_ST_MNG_INIT_DN) - return 0; - else - return -EACCES; -} -EXPORT_SYMBOL(wx_mng_present); - -/* Software lock to be held while software semaphore is being accessed. */ -static DEFINE_MUTEX(wx_sw_sync_lock); - -/** - * wx_release_sw_sync - Release SW semaphore - * @wx: pointer to hardware structure - * @mask: Mask to specify which semaphore to release - * - * Releases the SW semaphore for the specified - * function (CSR, PHY0, PHY1, EEPROM, Flash) - **/ -static void wx_release_sw_sync(struct wx *wx, u32 mask) -{ - mutex_lock(&wx_sw_sync_lock); - wr32m(wx, WX_MNG_SWFW_SYNC, mask, 0); - mutex_unlock(&wx_sw_sync_lock); -} - -/** - * wx_acquire_sw_sync - Acquire SW semaphore - * @wx: pointer to hardware structure - * @mask: Mask to specify which semaphore to acquire - * - * Acquires the SW semaphore for the specified - * function (CSR, PHY0, PHY1, EEPROM, Flash) - **/ -static int wx_acquire_sw_sync(struct wx *wx, u32 mask) -{ - u32 sem = 0; - int ret = 0; - - mutex_lock(&wx_sw_sync_lock); - ret = read_poll_timeout(rd32, sem, !(sem & mask), - 5000, 2000000, false, wx, WX_MNG_SWFW_SYNC); - if (!ret) { - sem |= mask; - wr32(wx, WX_MNG_SWFW_SYNC, sem); - } else { - wx_err(wx, "SW Semaphore not granted: 0x%x.\n", sem); - } - mutex_unlock(&wx_sw_sync_lock); - - return ret; -} - -/** - * wx_host_interface_command - Issue command to manageability block - * @wx: pointer to the HW structure - * @buffer: contains the command to write and where the return status will - * be placed - * @length: length of buffer, must be multiple of 4 bytes - * @timeout: time in ms to wait for command completion - * @return_data: read and return data from the buffer (true) or not (false) - * Needed because FW structures are big endian and decoding of - * these fields can be 8 bit or 16 bit based on command. Decoding - * is not easily understood without making a table of commands. - * So we will leave this up to the caller to read back the data - * in these cases. 
- **/ -int wx_host_interface_command(struct wx *wx, u32 *buffer, - u32 length, u32 timeout, bool return_data) -{ - u32 hdr_size = sizeof(struct wx_hic_hdr); - u32 hicr, i, bi, buf[64] = {}; - int status = 0; - u32 dword_len; - u16 buf_len; - - if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) { - wx_err(wx, "Buffer length failure buffersize=%d.\n", length); - return -EINVAL; - } - - status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB); - if (status != 0) - return status; - - /* Calculate length in DWORDs. We must be DWORD aligned */ - if ((length % (sizeof(u32))) != 0) { - wx_err(wx, "Buffer length failure, not aligned to dword"); - status = -EINVAL; - goto rel_out; - } - - dword_len = length >> 2; - - /* The device driver writes the relevant command block - * into the ram area. - */ - for (i = 0; i < dword_len; i++) { - wr32a(wx, WX_MNG_MBOX, i, (__force u32)cpu_to_le32(buffer[i])); - /* write flush */ - buf[i] = rd32a(wx, WX_MNG_MBOX, i); - } - /* Setting this bit tells the ARC that a new command is pending. */ - wr32m(wx, WX_MNG_MBOX_CTL, - WX_MNG_MBOX_CTL_SWRDY, WX_MNG_MBOX_CTL_SWRDY); - - status = read_poll_timeout(rd32, hicr, hicr & WX_MNG_MBOX_CTL_FWRDY, 1000, - timeout * 1000, false, wx, WX_MNG_MBOX_CTL); - - buf[0] = rd32(wx, WX_MNG_MBOX); - if ((buf[0] & 0xff0000) >> 16 == 0x80) { - wx_err(wx, "Unknown FW command: 0x%x\n", buffer[0] & 0xff); - status = -EINVAL; - goto rel_out; - } - - /* Check command completion */ - if (status) { - wx_err(wx, "Command has failed with no status valid.\n"); - wx_dbg(wx, "write value:\n"); - for (i = 0; i < dword_len; i++) - wx_dbg(wx, "%x ", buffer[i]); - wx_dbg(wx, "read value:\n"); - for (i = 0; i < dword_len; i++) - wx_dbg(wx, "%x ", buf[i]); - wx_dbg(wx, "\ncheck: %x %x\n", buffer[0] & 0xff, ~buf[0] >> 24); - - goto rel_out; - } - - if (!return_data) - goto rel_out; - - /* Calculate length in DWORDs */ - dword_len = hdr_size >> 2; - - /* first pull in the header so we know the buffer length */ - for (bi = 0; bi < dword_len; bi++) { - buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi); - le32_to_cpus(&buffer[bi]); - } - - /* If there is any thing in data position pull it in */ - buf_len = ((struct wx_hic_hdr *)buffer)->buf_len; - if (buf_len == 0) - goto rel_out; - - if (length < buf_len + hdr_size) { - wx_err(wx, "Buffer not large enough for reply message.\n"); - status = -EFAULT; - goto rel_out; - } - - /* Calculate length in DWORDs, add 3 for odd lengths */ - dword_len = (buf_len + 3) >> 2; - - /* Pull in the rest of the buffer (bi is where we left off) */ - for (; bi <= dword_len; bi++) { - buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi); - le32_to_cpus(&buffer[bi]); - } - -rel_out: - wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB); - return status; -} -EXPORT_SYMBOL(wx_host_interface_command); - -/** - * wx_read_ee_hostif_data - Read EEPROM word using a host interface cmd - * assuming that the semaphore is already obtained. - * @wx: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @data: word read from the EEPROM - * - * Reads a 16 bit word from the EEPROM using the hostif. 
- **/ -static int wx_read_ee_hostif_data(struct wx *wx, u16 offset, u16 *data) -{ - struct wx_hic_read_shadow_ram buffer; - int status; - - buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; - buffer.hdr.req.buf_lenh = 0; - buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; - buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; - - /* convert offset from words to bytes */ - buffer.address = (__force u32)cpu_to_be32(offset * 2); - /* one word */ - buffer.length = (__force u16)cpu_to_be16(sizeof(u16)); - - status = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer), - WX_HI_COMMAND_TIMEOUT, false); - - if (status != 0) - return status; - - *data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET); - - return status; -} - -/** - * wx_read_ee_hostif - Read EEPROM word using a host interface cmd - * @wx: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @data: word read from the EEPROM - * - * Reads a 16 bit word from the EEPROM using the hostif. - **/ -int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data) -{ - int status = 0; - - status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH); - if (status == 0) { - status = wx_read_ee_hostif_data(wx, offset, data); - wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH); - } - - return status; -} -EXPORT_SYMBOL(wx_read_ee_hostif); - -/** - * wx_read_ee_hostif_buffer- Read EEPROM word(s) using hostif - * @wx: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @words: number of words - * @data: word(s) read from the EEPROM - * - * Reads a 16 bit word(s) from the EEPROM using the hostif. - **/ -int wx_read_ee_hostif_buffer(struct wx *wx, - u16 offset, u16 words, u16 *data) -{ - struct wx_hic_read_shadow_ram buffer; - u32 current_word = 0; - u16 words_to_read; - u32 value = 0; - int status; - u32 i; - - /* Take semaphore for the entire operation. */ - status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH); - if (status != 0) - return status; - - while (words) { - if (words > FW_MAX_READ_BUFFER_SIZE / 2) - words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; - else - words_to_read = words; - - buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; - buffer.hdr.req.buf_lenh = 0; - buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; - buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; - - /* convert offset from words to bytes */ - buffer.address = (__force u32)cpu_to_be32((offset + current_word) * 2); - buffer.length = (__force u16)cpu_to_be16(words_to_read * 2); - - status = wx_host_interface_command(wx, (u32 *)&buffer, - sizeof(buffer), - WX_HI_COMMAND_TIMEOUT, - false); - - if (status != 0) { - wx_err(wx, "Host interface command failed\n"); - goto out; - } - - for (i = 0; i < words_to_read; i++) { - u32 reg = WX_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + 2 * i; - - value = rd32(wx, reg); - data[current_word] = (u16)(value & 0xffff); - current_word++; - i++; - if (i < words_to_read) { - value >>= 16; - data[current_word] = (u16)(value & 0xffff); - current_word++; - } - } - words -= words_to_read; - } - -out: - wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH); - return status; -} -EXPORT_SYMBOL(wx_read_ee_hostif_buffer); - -/** - * wx_init_eeprom_params - Initialize EEPROM params - * @wx: pointer to hardware structure - * - * Initializes the EEPROM parameters wx_eeprom_info within the - * wx_hw struct in order to set up EEPROM access. 
- **/ -void wx_init_eeprom_params(struct wx *wx) -{ - struct wx_eeprom_info *eeprom = &wx->eeprom; - u16 eeprom_size; - u16 data = 0x80; - - if (eeprom->type == wx_eeprom_uninitialized) { - eeprom->semaphore_delay = 10; - eeprom->type = wx_eeprom_none; - - if (!(rd32(wx, WX_SPI_STATUS) & - WX_SPI_STATUS_FLASH_BYPASS)) { - eeprom->type = wx_flash; - - eeprom_size = 4096; - eeprom->word_size = eeprom_size >> 1; - - wx_dbg(wx, "Eeprom params: type = %d, size = %d\n", - eeprom->type, eeprom->word_size); - } - } - - if (wx->mac.type == wx_mac_sp) { - if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) { - wx_err(wx, "NVM Read Error\n"); - return; - } - data = data >> 1; - } - - eeprom->sw_region_offset = data; -} -EXPORT_SYMBOL(wx_init_eeprom_params); - -/** - * wx_get_mac_addr - Generic get MAC address - * @wx: pointer to hardware structure - * @mac_addr: Adapter MAC address - * - * Reads the adapter's MAC address from first Receive Address Register (RAR0) - * A reset of the adapter must be performed prior to calling this function - * in order for the MAC address to have been loaded from the EEPROM into RAR0 - **/ -void wx_get_mac_addr(struct wx *wx, u8 *mac_addr) -{ - u32 rar_high; - u32 rar_low; - u16 i; - - wr32(wx, WX_PSR_MAC_SWC_IDX, 0); - rar_high = rd32(wx, WX_PSR_MAC_SWC_AD_H); - rar_low = rd32(wx, WX_PSR_MAC_SWC_AD_L); - - for (i = 0; i < 2; i++) - mac_addr[i] = (u8)(rar_high >> (1 - i) * 8); - - for (i = 0; i < 4; i++) - mac_addr[i + 2] = (u8)(rar_low >> (3 - i) * 8); -} -EXPORT_SYMBOL(wx_get_mac_addr); - -/** - * wx_set_rar - Set Rx address register - * @wx: pointer to hardware structure - * @index: Receive address register to write - * @addr: Address to put into receive address register - * @pools: VMDq "set" or "pool" index - * @enable_addr: set flag that address is active - * - * Puts an ethernet address into a receive address register. - **/ -static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools, - u32 enable_addr) -{ - u32 rar_entries = wx->mac.num_rar_entries; - u32 rar_low, rar_high; - - /* Make sure we are using a valid rar index range */ - if (index >= rar_entries) { - wx_err(wx, "RAR index %d is out of range.\n", index); - return -EINVAL; - } - - /* select the MAC address */ - wr32(wx, WX_PSR_MAC_SWC_IDX, index); - - /* setup VMDq pool mapping */ - wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF); - if (wx->mac.type == wx_mac_sp) - wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32); - - /* HW expects these in little endian so we reverse the byte - * order from network order (big endian) to little endian - * - * Some parts put the VMDq setting in the extra RAH bits, - * so save everything except the lower 16 bits that hold part - * of the address and the address valid bit. - */ - rar_low = ((u32)addr[5] | - ((u32)addr[4] << 8) | - ((u32)addr[3] << 16) | - ((u32)addr[2] << 24)); - rar_high = ((u32)addr[1] | - ((u32)addr[0] << 8)); - if (enable_addr != 0) - rar_high |= WX_PSR_MAC_SWC_AD_H_AV; - - wr32(wx, WX_PSR_MAC_SWC_AD_L, rar_low); - wr32m(wx, WX_PSR_MAC_SWC_AD_H, - (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) | - WX_PSR_MAC_SWC_AD_H_ADTYPE(1) | - WX_PSR_MAC_SWC_AD_H_AV), - rar_high); - - return 0; -} - -/** - * wx_clear_rar - Remove Rx address register - * @wx: pointer to hardware structure - * @index: Receive address register to write - * - * Clears an ethernet address from a receive address register. 
- **/ -static int wx_clear_rar(struct wx *wx, u32 index) -{ - u32 rar_entries = wx->mac.num_rar_entries; - - /* Make sure we are using a valid rar index range */ - if (index >= rar_entries) { - wx_err(wx, "RAR index %d is out of range.\n", index); - return -EINVAL; - } - - /* Some parts put the VMDq setting in the extra RAH bits, - * so save everything except the lower 16 bits that hold part - * of the address and the address valid bit. - */ - wr32(wx, WX_PSR_MAC_SWC_IDX, index); - - wr32(wx, WX_PSR_MAC_SWC_VM_L, 0); - wr32(wx, WX_PSR_MAC_SWC_VM_H, 0); - - wr32(wx, WX_PSR_MAC_SWC_AD_L, 0); - wr32m(wx, WX_PSR_MAC_SWC_AD_H, - (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) | - WX_PSR_MAC_SWC_AD_H_ADTYPE(1) | - WX_PSR_MAC_SWC_AD_H_AV), - 0); - - return 0; -} - -/** - * wx_clear_vmdq - Disassociate a VMDq pool index from a rx address - * @wx: pointer to hardware struct - * @rar: receive address register index to disassociate - * @vmdq: VMDq pool index to remove from the rar - **/ -static int wx_clear_vmdq(struct wx *wx, u32 rar, u32 __maybe_unused vmdq) -{ - u32 rar_entries = wx->mac.num_rar_entries; - u32 mpsar_lo, mpsar_hi; - - /* Make sure we are using a valid rar index range */ - if (rar >= rar_entries) { - wx_err(wx, "RAR index %d is out of range.\n", rar); - return -EINVAL; - } - - wr32(wx, WX_PSR_MAC_SWC_IDX, rar); - mpsar_lo = rd32(wx, WX_PSR_MAC_SWC_VM_L); - mpsar_hi = rd32(wx, WX_PSR_MAC_SWC_VM_H); - - if (!mpsar_lo && !mpsar_hi) - return 0; - - /* was that the last pool using this rar? */ - if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) - wx_clear_rar(wx, rar); - - return 0; -} - -/** - * wx_init_uta_tables - Initialize the Unicast Table Array - * @wx: pointer to hardware structure - **/ -static void wx_init_uta_tables(struct wx *wx) -{ - int i; - - wx_dbg(wx, " Clearing UTA\n"); - - for (i = 0; i < 128; i++) - wr32(wx, WX_PSR_UC_TBL(i), 0); -} - -/** - * wx_init_rx_addrs - Initializes receive address filters. - * @wx: pointer to hardware structure - * - * Places the MAC address in receive address register 0 and clears the rest - * of the receive address registers. Clears the multicast table. Assumes - * the receiver is in reset when the routine is called. - **/ -void wx_init_rx_addrs(struct wx *wx) -{ - u32 rar_entries = wx->mac.num_rar_entries; - u32 psrctl; - int i; - - /* If the current mac address is valid, assume it is a software override - * to the permanent address. - * Otherwise, use the permanent address from the eeprom. - */ - if (!is_valid_ether_addr(wx->mac.addr)) { - /* Get the MAC address from the RAR0 for later reference */ - wx_get_mac_addr(wx, wx->mac.addr); - wx_dbg(wx, "Keeping Current RAR0 Addr = %pM\n", wx->mac.addr); - } else { - /* Setup the receive address. */ - wx_dbg(wx, "Overriding MAC Address in RAR[0]\n"); - wx_dbg(wx, "New MAC Addr = %pM\n", wx->mac.addr); - - wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV); - - if (wx->mac.type == wx_mac_sp) { - /* clear VMDq pool/queue selection for RAR 0 */ - wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL); - } - } - - /* Zero out the other receive addresses. 
*/ - wx_dbg(wx, "Clearing RAR[1-%d]\n", rar_entries - 1); - for (i = 1; i < rar_entries; i++) { - wr32(wx, WX_PSR_MAC_SWC_IDX, i); - wr32(wx, WX_PSR_MAC_SWC_AD_L, 0); - wr32(wx, WX_PSR_MAC_SWC_AD_H, 0); - } - - /* Clear the MTA */ - wx->addr_ctrl.mta_in_use = 0; - psrctl = rd32(wx, WX_PSR_CTL); - psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE); - psrctl |= wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT; - wr32(wx, WX_PSR_CTL, psrctl); - wx_dbg(wx, " Clearing MTA\n"); - for (i = 0; i < wx->mac.mcft_size; i++) - wr32(wx, WX_PSR_MC_TBL(i), 0); - - wx_init_uta_tables(wx); -} -EXPORT_SYMBOL(wx_init_rx_addrs); - -static void wx_sync_mac_table(struct wx *wx) -{ - int i; - - for (i = 0; i < wx->mac.num_rar_entries; i++) { - if (wx->mac_table[i].state & WX_MAC_STATE_MODIFIED) { - if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) { - wx_set_rar(wx, i, - wx->mac_table[i].addr, - wx->mac_table[i].pools, - WX_PSR_MAC_SWC_AD_H_AV); - } else { - wx_clear_rar(wx, i); - } - wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED); - } - } -} - -/* this function destroys the first RAR entry */ -void wx_mac_set_default_filter(struct wx *wx, u8 *addr) -{ - memcpy(&wx->mac_table[0].addr, addr, ETH_ALEN); - wx->mac_table[0].pools = 1ULL; - wx->mac_table[0].state = (WX_MAC_STATE_DEFAULT | WX_MAC_STATE_IN_USE); - wx_set_rar(wx, 0, wx->mac_table[0].addr, - wx->mac_table[0].pools, - WX_PSR_MAC_SWC_AD_H_AV); -} -EXPORT_SYMBOL(wx_mac_set_default_filter); - -void wx_flush_sw_mac_table(struct wx *wx) -{ - u32 i; - - for (i = 0; i < wx->mac.num_rar_entries; i++) { - if (!(wx->mac_table[i].state & WX_MAC_STATE_IN_USE)) - continue; - - wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED; - wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE; - memset(wx->mac_table[i].addr, 0, ETH_ALEN); - wx->mac_table[i].pools = 0; - } - wx_sync_mac_table(wx); -} -EXPORT_SYMBOL(wx_flush_sw_mac_table); - -static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool) -{ - u32 i; - - if (is_zero_ether_addr(addr)) - return -EINVAL; - - for (i = 0; i < wx->mac.num_rar_entries; i++) { - if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) { - if (ether_addr_equal(addr, wx->mac_table[i].addr)) { - if (wx->mac_table[i].pools != (1ULL << pool)) { - memcpy(wx->mac_table[i].addr, addr, ETH_ALEN); - wx->mac_table[i].pools |= (1ULL << pool); - wx_sync_mac_table(wx); - return i; - } - } - } - - if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) - continue; - wx->mac_table[i].state |= (WX_MAC_STATE_MODIFIED | - WX_MAC_STATE_IN_USE); - memcpy(wx->mac_table[i].addr, addr, ETH_ALEN); - wx->mac_table[i].pools |= (1ULL << pool); - wx_sync_mac_table(wx); - return i; - } - return -ENOMEM; -} - -static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool) -{ - u32 i; - - if (is_zero_ether_addr(addr)) - return -EINVAL; - - /* search table for addr, if found, set to 0 and sync */ - for (i = 0; i < wx->mac.num_rar_entries; i++) { - if (!ether_addr_equal(addr, wx->mac_table[i].addr)) - continue; - - wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED; - wx->mac_table[i].pools &= ~(1ULL << pool); - if (!wx->mac_table[i].pools) { - wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE; - memset(wx->mac_table[i].addr, 0, ETH_ALEN); - } - wx_sync_mac_table(wx); - return 0; - } - return -ENOMEM; -} - -static int wx_available_rars(struct wx *wx) -{ - u32 i, count = 0; - - for (i = 0; i < wx->mac.num_rar_entries; i++) { - if (wx->mac_table[i].state == 0) - count++; - } - - return count; -} - -/** - * wx_write_uc_addr_list - write unicast addresses to RAR table - * @netdev: network interface device 
structure - * @pool: index for mac table - * - * Writes unicast address list to the RAR table. - * Returns: -ENOMEM on failure/insufficient address space - * 0 on no addresses written - * X on writing X addresses to the RAR table - **/ -static int wx_write_uc_addr_list(struct net_device *netdev, int pool) -{ - struct wx *wx = netdev_priv(netdev); - int count = 0; - - /* return ENOMEM indicating insufficient memory for addresses */ - if (netdev_uc_count(netdev) > wx_available_rars(wx)) - return -ENOMEM; - - if (!netdev_uc_empty(netdev)) { - struct netdev_hw_addr *ha; - - netdev_for_each_uc_addr(ha, netdev) { - wx_del_mac_filter(wx, ha->addr, pool); - wx_add_mac_filter(wx, ha->addr, pool); - count++; - } - } - return count; -} - -/** - * wx_mta_vector - Determines bit-vector in multicast table to set - * @wx: pointer to private structure - * @mc_addr: the multicast address - * - * Extracts the 12 bits, from a multicast address, to determine which - * bit-vector to set in the multicast table. The hardware uses 12 bits, from - * incoming rx multicast addresses, to determine the bit-vector to check in - * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set - * by the MO field of the MCSTCTRL. The MO field is set during initialization - * to mc_filter_type. - **/ -static u32 wx_mta_vector(struct wx *wx, u8 *mc_addr) -{ - u32 vector = 0; - - switch (wx->mac.mc_filter_type) { - case 0: /* use bits [47:36] of the address */ - vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); - break; - case 1: /* use bits [46:35] of the address */ - vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); - break; - case 2: /* use bits [45:34] of the address */ - vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); - break; - case 3: /* use bits [43:32] of the address */ - vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); - break; - default: /* Invalid mc_filter_type */ - wx_err(wx, "MC filter type param set incorrectly\n"); - break; - } - - /* vector can only be 12-bits or boundary will be exceeded */ - vector &= 0xFFF; - return vector; -} - -/** - * wx_set_mta - Set bit-vector in multicast table - * @wx: pointer to private structure - * @mc_addr: Multicast address - * - * Sets the bit-vector in the multicast table. - **/ -static void wx_set_mta(struct wx *wx, u8 *mc_addr) -{ - u32 vector, vector_bit, vector_reg; - - wx->addr_ctrl.mta_in_use++; - - vector = wx_mta_vector(wx, mc_addr); - wx_dbg(wx, " bit-vector = 0x%03X\n", vector); - - /* The MTA is a register array of 128 32-bit registers. It is treated - * like an array of 4096 bits. We want to set bit - * BitArray[vector_value]. So we figure out what register the bit is - * in, read it, OR in the new bit, then write back the new value. The - * register is determined by the upper 7 bits of the vector value and - * the bit within that register are determined by the lower 5 bits of - * the value. - */ - vector_reg = (vector >> 5) & 0x7F; - vector_bit = vector & 0x1F; - wx->mac.mta_shadow[vector_reg] |= (1 << vector_bit); -} - -/** - * wx_update_mc_addr_list - Updates MAC list of multicast addresses - * @wx: pointer to private structure - * @netdev: pointer to net device structure - * - * The given list replaces any existing list. Clears the MC addrs from receive - * address registers and the multicast table. Uses unused receive address - * registers for the first multicast addresses, and hashes the rest into the - * multicast table. 
- **/ -static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev) -{ - struct netdev_hw_addr *ha; - u32 i, psrctl; - - /* Set the new number of MC addresses that we are being requested to - * use. - */ - wx->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); - wx->addr_ctrl.mta_in_use = 0; - - /* Clear mta_shadow */ - wx_dbg(wx, " Clearing MTA\n"); - memset(&wx->mac.mta_shadow, 0, sizeof(wx->mac.mta_shadow)); - - /* Update mta_shadow */ - netdev_for_each_mc_addr(ha, netdev) { - wx_dbg(wx, " Adding the multicast addresses:\n"); - wx_set_mta(wx, ha->addr); - } - - /* Enable mta */ - for (i = 0; i < wx->mac.mcft_size; i++) - wr32a(wx, WX_PSR_MC_TBL(0), i, - wx->mac.mta_shadow[i]); - - if (wx->addr_ctrl.mta_in_use > 0) { - psrctl = rd32(wx, WX_PSR_CTL); - psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE); - psrctl |= WX_PSR_CTL_MFE | - (wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT); - wr32(wx, WX_PSR_CTL, psrctl); - } - - wx_dbg(wx, "Update mc addr list Complete\n"); -} - -/** - * wx_write_mc_addr_list - write multicast addresses to MTA - * @netdev: network interface device structure - * - * Writes multicast address list to the MTA hash table. - * Returns: 0 on no addresses written - * X on writing X addresses to MTA - **/ -static int wx_write_mc_addr_list(struct net_device *netdev) -{ - struct wx *wx = netdev_priv(netdev); - - if (!netif_running(netdev)) - return 0; - - wx_update_mc_addr_list(wx, netdev); - - return netdev_mc_count(netdev); -} - -/** - * wx_set_mac - Change the Ethernet Address of the NIC - * @netdev: network interface device structure - * @p: pointer to an address structure - * - * Returns 0 on success, negative on failure - **/ -int wx_set_mac(struct net_device *netdev, void *p) -{ - struct wx *wx = netdev_priv(netdev); - struct sockaddr *addr = p; - int retval; - - retval = eth_prepare_mac_addr_change(netdev, addr); - if (retval) - return retval; - - wx_del_mac_filter(wx, wx->mac.addr, 0); - eth_hw_addr_set(netdev, addr->sa_data); - memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len); - - wx_mac_set_default_filter(wx, wx->mac.addr); - - return 0; -} -EXPORT_SYMBOL(wx_set_mac); - -void wx_disable_rx(struct wx *wx) -{ - u32 pfdtxgswc; - u32 rxctrl; - - rxctrl = rd32(wx, WX_RDB_PB_CTL); - if (rxctrl & WX_RDB_PB_CTL_RXEN) { - pfdtxgswc = rd32(wx, WX_PSR_CTL); - if (pfdtxgswc & WX_PSR_CTL_SW_EN) { - pfdtxgswc &= ~WX_PSR_CTL_SW_EN; - wr32(wx, WX_PSR_CTL, pfdtxgswc); - wx->mac.set_lben = true; - } else { - wx->mac.set_lben = false; - } - rxctrl &= ~WX_RDB_PB_CTL_RXEN; - wr32(wx, WX_RDB_PB_CTL, rxctrl); - - if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || - ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) { - /* disable mac receiver */ - wr32m(wx, WX_MAC_RX_CFG, - WX_MAC_RX_CFG_RE, 0); - } - } -} -EXPORT_SYMBOL(wx_disable_rx); - -static void wx_enable_rx(struct wx *wx) -{ - u32 psrctl; - - /* enable mac receiver */ - wr32m(wx, WX_MAC_RX_CFG, - WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE); - - wr32m(wx, WX_RDB_PB_CTL, - WX_RDB_PB_CTL_RXEN, WX_RDB_PB_CTL_RXEN); - - if (wx->mac.set_lben) { - psrctl = rd32(wx, WX_PSR_CTL); - psrctl |= WX_PSR_CTL_SW_EN; - wr32(wx, WX_PSR_CTL, psrctl); - wx->mac.set_lben = false; - } -} - -/** - * wx_set_rxpba - Initialize Rx packet buffer - * @wx: pointer to private structure - **/ -static void wx_set_rxpba(struct wx *wx) -{ - u32 rxpktsize, txpktsize, txpbthresh; - - rxpktsize = wx->mac.rx_pb_size << WX_RDB_PB_SZ_SHIFT; - wr32(wx, WX_RDB_PB_SZ(0), rxpktsize); - - /* Only support an equally distributed Tx packet buffer 
strategy. */ - txpktsize = wx->mac.tx_pb_size; - txpbthresh = (txpktsize / 1024) - WX_TXPKT_SIZE_MAX; - wr32(wx, WX_TDB_PB_SZ(0), txpktsize); - wr32(wx, WX_TDM_PB_THRE(0), txpbthresh); -} - -static void wx_configure_port(struct wx *wx) -{ - u32 value, i; - - value = WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ; - wr32m(wx, WX_CFG_PORT_CTL, - WX_CFG_PORT_CTL_D_VLAN | - WX_CFG_PORT_CTL_QINQ, - value); - - wr32(wx, WX_CFG_TAG_TPID(0), - ETH_P_8021Q | ETH_P_8021AD << 16); - wx->tpid[0] = ETH_P_8021Q; - wx->tpid[1] = ETH_P_8021AD; - for (i = 1; i < 4; i++) - wr32(wx, WX_CFG_TAG_TPID(i), - ETH_P_8021Q | ETH_P_8021Q << 16); - for (i = 2; i < 8; i++) - wx->tpid[i] = ETH_P_8021Q; -} - -/** - * wx_disable_sec_rx_path - Stops the receive data path - * @wx: pointer to private structure - * - * Stops the receive data path and waits for the HW to internally empty - * the Rx security block - **/ -static int wx_disable_sec_rx_path(struct wx *wx) -{ - u32 secrx; - - wr32m(wx, WX_RSC_CTL, - WX_RSC_CTL_RX_DIS, WX_RSC_CTL_RX_DIS); - - return read_poll_timeout(rd32, secrx, secrx & WX_RSC_ST_RSEC_RDY, - 1000, 40000, false, wx, WX_RSC_ST); -} - -/** - * wx_enable_sec_rx_path - Enables the receive data path - * @wx: pointer to private structure - * - * Enables the receive data path. - **/ -static void wx_enable_sec_rx_path(struct wx *wx) -{ - wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_RX_DIS, 0); - WX_WRITE_FLUSH(wx); -} - -static void wx_vlan_strip_control(struct wx *wx, bool enable) -{ - int i, j; - - for (i = 0; i < wx->num_rx_queues; i++) { - struct wx_ring *ring = wx->rx_ring[i]; - - j = ring->reg_idx; - wr32m(wx, WX_PX_RR_CFG(j), WX_PX_RR_CFG_VLAN, - enable ? WX_PX_RR_CFG_VLAN : 0); - } -} - -void wx_set_rx_mode(struct net_device *netdev) -{ - struct wx *wx = netdev_priv(netdev); - netdev_features_t features; - u32 fctrl, vmolr, vlnctrl; - int count; - - features = netdev->features; - - /* Check for Promiscuous and All Multicast modes */ - fctrl = rd32(wx, WX_PSR_CTL); - fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE); - vmolr = rd32(wx, WX_PSR_VM_L2CTL(0)); - vmolr &= ~(WX_PSR_VM_L2CTL_UPE | - WX_PSR_VM_L2CTL_MPE | - WX_PSR_VM_L2CTL_ROPE | - WX_PSR_VM_L2CTL_ROMPE); - vlnctrl = rd32(wx, WX_PSR_VLAN_CTL); - vlnctrl &= ~(WX_PSR_VLAN_CTL_VFE | WX_PSR_VLAN_CTL_CFIEN); - - /* set all bits that we expect to always be set */ - fctrl |= WX_PSR_CTL_BAM | WX_PSR_CTL_MFE; - vmolr |= WX_PSR_VM_L2CTL_BAM | - WX_PSR_VM_L2CTL_AUPE | - WX_PSR_VM_L2CTL_VACC; - vlnctrl |= WX_PSR_VLAN_CTL_VFE; - - wx->addr_ctrl.user_set_promisc = false; - if (netdev->flags & IFF_PROMISC) { - wx->addr_ctrl.user_set_promisc = true; - fctrl |= WX_PSR_CTL_UPE | WX_PSR_CTL_MPE; - /* pf don't want packets routing to vf, so clear UPE */ - vmolr |= WX_PSR_VM_L2CTL_MPE; - vlnctrl &= ~WX_PSR_VLAN_CTL_VFE; - } - - if (netdev->flags & IFF_ALLMULTI) { - fctrl |= WX_PSR_CTL_MPE; - vmolr |= WX_PSR_VM_L2CTL_MPE; - } - - if (netdev->features & NETIF_F_RXALL) { - vmolr |= (WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_MPE); - vlnctrl &= ~WX_PSR_VLAN_CTL_VFE; - /* receive bad packets */ - wr32m(wx, WX_RSC_CTL, - WX_RSC_CTL_SAVE_MAC_ERR, - WX_RSC_CTL_SAVE_MAC_ERR); - } else { - vmolr |= WX_PSR_VM_L2CTL_ROPE | WX_PSR_VM_L2CTL_ROMPE; - } - - /* Write addresses to available RAR registers, if there is not - * sufficient space to store all the addresses then enable - * unicast promiscuous mode - */ - count = wx_write_uc_addr_list(netdev, 0); - if (count < 0) { - vmolr &= ~WX_PSR_VM_L2CTL_ROPE; - vmolr |= WX_PSR_VM_L2CTL_UPE; - } - - /* Write addresses to the MTA, if the attempt fails - * 
then we should just turn on promiscuous mode so - * that we can at least receive multicast traffic - */ - count = wx_write_mc_addr_list(netdev); - if (count < 0) { - vmolr &= ~WX_PSR_VM_L2CTL_ROMPE; - vmolr |= WX_PSR_VM_L2CTL_MPE; - } - - wr32(wx, WX_PSR_VLAN_CTL, vlnctrl); - wr32(wx, WX_PSR_CTL, fctrl); - wr32(wx, WX_PSR_VM_L2CTL(0), vmolr); - - if ((features & NETIF_F_HW_VLAN_CTAG_RX) && - (features & NETIF_F_HW_VLAN_STAG_RX)) - wx_vlan_strip_control(wx, true); - else - wx_vlan_strip_control(wx, false); - -} -EXPORT_SYMBOL(wx_set_rx_mode); - -static void wx_set_rx_buffer_len(struct wx *wx) -{ - struct net_device *netdev = wx->netdev; - u32 mhadd, max_frame; - - max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - /* adjust max frame to be at least the size of a standard frame */ - if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) - max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN); - - mhadd = rd32(wx, WX_PSR_MAX_SZ); - if (max_frame != mhadd) - wr32(wx, WX_PSR_MAX_SZ, max_frame); -} - -/** - * wx_change_mtu - Change the Maximum Transfer Unit - * @netdev: network interface device structure - * @new_mtu: new value for maximum frame size - * - * Returns 0 on success, negative on failure - **/ -int wx_change_mtu(struct net_device *netdev, int new_mtu) -{ - struct wx *wx = netdev_priv(netdev); - - netdev->mtu = new_mtu; - wx_set_rx_buffer_len(wx); - - return 0; -} -EXPORT_SYMBOL(wx_change_mtu); - -/* Disable the specified rx queue */ -void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring) -{ - u8 reg_idx = ring->reg_idx; - u32 rxdctl; - int ret; - - /* write value back with RRCFG.EN bit cleared */ - wr32m(wx, WX_PX_RR_CFG(reg_idx), - WX_PX_RR_CFG_RR_EN, 0); - - /* the hardware may take up to 100us to really disable the rx queue */ - ret = read_poll_timeout(rd32, rxdctl, !(rxdctl & WX_PX_RR_CFG_RR_EN), - 10, 100, true, wx, WX_PX_RR_CFG(reg_idx)); - - if (ret == -ETIMEDOUT) { - /* Just for information */ - wx_err(wx, - "RRCFG.EN on Rx queue %d not cleared within the polling period\n", - reg_idx); - } -} -EXPORT_SYMBOL(wx_disable_rx_queue); - -static void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring) -{ - u8 reg_idx = ring->reg_idx; - u32 rxdctl; - int ret; - - ret = read_poll_timeout(rd32, rxdctl, rxdctl & WX_PX_RR_CFG_RR_EN, - 1000, 10000, true, wx, WX_PX_RR_CFG(reg_idx)); - - if (ret == -ETIMEDOUT) { - /* Just for information */ - wx_err(wx, - "RRCFG.EN on Rx queue %d not set within the polling period\n", - reg_idx); - } -} - -static void wx_configure_srrctl(struct wx *wx, - struct wx_ring *rx_ring) -{ - u16 reg_idx = rx_ring->reg_idx; - u32 srrctl; - - srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx)); - srrctl &= ~(WX_PX_RR_CFG_RR_HDR_SZ | - WX_PX_RR_CFG_RR_BUF_SZ | - WX_PX_RR_CFG_SPLIT_MODE); - /* configure header buffer length, needed for RSC */ - srrctl |= WX_RXBUFFER_256 << WX_PX_RR_CFG_BHDRSIZE_SHIFT; - - /* configure the packet buffer length */ - srrctl |= WX_RX_BUFSZ >> WX_PX_RR_CFG_BSIZEPKT_SHIFT; - - wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl); -} - -static void wx_configure_tx_ring(struct wx *wx, - struct wx_ring *ring) -{ - u32 txdctl = WX_PX_TR_CFG_ENABLE; - u8 reg_idx = ring->reg_idx; - u64 tdba = ring->dma; - int ret; - - /* disable queue to avoid issues while updating state */ - wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH); - WX_WRITE_FLUSH(wx); - - wr32(wx, WX_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32)); - wr32(wx, WX_PX_TR_BAH(reg_idx), upper_32_bits(tdba)); - - /* reset head and tail pointers */ - wr32(wx, WX_PX_TR_RP(reg_idx), 0); - wr32(wx, 
WX_PX_TR_WP(reg_idx), 0); - ring->tail = wx->hw_addr + WX_PX_TR_WP(reg_idx); - - if (ring->count < WX_MAX_TXD) - txdctl |= ring->count / 128 << WX_PX_TR_CFG_TR_SIZE_SHIFT; - txdctl |= 0x20 << WX_PX_TR_CFG_WTHRESH_SHIFT; - - /* reinitialize tx_buffer_info */ - memset(ring->tx_buffer_info, 0, - sizeof(struct wx_tx_buffer) * ring->count); - - /* enable queue */ - wr32(wx, WX_PX_TR_CFG(reg_idx), txdctl); - - /* poll to verify queue is enabled */ - ret = read_poll_timeout(rd32, txdctl, txdctl & WX_PX_TR_CFG_ENABLE, - 1000, 10000, true, wx, WX_PX_TR_CFG(reg_idx)); - if (ret == -ETIMEDOUT) - wx_err(wx, "Could not enable Tx Queue %d\n", reg_idx); -} - -static void wx_configure_rx_ring(struct wx *wx, - struct wx_ring *ring) -{ - u16 reg_idx = ring->reg_idx; - union wx_rx_desc *rx_desc; - u64 rdba = ring->dma; - u32 rxdctl; - - /* disable queue to avoid issues while updating state */ - rxdctl = rd32(wx, WX_PX_RR_CFG(reg_idx)); - wx_disable_rx_queue(wx, ring); - - wr32(wx, WX_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32)); - wr32(wx, WX_PX_RR_BAH(reg_idx), upper_32_bits(rdba)); - - if (ring->count == WX_MAX_RXD) - rxdctl |= 0 << WX_PX_RR_CFG_RR_SIZE_SHIFT; - else - rxdctl |= (ring->count / 128) << WX_PX_RR_CFG_RR_SIZE_SHIFT; - - rxdctl |= 0x1 << WX_PX_RR_CFG_RR_THER_SHIFT; - wr32(wx, WX_PX_RR_CFG(reg_idx), rxdctl); - - /* reset head and tail pointers */ - wr32(wx, WX_PX_RR_RP(reg_idx), 0); - wr32(wx, WX_PX_RR_WP(reg_idx), 0); - ring->tail = wx->hw_addr + WX_PX_RR_WP(reg_idx); - - wx_configure_srrctl(wx, ring); - - /* initialize rx_buffer_info */ - memset(ring->rx_buffer_info, 0, - sizeof(struct wx_rx_buffer) * ring->count); - - /* initialize Rx descriptor 0 */ - rx_desc = WX_RX_DESC(ring, 0); - rx_desc->wb.upper.length = 0; - - /* enable receive descriptor ring */ - wr32m(wx, WX_PX_RR_CFG(reg_idx), - WX_PX_RR_CFG_RR_EN, WX_PX_RR_CFG_RR_EN); - - wx_enable_rx_queue(wx, ring); - wx_alloc_rx_buffers(ring, wx_desc_unused(ring)); -} - -/** - * wx_configure_tx - Configure Transmit Unit after Reset - * @wx: pointer to private structure - * - * Configure the Tx unit of the MAC after a reset. - **/ -static void wx_configure_tx(struct wx *wx) -{ - u32 i; - - /* TDM_CTL.TE must be before Tx queues are enabled */ - wr32m(wx, WX_TDM_CTL, - WX_TDM_CTL_TE, WX_TDM_CTL_TE); - - /* Setup the HW Tx Head and Tail descriptor pointers */ - for (i = 0; i < wx->num_tx_queues; i++) - wx_configure_tx_ring(wx, wx->tx_ring[i]); - - wr32m(wx, WX_TSC_BUF_AE, WX_TSC_BUF_AE_THR, 0x10); - - if (wx->mac.type == wx_mac_em) - wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS | WX_TSC_CTL_TSEC_DIS, 0x1); - - /* enable mac transmitter */ - wr32m(wx, WX_MAC_TX_CFG, - WX_MAC_TX_CFG_TE, WX_MAC_TX_CFG_TE); -} - -static void wx_restore_vlan(struct wx *wx) -{ - u16 vid = 1; - - wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), 0); - - for_each_set_bit_from(vid, wx->active_vlans, VLAN_N_VID) - wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), vid); -} - -/** - * wx_configure_rx - Configure Receive Unit after Reset - * @wx: pointer to private structure - * - * Configure the Rx unit of the MAC after a reset. 
- **/ -void wx_configure_rx(struct wx *wx) -{ - u32 psrtype, i; - int ret; - - wx_disable_rx(wx); - - psrtype = WX_RDB_PL_CFG_L4HDR | - WX_RDB_PL_CFG_L3HDR | - WX_RDB_PL_CFG_L2HDR | - WX_RDB_PL_CFG_TUN_TUNHDR; - wr32(wx, WX_RDB_PL_CFG(0), psrtype); - - /* enable hw crc stripping */ - wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP); - - if (wx->mac.type == wx_mac_sp) { - u32 psrctl; - - /* RSC Setup */ - psrctl = rd32(wx, WX_PSR_CTL); - psrctl |= WX_PSR_CTL_RSC_ACK; /* Disable RSC for ACK packets */ - psrctl |= WX_PSR_CTL_RSC_DIS; - wr32(wx, WX_PSR_CTL, psrctl); - } - - /* set_rx_buffer_len must be called before ring initialization */ - wx_set_rx_buffer_len(wx); - - /* Setup the HW Rx Head and Tail Descriptor Pointers and - * the Base and Length of the Rx Descriptor Ring - */ - for (i = 0; i < wx->num_rx_queues; i++) - wx_configure_rx_ring(wx, wx->rx_ring[i]); - - /* Enable all receives, disable security engine prior to block traffic */ - ret = wx_disable_sec_rx_path(wx); - if (ret < 0) - wx_err(wx, "The register status is abnormal, please check device."); - - wx_enable_rx(wx); - wx_enable_sec_rx_path(wx); -} -EXPORT_SYMBOL(wx_configure_rx); - -static void wx_configure_isb(struct wx *wx) -{ - /* set ISB Address */ - wr32(wx, WX_PX_ISB_ADDR_L, wx->isb_dma & DMA_BIT_MASK(32)); - if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) - wr32(wx, WX_PX_ISB_ADDR_H, upper_32_bits(wx->isb_dma)); -} - -void wx_configure(struct wx *wx) -{ - wx_set_rxpba(wx); - wx_configure_port(wx); - - wx_set_rx_mode(wx->netdev); - wx_restore_vlan(wx); - wx_enable_sec_rx_path(wx); - - wx_configure_tx(wx); - wx_configure_rx(wx); - wx_configure_isb(wx); -} -EXPORT_SYMBOL(wx_configure); - -/** - * wx_disable_pcie_master - Disable PCI-express master access - * @wx: pointer to hardware structure - * - * Disables PCI-Express master access and verifies there are no pending - * requests. - **/ -int wx_disable_pcie_master(struct wx *wx) -{ - int status = 0; - u32 val; - - /* Always set this bit to ensure any future transactions are blocked */ - pci_clear_master(wx->pdev); - - /* Exit if master requests are blocked */ - if (!(rd32(wx, WX_PX_TRANSACTION_PENDING))) - return 0; - - /* Poll for master request bit to clear */ - status = read_poll_timeout(rd32, val, !val, 100, WX_PCI_MASTER_DISABLE_TIMEOUT, - false, wx, WX_PX_TRANSACTION_PENDING); - if (status < 0) - wx_err(wx, "PCIe transaction pending bit did not clear.\n"); - - return status; -} -EXPORT_SYMBOL(wx_disable_pcie_master); - -/** - * wx_stop_adapter - Generic stop Tx/Rx units - * @wx: pointer to hardware structure - * - * Sets the adapter_stopped flag within wx_hw struct. Clears interrupts, - * disables transmit and receive units. The adapter_stopped flag is used by - * the shared code and drivers to determine if the adapter is in a stopped - * state and should not touch the hardware. - **/ -int wx_stop_adapter(struct wx *wx) -{ - u16 i; - - /* Set the adapter_stopped flag so other driver functions stop touching - * the hardware - */ - wx->adapter_stopped = true; - - /* Disable the receive unit */ - wx_disable_rx(wx); - - /* Set interrupt mask to stop interrupts from being generated */ - wx_intr_disable(wx, WX_INTR_ALL); - - /* Clear any pending interrupts, flush previous writes */ - wr32(wx, WX_PX_MISC_IC, 0xffffffff); - wr32(wx, WX_BME_CTL, 0x3); - - /* Disable the transmit unit. Each queue must be disabled. 
*/ - for (i = 0; i < wx->mac.max_tx_queues; i++) { - wr32m(wx, WX_PX_TR_CFG(i), - WX_PX_TR_CFG_SWFLSH | WX_PX_TR_CFG_ENABLE, - WX_PX_TR_CFG_SWFLSH); - } - - /* Disable the receive unit by stopping each queue */ - for (i = 0; i < wx->mac.max_rx_queues; i++) { - wr32m(wx, WX_PX_RR_CFG(i), - WX_PX_RR_CFG_RR_EN, 0); - } - - /* flush all queues disables */ - WX_WRITE_FLUSH(wx); - - /* Prevent the PCI-E bus from hanging by disabling PCI-E master - * access and verify no pending requests - */ - return wx_disable_pcie_master(wx); -} -EXPORT_SYMBOL(wx_stop_adapter); - -void wx_reset_misc(struct wx *wx) -{ - int i; - - /* receive packets that size > 2048 */ - wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE); - - /* clear counters on read */ - wr32m(wx, WX_MMC_CONTROL, - WX_MMC_CONTROL_RSTONRD, WX_MMC_CONTROL_RSTONRD); - - wr32m(wx, WX_MAC_RX_FLOW_CTRL, - WX_MAC_RX_FLOW_CTRL_RFE, WX_MAC_RX_FLOW_CTRL_RFE); - - wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); - - wr32m(wx, WX_MIS_RST_ST, - WX_MIS_RST_ST_RST_INIT, 0x1E00); - - /* errata 4: initialize mng flex tbl and wakeup flex tbl*/ - wr32(wx, WX_PSR_MNG_FLEX_SEL, 0); - for (i = 0; i < 16; i++) { - wr32(wx, WX_PSR_MNG_FLEX_DW_L(i), 0); - wr32(wx, WX_PSR_MNG_FLEX_DW_H(i), 0); - wr32(wx, WX_PSR_MNG_FLEX_MSK(i), 0); - } - wr32(wx, WX_PSR_LAN_FLEX_SEL, 0); - for (i = 0; i < 16; i++) { - wr32(wx, WX_PSR_LAN_FLEX_DW_L(i), 0); - wr32(wx, WX_PSR_LAN_FLEX_DW_H(i), 0); - wr32(wx, WX_PSR_LAN_FLEX_MSK(i), 0); - } - - /* set pause frame dst mac addr */ - wr32(wx, WX_RDB_PFCMACDAL, 0xC2000001); - wr32(wx, WX_RDB_PFCMACDAH, 0x0180); -} -EXPORT_SYMBOL(wx_reset_misc); - -/** - * wx_get_pcie_msix_counts - Gets MSI-X vector count - * @wx: pointer to hardware structure - * @msix_count: number of MSI interrupts that can be obtained - * @max_msix_count: number of MSI interrupts that mac need - * - * Read PCIe configuration space, and get the MSI-X vector count from - * the capabilities table. 
- **/ -int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count) -{ - struct pci_dev *pdev = wx->pdev; - struct device *dev = &pdev->dev; - int pos; - - *msix_count = 1; - pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); - if (!pos) { - dev_err(dev, "Unable to find MSI-X Capabilities\n"); - return -EINVAL; - } - pci_read_config_word(pdev, - pos + PCI_MSIX_FLAGS, - msix_count); - *msix_count &= WX_PCIE_MSIX_TBL_SZ_MASK; - /* MSI-X count is zero-based in HW */ - *msix_count += 1; - - if (*msix_count > max_msix_count) - *msix_count = max_msix_count; - - return 0; -} -EXPORT_SYMBOL(wx_get_pcie_msix_counts); - -int wx_sw_init(struct wx *wx) -{ - struct pci_dev *pdev = wx->pdev; - u32 ssid = 0; - int err = 0; - - wx->vendor_id = pdev->vendor; - wx->device_id = pdev->device; - wx->revision_id = pdev->revision; - wx->oem_svid = pdev->subsystem_vendor; - wx->oem_ssid = pdev->subsystem_device; - wx->bus.device = PCI_SLOT(pdev->devfn); - wx->bus.func = PCI_FUNC(pdev->devfn); - - if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN) { - wx->subsystem_vendor_id = pdev->subsystem_vendor; - wx->subsystem_device_id = pdev->subsystem_device; - } else { - err = wx_flash_read_dword(wx, 0xfffdc, &ssid); - if (err < 0) { - wx_err(wx, "read of internal subsystem device id failed\n"); - return err; - } - - wx->subsystem_device_id = swab16((u16)ssid); - } - - wx->mac_table = kcalloc(wx->mac.num_rar_entries, - sizeof(struct wx_mac_addr), - GFP_KERNEL); - if (!wx->mac_table) { - wx_err(wx, "mac_table allocation failed\n"); - return -ENOMEM; - } - - return 0; -} -EXPORT_SYMBOL(wx_sw_init); - -/** - * wx_find_vlvf_slot - find the vlanid or the first empty slot - * @wx: pointer to hardware structure - * @vlan: VLAN id to write to VLAN filter - * - * return the VLVF index where this VLAN id should be placed - * - **/ -static int wx_find_vlvf_slot(struct wx *wx, u32 vlan) -{ - u32 bits = 0, first_empty_slot = 0; - int regindex; - - /* short cut the special case */ - if (vlan == 0) - return 0; - - /* Search for the vlan id in the VLVF entries. Save off the first empty - * slot found along the way - */ - for (regindex = 1; regindex < WX_PSR_VLAN_SWC_ENTRIES; regindex++) { - wr32(wx, WX_PSR_VLAN_SWC_IDX, regindex); - bits = rd32(wx, WX_PSR_VLAN_SWC); - if (!bits && !(first_empty_slot)) - first_empty_slot = regindex; - else if ((bits & 0x0FFF) == vlan) - break; - } - - if (regindex >= WX_PSR_VLAN_SWC_ENTRIES) { - if (first_empty_slot) - regindex = first_empty_slot; - else - regindex = -ENOMEM; - } - - return regindex; -} - -/** - * wx_set_vlvf - Set VLAN Pool Filter - * @wx: pointer to hardware structure - * @vlan: VLAN id to write to VLAN filter - * @vind: VMDq output index that maps queue to VLAN id in VFVFB - * @vlan_on: boolean flag to turn on/off VLAN in VFVF - * @vfta_changed: pointer to boolean flag which indicates whether VFTA - * should be changed - * - * Turn on/off specified bit in VLVF table. 
- **/ -static int wx_set_vlvf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on, - bool *vfta_changed) -{ - int vlvf_index; - u32 vt, bits; - - /* If VT Mode is set - * Either vlan_on - * make sure the vlan is in VLVF - * set the vind bit in the matching VLVFB - * Or !vlan_on - * clear the pool bit and possibly the vind - */ - vt = rd32(wx, WX_CFG_PORT_CTL); - if (!(vt & WX_CFG_PORT_CTL_NUM_VT_MASK)) - return 0; - - vlvf_index = wx_find_vlvf_slot(wx, vlan); - if (vlvf_index < 0) - return vlvf_index; - - wr32(wx, WX_PSR_VLAN_SWC_IDX, vlvf_index); - if (vlan_on) { - /* set the pool bit */ - if (vind < 32) { - bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L); - bits |= (1 << vind); - wr32(wx, WX_PSR_VLAN_SWC_VM_L, bits); - } else { - bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H); - bits |= (1 << (vind - 32)); - wr32(wx, WX_PSR_VLAN_SWC_VM_H, bits); - } - } else { - /* clear the pool bit */ - if (vind < 32) { - bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L); - bits &= ~(1 << vind); - wr32(wx, WX_PSR_VLAN_SWC_VM_L, bits); - bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H); - } else { - bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H); - bits &= ~(1 << (vind - 32)); - wr32(wx, WX_PSR_VLAN_SWC_VM_H, bits); - bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L); - } - } - - if (bits) { - wr32(wx, WX_PSR_VLAN_SWC, (WX_PSR_VLAN_SWC_VIEN | vlan)); - if (!vlan_on && vfta_changed) - *vfta_changed = false; - } else { - wr32(wx, WX_PSR_VLAN_SWC, 0); - } - - return 0; -} - -/** - * wx_set_vfta - Set VLAN filter table - * @wx: pointer to hardware structure - * @vlan: VLAN id to write to VLAN filter - * @vind: VMDq output index that maps queue to VLAN id in VFVFB - * @vlan_on: boolean flag to turn on/off VLAN in VFVF - * - * Turn on/off specified VLAN in the VLAN filter table. - **/ -static int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on) -{ - u32 bitindex, vfta, targetbit; - bool vfta_changed = false; - int regindex, ret; - - /* this is a 2 part operation - first the VFTA, then the - * VLVF and VLVFB if VT Mode is set - * We don't write the VFTA until we know the VLVF part succeeded. 
- */ - - /* Part 1 - * The VFTA is a bitstring made up of 128 32-bit registers - * that enable the particular VLAN id, much like the MTA: - * bits[11-5]: which register - * bits[4-0]: which bit in the register - */ - regindex = (vlan >> 5) & 0x7F; - bitindex = vlan & 0x1F; - targetbit = (1 << bitindex); - /* errata 5 */ - vfta = wx->mac.vft_shadow[regindex]; - if (vlan_on) { - if (!(vfta & targetbit)) { - vfta |= targetbit; - vfta_changed = true; - } - } else { - if ((vfta & targetbit)) { - vfta &= ~targetbit; - vfta_changed = true; - } - } - /* Part 2 - * Call wx_set_vlvf to set VLVFB and VLVF - */ - ret = wx_set_vlvf(wx, vlan, vind, vlan_on, &vfta_changed); - if (ret != 0) - return ret; - - if (vfta_changed) - wr32(wx, WX_PSR_VLAN_TBL(regindex), vfta); - wx->mac.vft_shadow[regindex] = vfta; - - return 0; -} - -/** - * wx_clear_vfta - Clear VLAN filter table - * @wx: pointer to hardware structure - * - * Clears the VLAN filer table, and the VMDq index associated with the filter - **/ -static void wx_clear_vfta(struct wx *wx) -{ - u32 offset; - - for (offset = 0; offset < wx->mac.vft_size; offset++) { - wr32(wx, WX_PSR_VLAN_TBL(offset), 0); - wx->mac.vft_shadow[offset] = 0; - } - - for (offset = 0; offset < WX_PSR_VLAN_SWC_ENTRIES; offset++) { - wr32(wx, WX_PSR_VLAN_SWC_IDX, offset); - wr32(wx, WX_PSR_VLAN_SWC, 0); - wr32(wx, WX_PSR_VLAN_SWC_VM_L, 0); - wr32(wx, WX_PSR_VLAN_SWC_VM_H, 0); - } -} - -int wx_vlan_rx_add_vid(struct net_device *netdev, - __be16 proto, u16 vid) -{ - struct wx *wx = netdev_priv(netdev); - - /* add VID to filter table */ - wx_set_vfta(wx, vid, VMDQ_P(0), true); - set_bit(vid, wx->active_vlans); - - return 0; -} -EXPORT_SYMBOL(wx_vlan_rx_add_vid); - -int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) -{ - struct wx *wx = netdev_priv(netdev); - - /* remove VID from filter table */ - if (vid) - wx_set_vfta(wx, vid, VMDQ_P(0), false); - clear_bit(vid, wx->active_vlans); - - return 0; -} -EXPORT_SYMBOL(wx_vlan_rx_kill_vid); - -/** - * wx_start_hw - Prepare hardware for Tx/Rx - * @wx: pointer to hardware structure - * - * Starts the hardware using the generic start_hw function - * and the generation start_hw function. - * Then performs revision-specific operations, if any. - **/ -void wx_start_hw(struct wx *wx) -{ - int i; - - /* Clear the VLAN filter table */ - wx_clear_vfta(wx); - WX_WRITE_FLUSH(wx); - /* Clear the rate limiters */ - for (i = 0; i < wx->mac.max_tx_queues; i++) { - wr32(wx, WX_TDM_RP_IDX, i); - wr32(wx, WX_TDM_RP_RATE, 0); - } -} -EXPORT_SYMBOL(wx_start_hw); - -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h deleted file mode 100644 index 0b3447bc6f2fec53fac9ccd5144430380b17d46b..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h +++ /dev/null @@ -1,38 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ - -#ifndef _WX_HW_H_ -#define _WX_HW_H_ - -void wx_intr_enable(struct wx *wx, u64 qmask); -void wx_irq_disable(struct wx *wx); -int wx_check_flash_load(struct wx *wx, u32 check_bit); -void wx_control_hw(struct wx *wx, bool drv); -int wx_mng_present(struct wx *wx); -int wx_host_interface_command(struct wx *wx, u32 *buffer, - u32 length, u32 timeout, bool return_data); -int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data); -int wx_read_ee_hostif_buffer(struct wx *wx, - u16 offset, u16 words, u16 *data); -void wx_init_eeprom_params(struct wx *wx); -void wx_get_mac_addr(struct wx *wx, u8 *mac_addr); -void wx_init_rx_addrs(struct wx *wx); -void wx_mac_set_default_filter(struct wx *wx, u8 *addr); -void wx_flush_sw_mac_table(struct wx *wx); -int wx_set_mac(struct net_device *netdev, void *p); -void wx_disable_rx(struct wx *wx); -void wx_set_rx_mode(struct net_device *netdev); -int wx_change_mtu(struct net_device *netdev, int new_mtu); -void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring); -void wx_configure_rx(struct wx *wx); -void wx_configure(struct wx *wx); -void wx_start_hw(struct wx *wx); -int wx_disable_pcie_master(struct wx *wx); -int wx_stop_adapter(struct wx *wx); -void wx_reset_misc(struct wx *wx); -int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count); -int wx_sw_init(struct wx *wx); -int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); -int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); - -#endif /* _WX_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c deleted file mode 100644 index c019fe964eceaf9f29dea827ed9230a0f21a15bc..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ /dev/null @@ -1,2662 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "wx_type.h" -#include "wx_lib.h" -#include "wx_hw.h" - -/* Lookup table mapping the HW PTYPE to the bit field for decoding */ -static struct wx_dec_ptype wx_ptype_lookup[256] = { - /* L2: mac */ - [0x11] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2), - [0x12] = WX_PTT(L2, NONE, NONE, NONE, TS, PAY2), - [0x13] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2), - [0x14] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2), - [0x15] = WX_PTT(L2, NONE, NONE, NONE, NONE, NONE), - [0x16] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2), - [0x17] = WX_PTT(L2, NONE, NONE, NONE, NONE, NONE), - - /* L2: ethertype filter */ - [0x18 ... 0x1F] = WX_PTT(L2, NONE, NONE, NONE, NONE, NONE), - - /* L3: ip non-tunnel */ - [0x21] = WX_PTT(IP, FGV4, NONE, NONE, NONE, PAY3), - [0x22] = WX_PTT(IP, IPV4, NONE, NONE, NONE, PAY3), - [0x23] = WX_PTT(IP, IPV4, NONE, NONE, UDP, PAY4), - [0x24] = WX_PTT(IP, IPV4, NONE, NONE, TCP, PAY4), - [0x25] = WX_PTT(IP, IPV4, NONE, NONE, SCTP, PAY4), - [0x29] = WX_PTT(IP, FGV6, NONE, NONE, NONE, PAY3), - [0x2A] = WX_PTT(IP, IPV6, NONE, NONE, NONE, PAY3), - [0x2B] = WX_PTT(IP, IPV6, NONE, NONE, UDP, PAY3), - [0x2C] = WX_PTT(IP, IPV6, NONE, NONE, TCP, PAY4), - [0x2D] = WX_PTT(IP, IPV6, NONE, NONE, SCTP, PAY4), - - /* L2: fcoe */ - [0x30 ... 0x34] = WX_PTT(FCOE, NONE, NONE, NONE, NONE, PAY3), - [0x38 ... 
0x3C] = WX_PTT(FCOE, NONE, NONE, NONE, NONE, PAY3), - - /* IPv4 --> IPv4/IPv6 */ - [0x81] = WX_PTT(IP, IPV4, IPIP, FGV4, NONE, PAY3), - [0x82] = WX_PTT(IP, IPV4, IPIP, IPV4, NONE, PAY3), - [0x83] = WX_PTT(IP, IPV4, IPIP, IPV4, UDP, PAY4), - [0x84] = WX_PTT(IP, IPV4, IPIP, IPV4, TCP, PAY4), - [0x85] = WX_PTT(IP, IPV4, IPIP, IPV4, SCTP, PAY4), - [0x89] = WX_PTT(IP, IPV4, IPIP, FGV6, NONE, PAY3), - [0x8A] = WX_PTT(IP, IPV4, IPIP, IPV6, NONE, PAY3), - [0x8B] = WX_PTT(IP, IPV4, IPIP, IPV6, UDP, PAY4), - [0x8C] = WX_PTT(IP, IPV4, IPIP, IPV6, TCP, PAY4), - [0x8D] = WX_PTT(IP, IPV4, IPIP, IPV6, SCTP, PAY4), - - /* IPv4 --> GRE/NAT --> NONE/IPv4/IPv6 */ - [0x90] = WX_PTT(IP, IPV4, IG, NONE, NONE, PAY3), - [0x91] = WX_PTT(IP, IPV4, IG, FGV4, NONE, PAY3), - [0x92] = WX_PTT(IP, IPV4, IG, IPV4, NONE, PAY3), - [0x93] = WX_PTT(IP, IPV4, IG, IPV4, UDP, PAY4), - [0x94] = WX_PTT(IP, IPV4, IG, IPV4, TCP, PAY4), - [0x95] = WX_PTT(IP, IPV4, IG, IPV4, SCTP, PAY4), - [0x99] = WX_PTT(IP, IPV4, IG, FGV6, NONE, PAY3), - [0x9A] = WX_PTT(IP, IPV4, IG, IPV6, NONE, PAY3), - [0x9B] = WX_PTT(IP, IPV4, IG, IPV6, UDP, PAY4), - [0x9C] = WX_PTT(IP, IPV4, IG, IPV6, TCP, PAY4), - [0x9D] = WX_PTT(IP, IPV4, IG, IPV6, SCTP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC --> NONE/IPv4/IPv6 */ - [0xA0] = WX_PTT(IP, IPV4, IGM, NONE, NONE, PAY3), - [0xA1] = WX_PTT(IP, IPV4, IGM, FGV4, NONE, PAY3), - [0xA2] = WX_PTT(IP, IPV4, IGM, IPV4, NONE, PAY3), - [0xA3] = WX_PTT(IP, IPV4, IGM, IPV4, UDP, PAY4), - [0xA4] = WX_PTT(IP, IPV4, IGM, IPV4, TCP, PAY4), - [0xA5] = WX_PTT(IP, IPV4, IGM, IPV4, SCTP, PAY4), - [0xA9] = WX_PTT(IP, IPV4, IGM, FGV6, NONE, PAY3), - [0xAA] = WX_PTT(IP, IPV4, IGM, IPV6, NONE, PAY3), - [0xAB] = WX_PTT(IP, IPV4, IGM, IPV6, UDP, PAY4), - [0xAC] = WX_PTT(IP, IPV4, IGM, IPV6, TCP, PAY4), - [0xAD] = WX_PTT(IP, IPV4, IGM, IPV6, SCTP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC+VLAN --> NONE/IPv4/IPv6 */ - [0xB0] = WX_PTT(IP, IPV4, IGMV, NONE, NONE, PAY3), - [0xB1] = WX_PTT(IP, IPV4, IGMV, FGV4, NONE, PAY3), - [0xB2] = WX_PTT(IP, IPV4, IGMV, IPV4, NONE, PAY3), - [0xB3] = WX_PTT(IP, IPV4, IGMV, IPV4, UDP, PAY4), - [0xB4] = WX_PTT(IP, IPV4, IGMV, IPV4, TCP, PAY4), - [0xB5] = WX_PTT(IP, IPV4, IGMV, IPV4, SCTP, PAY4), - [0xB9] = WX_PTT(IP, IPV4, IGMV, FGV6, NONE, PAY3), - [0xBA] = WX_PTT(IP, IPV4, IGMV, IPV6, NONE, PAY3), - [0xBB] = WX_PTT(IP, IPV4, IGMV, IPV6, UDP, PAY4), - [0xBC] = WX_PTT(IP, IPV4, IGMV, IPV6, TCP, PAY4), - [0xBD] = WX_PTT(IP, IPV4, IGMV, IPV6, SCTP, PAY4), - - /* IPv6 --> IPv4/IPv6 */ - [0xC1] = WX_PTT(IP, IPV6, IPIP, FGV4, NONE, PAY3), - [0xC2] = WX_PTT(IP, IPV6, IPIP, IPV4, NONE, PAY3), - [0xC3] = WX_PTT(IP, IPV6, IPIP, IPV4, UDP, PAY4), - [0xC4] = WX_PTT(IP, IPV6, IPIP, IPV4, TCP, PAY4), - [0xC5] = WX_PTT(IP, IPV6, IPIP, IPV4, SCTP, PAY4), - [0xC9] = WX_PTT(IP, IPV6, IPIP, FGV6, NONE, PAY3), - [0xCA] = WX_PTT(IP, IPV6, IPIP, IPV6, NONE, PAY3), - [0xCB] = WX_PTT(IP, IPV6, IPIP, IPV6, UDP, PAY4), - [0xCC] = WX_PTT(IP, IPV6, IPIP, IPV6, TCP, PAY4), - [0xCD] = WX_PTT(IP, IPV6, IPIP, IPV6, SCTP, PAY4), - - /* IPv6 --> GRE/NAT -> NONE/IPv4/IPv6 */ - [0xD0] = WX_PTT(IP, IPV6, IG, NONE, NONE, PAY3), - [0xD1] = WX_PTT(IP, IPV6, IG, FGV4, NONE, PAY3), - [0xD2] = WX_PTT(IP, IPV6, IG, IPV4, NONE, PAY3), - [0xD3] = WX_PTT(IP, IPV6, IG, IPV4, UDP, PAY4), - [0xD4] = WX_PTT(IP, IPV6, IG, IPV4, TCP, PAY4), - [0xD5] = WX_PTT(IP, IPV6, IG, IPV4, SCTP, PAY4), - [0xD9] = WX_PTT(IP, IPV6, IG, FGV6, NONE, PAY3), - [0xDA] = WX_PTT(IP, IPV6, IG, IPV6, NONE, PAY3), - [0xDB] = WX_PTT(IP, IPV6, IG, IPV6, UDP, PAY4), - [0xDC] = WX_PTT(IP, IPV6, 
IG, IPV6, TCP, PAY4), - [0xDD] = WX_PTT(IP, IPV6, IG, IPV6, SCTP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC -> NONE/IPv4/IPv6 */ - [0xE0] = WX_PTT(IP, IPV6, IGM, NONE, NONE, PAY3), - [0xE1] = WX_PTT(IP, IPV6, IGM, FGV4, NONE, PAY3), - [0xE2] = WX_PTT(IP, IPV6, IGM, IPV4, NONE, PAY3), - [0xE3] = WX_PTT(IP, IPV6, IGM, IPV4, UDP, PAY4), - [0xE4] = WX_PTT(IP, IPV6, IGM, IPV4, TCP, PAY4), - [0xE5] = WX_PTT(IP, IPV6, IGM, IPV4, SCTP, PAY4), - [0xE9] = WX_PTT(IP, IPV6, IGM, FGV6, NONE, PAY3), - [0xEA] = WX_PTT(IP, IPV6, IGM, IPV6, NONE, PAY3), - [0xEB] = WX_PTT(IP, IPV6, IGM, IPV6, UDP, PAY4), - [0xEC] = WX_PTT(IP, IPV6, IGM, IPV6, TCP, PAY4), - [0xED] = WX_PTT(IP, IPV6, IGM, IPV6, SCTP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC--> NONE/IPv */ - [0xF0] = WX_PTT(IP, IPV6, IGMV, NONE, NONE, PAY3), - [0xF1] = WX_PTT(IP, IPV6, IGMV, FGV4, NONE, PAY3), - [0xF2] = WX_PTT(IP, IPV6, IGMV, IPV4, NONE, PAY3), - [0xF3] = WX_PTT(IP, IPV6, IGMV, IPV4, UDP, PAY4), - [0xF4] = WX_PTT(IP, IPV6, IGMV, IPV4, TCP, PAY4), - [0xF5] = WX_PTT(IP, IPV6, IGMV, IPV4, SCTP, PAY4), - [0xF9] = WX_PTT(IP, IPV6, IGMV, FGV6, NONE, PAY3), - [0xFA] = WX_PTT(IP, IPV6, IGMV, IPV6, NONE, PAY3), - [0xFB] = WX_PTT(IP, IPV6, IGMV, IPV6, UDP, PAY4), - [0xFC] = WX_PTT(IP, IPV6, IGMV, IPV6, TCP, PAY4), - [0xFD] = WX_PTT(IP, IPV6, IGMV, IPV6, SCTP, PAY4), -}; - -static struct wx_dec_ptype wx_decode_ptype(const u8 ptype) -{ - return wx_ptype_lookup[ptype]; -} - -/* wx_test_staterr - tests bits in Rx descriptor status and error fields */ -static __le32 wx_test_staterr(union wx_rx_desc *rx_desc, - const u32 stat_err_bits) -{ - return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); -} - -static void wx_dma_sync_frag(struct wx_ring *rx_ring, - struct wx_rx_buffer *rx_buffer) -{ - struct sk_buff *skb = rx_buffer->skb; - skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; - - dma_sync_single_range_for_cpu(rx_ring->dev, - WX_CB(skb)->dma, - skb_frag_off(frag), - skb_frag_size(frag), - DMA_FROM_DEVICE); - - /* If the page was released, just unmap it. */ - if (unlikely(WX_CB(skb)->page_released)) - page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); -} - -static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring, - union wx_rx_desc *rx_desc, - struct sk_buff **skb, - int *rx_buffer_pgcnt) -{ - struct wx_rx_buffer *rx_buffer; - unsigned int size; - - rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; - size = le16_to_cpu(rx_desc->wb.upper.length); - -#if (PAGE_SIZE < 8192) - *rx_buffer_pgcnt = page_count(rx_buffer->page); -#else - *rx_buffer_pgcnt = 0; -#endif - - prefetchw(rx_buffer->page); - *skb = rx_buffer->skb; - - /* Delay unmapping of the first packet. It carries the header - * information, HW may still access the header after the writeback. 
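/* Illustrative sketch (not part of the original driver source): the
 * packet-type decode above is a flat 256-entry table indexed directly by
 * the 8-bit hardware ptype, so codes that are not listed fall back to an
 * all-zero entry instead of needing a default branch.  The struct and
 * values below are simplified stand-ins.
 */
#include <stdint.h>

struct example_dec_ptype {
	uint8_t known;
	uint8_t l4_prot;
};

static const struct example_dec_ptype example_ptype_lookup[256] = {
	[0x23] = { .known = 1, .l4_prot = 17 },	/* IPv4 + UDP */
	[0x24] = { .known = 1, .l4_prot = 6 },	/* IPv4 + TCP */
};

static struct example_dec_ptype example_decode_ptype(uint8_t ptype)
{
	return example_ptype_lookup[ptype];	/* zeroed entry when unknown */
}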
- * Only unmap it when EOP is reached - */ - if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)) { - if (!*skb) - goto skip_sync; - } else { - if (*skb) - wx_dma_sync_frag(rx_ring, rx_buffer); - } - - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - size, - DMA_FROM_DEVICE); -skip_sync: - return rx_buffer; -} - -static void wx_put_rx_buffer(struct wx_ring *rx_ring, - struct wx_rx_buffer *rx_buffer, - struct sk_buff *skb, - int rx_buffer_pgcnt) -{ - if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma) - /* the page has been released from the ring */ - WX_CB(skb)->page_released = true; - - /* clear contents of rx_buffer */ - rx_buffer->page = NULL; - rx_buffer->skb = NULL; -} - -static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring, - struct wx_rx_buffer *rx_buffer, - union wx_rx_desc *rx_desc) -{ - unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); -#if (PAGE_SIZE < 8192) - unsigned int truesize = WX_RX_BUFSZ; -#else - unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); -#endif - struct sk_buff *skb = rx_buffer->skb; - - if (!skb) { - void *page_addr = page_address(rx_buffer->page) + - rx_buffer->page_offset; - - /* prefetch first cache line of first page */ - prefetch(page_addr); -#if L1_CACHE_BYTES < 128 - prefetch(page_addr + L1_CACHE_BYTES); -#endif - - /* allocate a skb to store the frags */ - skb = napi_alloc_skb(&rx_ring->q_vector->napi, WX_RXBUFFER_256); - if (unlikely(!skb)) - return NULL; - - /* we will be copying header into skb->data in - * pskb_may_pull so it is in our interest to prefetch - * it now to avoid a possible cache miss - */ - prefetchw(skb->data); - - if (size <= WX_RXBUFFER_256) { - memcpy(__skb_put(skb, size), page_addr, - ALIGN(size, sizeof(long))); - page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, true); - return skb; - } - - skb_mark_for_recycle(skb); - - if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)) - WX_CB(skb)->dma = rx_buffer->dma; - - skb_add_rx_frag(skb, 0, rx_buffer->page, - rx_buffer->page_offset, - size, truesize); - goto out; - - } else { - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, - rx_buffer->page_offset, size, truesize); - } - -out: -#if (PAGE_SIZE < 8192) - /* flip page offset to other buffer */ - rx_buffer->page_offset ^= truesize; -#else - /* move offset up to the next cache line */ - rx_buffer->page_offset += truesize; -#endif - - return skb; -} - -static bool wx_alloc_mapped_page(struct wx_ring *rx_ring, - struct wx_rx_buffer *bi) -{ - struct page *page = bi->page; - dma_addr_t dma; - - /* since we are recycling buffers we should seldom need to alloc */ - if (likely(page)) - return true; - - page = page_pool_dev_alloc_pages(rx_ring->page_pool); - if (unlikely(!page)) - return false; - dma = page_pool_get_dma_addr(page); - - bi->page_dma = dma; - bi->page = page; - bi->page_offset = 0; - - return true; -} - -/** - * wx_alloc_rx_buffers - Replace used receive buffers - * @rx_ring: ring to place buffers on - * @cleaned_count: number of buffers to replace - **/ -void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count) -{ - u16 i = rx_ring->next_to_use; - union wx_rx_desc *rx_desc; - struct wx_rx_buffer *bi; - - /* nothing to do */ - if (!cleaned_count) - return; - - rx_desc = WX_RX_DESC(rx_ring, i); - bi = &rx_ring->rx_buffer_info[i]; - i -= rx_ring->count; - - do { - if (!wx_alloc_mapped_page(rx_ring, bi)) - break; - - /* sync the buffer for use by the device */ - 
dma_sync_single_range_for_device(rx_ring->dev, bi->dma, - bi->page_offset, - WX_RX_BUFSZ, - DMA_FROM_DEVICE); - - rx_desc->read.pkt_addr = - cpu_to_le64(bi->page_dma + bi->page_offset); - - rx_desc++; - bi++; - i++; - if (unlikely(!i)) { - rx_desc = WX_RX_DESC(rx_ring, 0); - bi = rx_ring->rx_buffer_info; - i -= rx_ring->count; - } - - /* clear the status bits for the next_to_use descriptor */ - rx_desc->wb.upper.status_error = 0; - - cleaned_count--; - } while (cleaned_count); - - i += rx_ring->count; - - if (rx_ring->next_to_use != i) { - rx_ring->next_to_use = i; - /* update next to alloc since we have filled the ring */ - rx_ring->next_to_alloc = i; - - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). - */ - wmb(); - writel(i, rx_ring->tail); - } -} - -u16 wx_desc_unused(struct wx_ring *ring) -{ - u16 ntc = ring->next_to_clean; - u16 ntu = ring->next_to_use; - - return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; -} - -/** - * wx_is_non_eop - process handling of non-EOP buffers - * @rx_ring: Rx ring being processed - * @rx_desc: Rx descriptor for current buffer - * @skb: Current socket buffer containing buffer in progress - * - * This function updates next to clean. If the buffer is an EOP buffer - * this function exits returning false, otherwise it will place the - * sk_buff in the next buffer to be chained and return true indicating - * that this is in fact a non-EOP buffer. - **/ -static bool wx_is_non_eop(struct wx_ring *rx_ring, - union wx_rx_desc *rx_desc, - struct sk_buff *skb) -{ - u32 ntc = rx_ring->next_to_clean + 1; - - /* fetch, update, and store next to clean */ - ntc = (ntc < rx_ring->count) ? ntc : 0; - rx_ring->next_to_clean = ntc; - - prefetch(WX_RX_DESC(rx_ring, ntc)); - - /* if we are the last buffer then there is nothing else to do */ - if (likely(wx_test_staterr(rx_desc, WX_RXD_STAT_EOP))) - return false; - - rx_ring->rx_buffer_info[ntc].skb = skb; - - return true; -} - -static void wx_pull_tail(struct sk_buff *skb) -{ - skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; - unsigned int pull_len; - unsigned char *va; - - /* it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. - */ - pull_len = eth_get_headlen(skb->dev, va, WX_RXBUFFER_256); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - skb_frag_off_add(frag, pull_len); - skb->data_len -= pull_len; - skb->tail += pull_len; -} - -/** - * wx_cleanup_headers - Correct corrupted or empty headers - * @rx_ring: rx descriptor ring packet is being transacted on - * @rx_desc: pointer to the EOP Rx descriptor - * @skb: pointer to current skb being fixed - * - * Check for corrupted packet headers caused by senders on the local L2 - * embedded NIC switch not setting up their Tx Descriptors right. These - * should be very rare. - * - * Also address the case where we are pulling data in on pages only - * and as such no data is present in the skb header. 
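/* Illustrative sketch (not part of the original driver source): the
 * wx_desc_unused() arithmetic above always keeps one descriptor in
 * reserve so a full ring is distinguishable from an empty one.  With an
 * 8-entry ring, next_to_clean = 2 and next_to_use = 6, four descriptors
 * are in flight and 8 + 2 - 6 - 1 = 3 slots are reported free.
 */
#include <stdint.h>

static uint16_t example_desc_unused(uint16_t ntc, uint16_t ntu, uint16_t count)
{
	return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
}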
- * - * In addition if skb is not at least 60 bytes we need to pad it so that - * it is large enough to qualify as a valid Ethernet frame. - * - * Returns true if an error was encountered and skb was freed. - **/ -static bool wx_cleanup_headers(struct wx_ring *rx_ring, - union wx_rx_desc *rx_desc, - struct sk_buff *skb) -{ - struct net_device *netdev = rx_ring->netdev; - - /* verify that the packet does not have any known errors */ - if (!netdev || - unlikely(wx_test_staterr(rx_desc, WX_RXD_ERR_RXE) && - !(netdev->features & NETIF_F_RXALL))) { - dev_kfree_skb_any(skb); - return true; - } - - /* place header in linear portion of buffer */ - if (!skb_headlen(skb)) - wx_pull_tail(skb); - - /* if eth_skb_pad returns an error the skb was freed */ - if (eth_skb_pad(skb)) - return true; - - return false; -} - -static void wx_rx_hash(struct wx_ring *ring, - union wx_rx_desc *rx_desc, - struct sk_buff *skb) -{ - u16 rss_type; - - if (!(ring->netdev->features & NETIF_F_RXHASH)) - return; - - rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & - WX_RXD_RSSTYPE_MASK; - - if (!rss_type) - return; - - skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), - (WX_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? - PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); -} - -/** - * wx_rx_checksum - indicate in skb if hw indicated a good cksum - * @ring: structure containing ring specific data - * @rx_desc: current Rx descriptor being processed - * @skb: skb currently being received and modified - **/ -static void wx_rx_checksum(struct wx_ring *ring, - union wx_rx_desc *rx_desc, - struct sk_buff *skb) -{ - struct wx_dec_ptype dptype = wx_decode_ptype(WX_RXD_PKTTYPE(rx_desc)); - - skb_checksum_none_assert(skb); - /* Rx csum disabled */ - if (!(ring->netdev->features & NETIF_F_RXCSUM)) - return; - - /* if IPv4 header checksum error */ - if ((wx_test_staterr(rx_desc, WX_RXD_STAT_IPCS) && - wx_test_staterr(rx_desc, WX_RXD_ERR_IPE)) || - (wx_test_staterr(rx_desc, WX_RXD_STAT_OUTERIPCS) && - wx_test_staterr(rx_desc, WX_RXD_ERR_OUTERIPER))) { - ring->rx_stats.csum_err++; - return; - } - - /* L4 checksum offload flag must set for the below code to work */ - if (!wx_test_staterr(rx_desc, WX_RXD_STAT_L4CS)) - return; - - /* Hardware can't guarantee csum if IPv6 Dest Header found */ - if (dptype.prot != WX_DEC_PTYPE_PROT_SCTP && WX_RXD_IPV6EX(rx_desc)) - return; - - /* if L4 checksum error */ - if (wx_test_staterr(rx_desc, WX_RXD_ERR_TCPE)) { - ring->rx_stats.csum_err++; - return; - } - - /* It must be a TCP or UDP or SCTP packet with a valid checksum */ - skb->ip_summed = CHECKSUM_UNNECESSARY; - - /* If there is an outer header present that might contain a checksum - * we need to bump the checksum level by 1 to reflect the fact that - * we are indicating we validated the inner checksum. 
- */ - if (dptype.etype >= WX_DEC_PTYPE_ETYPE_IG) - __skb_incr_checksum_unnecessary(skb); - ring->rx_stats.csum_good_cnt++; -} - -static void wx_rx_vlan(struct wx_ring *ring, union wx_rx_desc *rx_desc, - struct sk_buff *skb) -{ - u16 ethertype; - u8 idx = 0; - - if ((ring->netdev->features & - (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) && - wx_test_staterr(rx_desc, WX_RXD_STAT_VP)) { - idx = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & - 0x1c0) >> 6; - ethertype = ring->q_vector->wx->tpid[idx]; - __vlan_hwaccel_put_tag(skb, htons(ethertype), - le16_to_cpu(rx_desc->wb.upper.vlan)); - } -} - -/** - * wx_process_skb_fields - Populate skb header fields from Rx descriptor - * @rx_ring: rx descriptor ring packet is being transacted on - * @rx_desc: pointer to the EOP Rx descriptor - * @skb: pointer to current skb being populated - * - * This function checks the ring, descriptor, and packet information in - * order to populate the hash, checksum, protocol, and - * other fields within the skb. - **/ -static void wx_process_skb_fields(struct wx_ring *rx_ring, - union wx_rx_desc *rx_desc, - struct sk_buff *skb) -{ - wx_rx_hash(rx_ring, rx_desc, skb); - wx_rx_checksum(rx_ring, rx_desc, skb); - wx_rx_vlan(rx_ring, rx_desc, skb); - skb_record_rx_queue(skb, rx_ring->queue_index); - skb->protocol = eth_type_trans(skb, rx_ring->netdev); -} - -/** - * wx_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf - * @q_vector: structure containing interrupt and ring information - * @rx_ring: rx descriptor ring to transact packets on - * @budget: Total limit on number of packets to process - * - * This function provides a "bounce buffer" approach to Rx interrupt - * processing. The advantage to this is that on systems that have - * expensive overhead for IOMMU access this provides a means of avoiding - * it by maintaining the mapping of the page to the system. - * - * Returns amount of work completed. 
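/* Illustrative sketch (not part of the original driver source): the Rx
 * VLAN path above chooses which TPID to report by pulling a 3-bit index
 * out of bits 8:6 of the descriptor's pkt_info word and using it to look
 * up the adapter's TPID table (0x8100, 0x88A8, ...).
 */
#include <stdint.h>

static uint16_t example_rx_vlan_tpid(uint16_t pkt_info, const uint16_t tpid[8])
{
	uint8_t idx = (pkt_info & 0x1c0) >> 6;	/* bits 8:6 of pkt_info */

	return tpid[idx];
}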
- **/ -static int wx_clean_rx_irq(struct wx_q_vector *q_vector, - struct wx_ring *rx_ring, - int budget) -{ - unsigned int total_rx_bytes = 0, total_rx_packets = 0; - u16 cleaned_count = wx_desc_unused(rx_ring); - - do { - struct wx_rx_buffer *rx_buffer; - union wx_rx_desc *rx_desc; - struct sk_buff *skb; - int rx_buffer_pgcnt; - - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= WX_RX_BUFFER_WRITE) { - wx_alloc_rx_buffers(rx_ring, cleaned_count); - cleaned_count = 0; - } - - rx_desc = WX_RX_DESC(rx_ring, rx_ring->next_to_clean); - if (!wx_test_staterr(rx_desc, WX_RXD_STAT_DD)) - break; - - /* This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc until we know the - * descriptor has been written back - */ - dma_rmb(); - - rx_buffer = wx_get_rx_buffer(rx_ring, rx_desc, &skb, &rx_buffer_pgcnt); - - /* retrieve a buffer from the ring */ - skb = wx_build_skb(rx_ring, rx_buffer, rx_desc); - - /* exit if we failed to retrieve a buffer */ - if (!skb) { - break; - } - - wx_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt); - cleaned_count++; - - /* place incomplete frames back on ring for completion */ - if (wx_is_non_eop(rx_ring, rx_desc, skb)) - continue; - - /* verify the packet layout is correct */ - if (wx_cleanup_headers(rx_ring, rx_desc, skb)) - continue; - - /* probably a little skewed due to removing CRC */ - total_rx_bytes += skb->len; - - /* populate checksum, timestamp, VLAN, and protocol */ - wx_process_skb_fields(rx_ring, rx_desc, skb); - napi_gro_receive(&q_vector->napi, skb); - - /* update budget accounting */ - total_rx_packets++; - } while (likely(total_rx_packets < budget)); - - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->stats.packets += total_rx_packets; - rx_ring->stats.bytes += total_rx_bytes; - u64_stats_update_end(&rx_ring->syncp); - q_vector->rx.total_packets += total_rx_packets; - q_vector->rx.total_bytes += total_rx_bytes; - - return total_rx_packets; -} - -static struct netdev_queue *wx_txring_txq(const struct wx_ring *ring) -{ - return netdev_get_tx_queue(ring->netdev, ring->queue_index); -} - -/** - * wx_clean_tx_irq - Reclaim resources after transmit completes - * @q_vector: structure containing interrupt and ring information - * @tx_ring: tx ring to clean - * @napi_budget: Used to determine if we are in netpoll - **/ -static bool wx_clean_tx_irq(struct wx_q_vector *q_vector, - struct wx_ring *tx_ring, int napi_budget) -{ - unsigned int budget = q_vector->wx->tx_work_limit; - unsigned int total_bytes = 0, total_packets = 0; - unsigned int i = tx_ring->next_to_clean; - struct wx_tx_buffer *tx_buffer; - union wx_tx_desc *tx_desc; - - if (!netif_carrier_ok(tx_ring->netdev)) - return true; - - tx_buffer = &tx_ring->tx_buffer_info[i]; - tx_desc = WX_TX_DESC(tx_ring, i); - i -= tx_ring->count; - - do { - union wx_tx_desc *eop_desc = tx_buffer->next_to_watch; - - /* if next_to_watch is not set then there is no work pending */ - if (!eop_desc) - break; - - /* prevent any other reads prior to eop_desc */ - smp_rmb(); - - /* if DD is not set pending work has not been completed */ - if (!(eop_desc->wb.status & cpu_to_le32(WX_TXD_STAT_DD))) - break; - - /* clear next_to_watch to prevent false hangs */ - tx_buffer->next_to_watch = NULL; - - /* update the statistics for this packet */ - total_bytes += tx_buffer->bytecount; - total_packets += tx_buffer->gso_segs; - - /* free the skb */ - napi_consume_skb(tx_buffer->skb, napi_budget); - - /* unmap skb header data */ - 
dma_unmap_single(tx_ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - - /* clear tx_buffer data */ - dma_unmap_len_set(tx_buffer, len, 0); - - /* unmap remaining buffers */ - while (tx_desc != eop_desc) { - tx_buffer++; - tx_desc++; - i++; - if (unlikely(!i)) { - i -= tx_ring->count; - tx_buffer = tx_ring->tx_buffer_info; - tx_desc = WX_TX_DESC(tx_ring, 0); - } - - /* unmap any remaining paged data */ - if (dma_unmap_len(tx_buffer, len)) { - dma_unmap_page(tx_ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - dma_unmap_len_set(tx_buffer, len, 0); - } - } - - /* move us one more past the eop_desc for start of next pkt */ - tx_buffer++; - tx_desc++; - i++; - if (unlikely(!i)) { - i -= tx_ring->count; - tx_buffer = tx_ring->tx_buffer_info; - tx_desc = WX_TX_DESC(tx_ring, 0); - } - - /* issue prefetch for next Tx descriptor */ - prefetch(tx_desc); - - /* update budget accounting */ - budget--; - } while (likely(budget)); - - i += tx_ring->count; - tx_ring->next_to_clean = i; - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->stats.bytes += total_bytes; - tx_ring->stats.packets += total_packets; - u64_stats_update_end(&tx_ring->syncp); - q_vector->tx.total_bytes += total_bytes; - q_vector->tx.total_packets += total_packets; - - netdev_tx_completed_queue(wx_txring_txq(tx_ring), - total_packets, total_bytes); - -#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) - if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && - (wx_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { - /* Make sure that anybody stopping the queue after this - * sees the new next_to_clean. - */ - smp_mb(); - - if (__netif_subqueue_stopped(tx_ring->netdev, - tx_ring->queue_index) && - netif_running(tx_ring->netdev)) - netif_wake_subqueue(tx_ring->netdev, - tx_ring->queue_index); - } - - return !!budget; -} - -/** - * wx_poll - NAPI polling RX/TX cleanup routine - * @napi: napi struct with our devices info in it - * @budget: amount of work driver is allowed to do this pass, in packets - * - * This function will clean all queues associated with a q_vector. 
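/* Illustrative sketch (not part of the original driver source): the Tx
 * completion path above only restarts a stopped subqueue once at least
 * TX_WAKE_THRESHOLD (DESC_NEEDED * 2) descriptors are free again, which
 * keeps the queue from flapping around the full mark.  The DESC_NEEDED
 * value below is an assumed stand-in, not the driver's real constant.
 */
#include <stdint.h>
#include <stdbool.h>

#define EXAMPLE_DESC_NEEDED		21
#define EXAMPLE_TX_WAKE_THRESHOLD	(EXAMPLE_DESC_NEEDED * 2)

static bool example_should_wake_queue(uint16_t descs_unused, bool carrier_ok,
				      unsigned int packets_completed)
{
	return packets_completed && carrier_ok &&
	       descs_unused >= EXAMPLE_TX_WAKE_THRESHOLD;
}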
- **/ -static int wx_poll(struct napi_struct *napi, int budget) -{ - struct wx_q_vector *q_vector = container_of(napi, struct wx_q_vector, napi); - int per_ring_budget, work_done = 0; - struct wx *wx = q_vector->wx; - bool clean_complete = true; - struct wx_ring *ring; - - wx_for_each_ring(ring, q_vector->tx) { - if (!wx_clean_tx_irq(q_vector, ring, budget)) - clean_complete = false; - } - - /* Exit if we are called by netpoll */ - if (budget <= 0) - return budget; - - /* attempt to distribute budget to each queue fairly, but don't allow - * the budget to go below 1 because we'll exit polling - */ - if (q_vector->rx.count > 1) - per_ring_budget = max(budget / q_vector->rx.count, 1); - else - per_ring_budget = budget; - - wx_for_each_ring(ring, q_vector->rx) { - int cleaned = wx_clean_rx_irq(q_vector, ring, per_ring_budget); - - work_done += cleaned; - if (cleaned >= per_ring_budget) - clean_complete = false; - } - - /* If all work not completed, return budget and keep polling */ - if (!clean_complete) - return budget; - - /* all work done, exit the polling mode */ - if (likely(napi_complete_done(napi, work_done))) { - if (netif_running(wx->netdev)) - wx_intr_enable(wx, WX_INTR_Q(q_vector->v_idx)); - } - - return min(work_done, budget - 1); -} - -static int wx_maybe_stop_tx(struct wx_ring *tx_ring, u16 size) -{ - if (likely(wx_desc_unused(tx_ring) >= size)) - return 0; - - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); - - /* For the next check */ - smp_mb(); - - /* We need to check again in a case another CPU has just - * made room available. - */ - if (likely(wx_desc_unused(tx_ring) < size)) - return -EBUSY; - - /* A reprieve! - use start_queue because it doesn't call schedule */ - netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); - - return 0; -} - -static u32 wx_tx_cmd_type(u32 tx_flags) -{ - /* set type for advanced descriptor with frame checksum insertion */ - u32 cmd_type = WX_TXD_DTYP_DATA | WX_TXD_IFCS; - - /* set HW vlan bit if vlan is present */ - cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_HW_VLAN, WX_TXD_VLE); - /* set segmentation enable bits for TSO/FSO */ - cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_TSO, WX_TXD_TSE); - /* set timestamp bit if present */ - cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_TSTAMP, WX_TXD_MAC_TSTAMP); - cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_LINKSEC, WX_TXD_LINKSEC); - - return cmd_type; -} - -static void wx_tx_olinfo_status(union wx_tx_desc *tx_desc, - u32 tx_flags, unsigned int paylen) -{ - u32 olinfo_status = paylen << WX_TXD_PAYLEN_SHIFT; - - /* enable L4 checksum for TSO and TX checksum offload */ - olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_CSUM, WX_TXD_L4CS); - /* enable IPv4 checksum for TSO */ - olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_IPV4, WX_TXD_IIPCS); - /* enable outer IPv4 checksum for TSO */ - olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_OUTER_IPV4, - WX_TXD_EIPCS); - /* Check Context must be set if Tx switch is enabled, which it - * always is for case where virtual functions are running - */ - olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_CC, WX_TXD_CC); - olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_IPSEC, - WX_TXD_IPSEC); - tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); -} - -static void wx_tx_map(struct wx_ring *tx_ring, - struct wx_tx_buffer *first, - const u8 hdr_len) -{ - struct sk_buff *skb = first->skb; - struct wx_tx_buffer *tx_buffer; - u32 tx_flags = first->tx_flags; - u16 i = tx_ring->next_to_use; - unsigned int data_len, size; - union 
wx_tx_desc *tx_desc; - skb_frag_t *frag; - dma_addr_t dma; - u32 cmd_type; - - cmd_type = wx_tx_cmd_type(tx_flags); - tx_desc = WX_TX_DESC(tx_ring, i); - wx_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); - - size = skb_headlen(skb); - data_len = skb->data_len; - dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); - - tx_buffer = first; - - for (frag = &skb_shinfo(skb)->frags[0];; frag++) { - if (dma_mapping_error(tx_ring->dev, dma)) - goto dma_error; - - /* record length, and DMA address */ - dma_unmap_len_set(tx_buffer, len, size); - dma_unmap_addr_set(tx_buffer, dma, dma); - - tx_desc->read.buffer_addr = cpu_to_le64(dma); - - while (unlikely(size > WX_MAX_DATA_PER_TXD)) { - tx_desc->read.cmd_type_len = - cpu_to_le32(cmd_type ^ WX_MAX_DATA_PER_TXD); - - i++; - tx_desc++; - if (i == tx_ring->count) { - tx_desc = WX_TX_DESC(tx_ring, 0); - i = 0; - } - tx_desc->read.olinfo_status = 0; - - dma += WX_MAX_DATA_PER_TXD; - size -= WX_MAX_DATA_PER_TXD; - - tx_desc->read.buffer_addr = cpu_to_le64(dma); - } - - if (likely(!data_len)) - break; - - tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); - - i++; - tx_desc++; - if (i == tx_ring->count) { - tx_desc = WX_TX_DESC(tx_ring, 0); - i = 0; - } - tx_desc->read.olinfo_status = 0; - - size = skb_frag_size(frag); - - data_len -= size; - - dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, - DMA_TO_DEVICE); - - tx_buffer = &tx_ring->tx_buffer_info[i]; - } - - /* write last descriptor with RS and EOP bits */ - cmd_type |= size | WX_TXD_EOP | WX_TXD_RS; - tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); - - netdev_tx_sent_queue(wx_txring_txq(tx_ring), first->bytecount); - - skb_tx_timestamp(skb); - - /* Force memory writes to complete before letting h/w know there - * are new descriptors to fetch. (Only applicable for weak-ordered - * memory model archs, such as IA-64). - * - * We also need this memory barrier to make certain all of the - * status bits have been updated before next_to_watch is written. - */ - wmb(); - - /* set next_to_watch value indicating a packet is present */ - first->next_to_watch = tx_desc; - - i++; - if (i == tx_ring->count) - i = 0; - - tx_ring->next_to_use = i; - - wx_maybe_stop_tx(tx_ring, DESC_NEEDED); - - if (netif_xmit_stopped(wx_txring_txq(tx_ring)) || !netdev_xmit_more()) - writel(i, tx_ring->tail); - - return; -dma_error: - dev_err(tx_ring->dev, "TX DMA map failed\n"); - - /* clear dma mappings for failed tx_buffer_info map */ - for (;;) { - tx_buffer = &tx_ring->tx_buffer_info[i]; - if (dma_unmap_len(tx_buffer, len)) - dma_unmap_page(tx_ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - dma_unmap_len_set(tx_buffer, len, 0); - if (tx_buffer == first) - break; - if (i == 0) - i += tx_ring->count; - i--; - } - - dev_kfree_skb_any(first->skb); - first->skb = NULL; - - tx_ring->next_to_use = i; -} - -static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens, - u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) -{ - struct wx_tx_context_desc *context_desc; - u16 i = tx_ring->next_to_use; - - context_desc = WX_TX_CTXTDESC(tx_ring, i); - i++; - tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; - - /* set bits to identify this as an advanced context descriptor */ - type_tucmd |= WX_TXD_DTYP_CTXT; - context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); - context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); - context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); - context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); -} - -static void wx_get_ipv6_proto(struct sk_buff *skb, int offset, u8 *nexthdr) -{ - struct ipv6hdr *hdr = (struct ipv6hdr *)(skb->data + offset); - - *nexthdr = hdr->nexthdr; - offset += sizeof(struct ipv6hdr); - while (ipv6_ext_hdr(*nexthdr)) { - struct ipv6_opt_hdr _hdr, *hp; - - if (*nexthdr == NEXTHDR_NONE) - return; - hp = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr); - if (!hp) - return; - if (*nexthdr == NEXTHDR_FRAGMENT) - break; - *nexthdr = hp->nexthdr; - } -} - -union network_header { - struct iphdr *ipv4; - struct ipv6hdr *ipv6; - void *raw; -}; - -static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first) -{ - u8 tun_prot = 0, l4_prot = 0, ptype = 0; - struct sk_buff *skb = first->skb; - - if (skb->encapsulation) { - union network_header hdr; - - switch (first->protocol) { - case htons(ETH_P_IP): - tun_prot = ip_hdr(skb)->protocol; - ptype = WX_PTYPE_TUN_IPV4; - break; - case htons(ETH_P_IPV6): - wx_get_ipv6_proto(skb, skb_network_offset(skb), &tun_prot); - ptype = WX_PTYPE_TUN_IPV6; - break; - default: - return ptype; - } - - if (tun_prot == IPPROTO_IPIP) { - hdr.raw = (void *)inner_ip_hdr(skb); - ptype |= WX_PTYPE_PKT_IPIP; - } else if (tun_prot == IPPROTO_UDP) { - hdr.raw = (void *)inner_ip_hdr(skb); - if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || - skb->inner_protocol != htons(ETH_P_TEB)) { - ptype |= WX_PTYPE_PKT_IG; - } else { - if (((struct ethhdr *)skb_inner_mac_header(skb))->h_proto - == htons(ETH_P_8021Q)) - ptype |= WX_PTYPE_PKT_IGMV; - else - ptype |= WX_PTYPE_PKT_IGM; - } - - } else if (tun_prot == IPPROTO_GRE) { - hdr.raw = (void *)inner_ip_hdr(skb); - if (skb->inner_protocol == htons(ETH_P_IP) || - skb->inner_protocol == htons(ETH_P_IPV6)) { - ptype |= WX_PTYPE_PKT_IG; - } else { - if (((struct ethhdr *)skb_inner_mac_header(skb))->h_proto - == htons(ETH_P_8021Q)) - ptype |= WX_PTYPE_PKT_IGMV; - else - ptype |= WX_PTYPE_PKT_IGM; - } - } else { - return ptype; - } - - switch (hdr.ipv4->version) { - case IPVERSION: - l4_prot = hdr.ipv4->protocol; - break; - case 6: - wx_get_ipv6_proto(skb, skb_inner_network_offset(skb), &l4_prot); - ptype |= WX_PTYPE_PKT_IPV6; - break; - default: - return ptype; - } - } else { - switch (first->protocol) { - case htons(ETH_P_IP): - l4_prot = ip_hdr(skb)->protocol; - ptype = WX_PTYPE_PKT_IP; - break; - case htons(ETH_P_IPV6): - wx_get_ipv6_proto(skb, skb_network_offset(skb), &l4_prot); - ptype = WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6; - break; - default: - return WX_PTYPE_PKT_MAC | WX_PTYPE_TYP_MAC; - } - } - switch (l4_prot) { - case IPPROTO_TCP: - ptype |= WX_PTYPE_TYP_TCP; - break; - case IPPROTO_UDP: - ptype |= WX_PTYPE_TYP_UDP; - break; - case IPPROTO_SCTP: - ptype |= WX_PTYPE_TYP_SCTP; - break; - default: - ptype |= WX_PTYPE_TYP_IP; - break; - } - - return ptype; -} - -static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first, - u8 *hdr_len, u8 ptype) -{ - u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; - struct net_device *netdev = tx_ring->netdev; - u32 l4len, tunhdr_eiplen_tunlen = 0; - struct sk_buff *skb = first->skb; - bool enc = skb->encapsulation; - struct ipv6hdr *ipv6h; - struct tcphdr *tcph; - struct iphdr *iph; - u8 tun_prot = 0; - int 
err; - - if (skb->ip_summed != CHECKSUM_PARTIAL) - return 0; - - if (!skb_is_gso(skb)) - return 0; - - err = skb_cow_head(skb, 0); - if (err < 0) - return err; - - /* indicates the inner headers in the skbuff are valid. */ - iph = enc ? inner_ip_hdr(skb) : ip_hdr(skb); - if (iph->version == 4) { - tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb); - iph->tot_len = 0; - iph->check = 0; - tcph->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, 0); - first->tx_flags |= WX_TX_FLAGS_TSO | - WX_TX_FLAGS_CSUM | - WX_TX_FLAGS_IPV4 | - WX_TX_FLAGS_CC; - } else if (iph->version == 6 && skb_is_gso_v6(skb)) { - ipv6h = enc ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); - tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb); - ipv6h->payload_len = 0; - tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, - &ipv6h->daddr, 0, - IPPROTO_TCP, 0); - first->tx_flags |= WX_TX_FLAGS_TSO | - WX_TX_FLAGS_CSUM | - WX_TX_FLAGS_CC; - } - - /* compute header lengths */ - l4len = enc ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); - *hdr_len = enc ? (skb_inner_transport_header(skb) - skb->data) : - skb_transport_offset(skb); - *hdr_len += l4len; - - /* update gso size and bytecount with header size */ - first->gso_segs = skb_shinfo(skb)->gso_segs; - first->bytecount += (first->gso_segs - 1) * *hdr_len; - - /* mss_l4len_id: use 0 as index for TSO */ - mss_l4len_idx = l4len << WX_TXD_L4LEN_SHIFT; - mss_l4len_idx |= skb_shinfo(skb)->gso_size << WX_TXD_MSS_SHIFT; - - /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ - if (enc) { - switch (first->protocol) { - case htons(ETH_P_IP): - tun_prot = ip_hdr(skb)->protocol; - first->tx_flags |= WX_TX_FLAGS_OUTER_IPV4; - break; - case htons(ETH_P_IPV6): - tun_prot = ipv6_hdr(skb)->nexthdr; - break; - default: - break; - } - switch (tun_prot) { - case IPPROTO_UDP: - tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_UDP; - tunhdr_eiplen_tunlen |= ((skb_network_header_len(skb) >> 2) << - WX_TXD_OUTER_IPLEN_SHIFT) | - (((skb_inner_mac_header(skb) - - skb_transport_header(skb)) >> 1) << - WX_TXD_TUNNEL_LEN_SHIFT); - break; - case IPPROTO_GRE: - tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_GRE; - tunhdr_eiplen_tunlen |= ((skb_network_header_len(skb) >> 2) << - WX_TXD_OUTER_IPLEN_SHIFT) | - (((skb_inner_mac_header(skb) - - skb_transport_header(skb)) >> 1) << - WX_TXD_TUNNEL_LEN_SHIFT); - break; - case IPPROTO_IPIP: - tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) - - (char *)ip_hdr(skb)) >> 2) << - WX_TXD_OUTER_IPLEN_SHIFT; - break; - default: - break; - } - vlan_macip_lens = skb_inner_network_header_len(skb) >> 1; - } else { - vlan_macip_lens = skb_network_header_len(skb) >> 1; - } - - vlan_macip_lens |= skb_network_offset(skb) << WX_TXD_MACLEN_SHIFT; - vlan_macip_lens |= first->tx_flags & WX_TX_FLAGS_VLAN_MASK; - - type_tucmd = ptype << 24; - if (skb->vlan_proto == htons(ETH_P_8021AD) && - netdev->features & NETIF_F_HW_VLAN_STAG_TX) - type_tucmd |= WX_SET_FLAG(first->tx_flags, - WX_TX_FLAGS_HW_VLAN, - 0x1 << WX_TXD_TAG_TPID_SEL_SHIFT); - wx_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, - type_tucmd, mss_l4len_idx); - - return 1; -} - -static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, - u8 ptype) -{ - u32 tunhdr_eiplen_tunlen = 0, vlan_macip_lens = 0; - struct net_device *netdev = tx_ring->netdev; - u32 mss_l4len_idx = 0, type_tucmd; - struct sk_buff *skb = first->skb; - u8 tun_prot = 0; - - if (skb->ip_summed != CHECKSUM_PARTIAL) { - if (!(first->tx_flags & WX_TX_FLAGS_HW_VLAN) && - !(first->tx_flags & WX_TX_FLAGS_CC)) - return; - vlan_macip_lens = 
skb_network_offset(skb) << - WX_TXD_MACLEN_SHIFT; - } else { - u8 l4_prot = 0; - union { - struct iphdr *ipv4; - struct ipv6hdr *ipv6; - u8 *raw; - } network_hdr; - union { - struct tcphdr *tcphdr; - u8 *raw; - } transport_hdr; - - if (skb->encapsulation) { - network_hdr.raw = skb_inner_network_header(skb); - transport_hdr.raw = skb_inner_transport_header(skb); - vlan_macip_lens = skb_network_offset(skb) << - WX_TXD_MACLEN_SHIFT; - switch (first->protocol) { - case htons(ETH_P_IP): - tun_prot = ip_hdr(skb)->protocol; - break; - case htons(ETH_P_IPV6): - tun_prot = ipv6_hdr(skb)->nexthdr; - break; - default: - return; - } - switch (tun_prot) { - case IPPROTO_UDP: - tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_UDP; - tunhdr_eiplen_tunlen |= - ((skb_network_header_len(skb) >> 2) << - WX_TXD_OUTER_IPLEN_SHIFT) | - (((skb_inner_mac_header(skb) - - skb_transport_header(skb)) >> 1) << - WX_TXD_TUNNEL_LEN_SHIFT); - break; - case IPPROTO_GRE: - tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_GRE; - tunhdr_eiplen_tunlen |= ((skb_network_header_len(skb) >> 2) << - WX_TXD_OUTER_IPLEN_SHIFT) | - (((skb_inner_mac_header(skb) - - skb_transport_header(skb)) >> 1) << - WX_TXD_TUNNEL_LEN_SHIFT); - break; - case IPPROTO_IPIP: - tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) - - (char *)ip_hdr(skb)) >> 2) << - WX_TXD_OUTER_IPLEN_SHIFT; - break; - default: - break; - } - - } else { - network_hdr.raw = skb_network_header(skb); - transport_hdr.raw = skb_transport_header(skb); - vlan_macip_lens = skb_network_offset(skb) << - WX_TXD_MACLEN_SHIFT; - } - - switch (network_hdr.ipv4->version) { - case IPVERSION: - vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1; - l4_prot = network_hdr.ipv4->protocol; - break; - case 6: - vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1; - l4_prot = network_hdr.ipv6->nexthdr; - break; - default: - break; - } - - switch (l4_prot) { - case IPPROTO_TCP: - mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) << - WX_TXD_L4LEN_SHIFT; - break; - case IPPROTO_SCTP: - mss_l4len_idx = sizeof(struct sctphdr) << - WX_TXD_L4LEN_SHIFT; - break; - case IPPROTO_UDP: - mss_l4len_idx = sizeof(struct udphdr) << - WX_TXD_L4LEN_SHIFT; - break; - default: - break; - } - - /* update TX checksum flag */ - first->tx_flags |= WX_TX_FLAGS_CSUM; - } - first->tx_flags |= WX_TX_FLAGS_CC; - /* vlan_macip_lens: MACLEN, VLAN tag */ - vlan_macip_lens |= first->tx_flags & WX_TX_FLAGS_VLAN_MASK; - - type_tucmd = ptype << 24; - if (skb->vlan_proto == htons(ETH_P_8021AD) && - netdev->features & NETIF_F_HW_VLAN_STAG_TX) - type_tucmd |= WX_SET_FLAG(first->tx_flags, - WX_TX_FLAGS_HW_VLAN, - 0x1 << WX_TXD_TAG_TPID_SEL_SHIFT); - wx_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, - type_tucmd, mss_l4len_idx); -} - -static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb, - struct wx_ring *tx_ring) -{ - u16 count = TXD_USE_COUNT(skb_headlen(skb)); - struct wx_tx_buffer *first; - u8 hdr_len = 0, ptype; - unsigned short f; - u32 tx_flags = 0; - int tso; - - /* need: 1 descriptor per page * PAGE_SIZE/WX_MAX_DATA_PER_TXD, - * + 1 desc for skb_headlen/WX_MAX_DATA_PER_TXD, - * + 2 desc gap to keep tail from touching head, - * + 1 desc for context descriptor, - * otherwise try next time - */ - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)-> - frags[f])); - - if (wx_maybe_stop_tx(tx_ring, count + 3)) - return NETDEV_TX_BUSY; - - /* record the location of the first descriptor for this packet */ - first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; - 
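/* Illustrative sketch (not part of the original driver source): before
 * queueing a frame, the transmit path above budgets one descriptor per
 * WX_MAX_DATA_PER_TXD-sized chunk of the linear head and of every
 * fragment, plus three more for the context descriptor and the tail/head
 * gap.  The chunk size below is an assumed value for illustration only.
 */
#include <stddef.h>

#define EXAMPLE_MAX_DATA_PER_TXD	16384u	/* assumption, not the real limit */

static unsigned int example_txd_use_count(unsigned int bytes)
{
	return (bytes + EXAMPLE_MAX_DATA_PER_TXD - 1) / EXAMPLE_MAX_DATA_PER_TXD;
}

static unsigned int example_descs_needed(unsigned int headlen,
					 const unsigned int *frag_len,
					 size_t nr_frags)
{
	unsigned int count = example_txd_use_count(headlen);
	size_t f;

	for (f = 0; f < nr_frags; f++)
		count += example_txd_use_count(frag_len[f]);

	return count + 3;	/* context descriptor + gap so tail never touches head */
}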
first->skb = skb; - first->bytecount = skb->len; - first->gso_segs = 1; - - /* if we have a HW VLAN tag being added default to the HW one */ - if (skb_vlan_tag_present(skb)) { - tx_flags |= skb_vlan_tag_get(skb) << WX_TX_FLAGS_VLAN_SHIFT; - tx_flags |= WX_TX_FLAGS_HW_VLAN; - } - - /* record initial flags and protocol */ - first->tx_flags = tx_flags; - first->protocol = vlan_get_protocol(skb); - - ptype = wx_encode_tx_desc_ptype(first); - - tso = wx_tso(tx_ring, first, &hdr_len, ptype); - if (tso < 0) - goto out_drop; - else if (!tso) - wx_tx_csum(tx_ring, first, ptype); - wx_tx_map(tx_ring, first, hdr_len); - - return NETDEV_TX_OK; -out_drop: - dev_kfree_skb_any(first->skb); - first->skb = NULL; - - return NETDEV_TX_OK; -} - -netdev_tx_t wx_xmit_frame(struct sk_buff *skb, - struct net_device *netdev) -{ - unsigned int r_idx = skb->queue_mapping; - struct wx *wx = netdev_priv(netdev); - struct wx_ring *tx_ring; - - if (!netif_carrier_ok(netdev)) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - - /* The minimum packet size for olinfo paylen is 17 so pad the skb - * in order to meet this minimum size requirement. - */ - if (skb_put_padto(skb, 17)) - return NETDEV_TX_OK; - - if (r_idx >= wx->num_tx_queues) - r_idx = r_idx % wx->num_tx_queues; - tx_ring = wx->tx_ring[r_idx]; - - return wx_xmit_frame_ring(skb, tx_ring); -} -EXPORT_SYMBOL(wx_xmit_frame); - -void wx_napi_enable_all(struct wx *wx) -{ - struct wx_q_vector *q_vector; - int q_idx; - - for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) { - q_vector = wx->q_vector[q_idx]; - napi_enable(&q_vector->napi); - } -} -EXPORT_SYMBOL(wx_napi_enable_all); - -void wx_napi_disable_all(struct wx *wx) -{ - struct wx_q_vector *q_vector; - int q_idx; - - for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) { - q_vector = wx->q_vector[q_idx]; - napi_disable(&q_vector->napi); - } -} -EXPORT_SYMBOL(wx_napi_disable_all); - -/** - * wx_set_rss_queues: Allocate queues for RSS - * @wx: board private structure to initialize - * - * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try - * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. - * - **/ -static void wx_set_rss_queues(struct wx *wx) -{ - wx->num_rx_queues = wx->mac.max_rx_queues; - wx->num_tx_queues = wx->mac.max_tx_queues; -} - -static void wx_set_num_queues(struct wx *wx) -{ - /* Start with base case */ - wx->num_rx_queues = 1; - wx->num_tx_queues = 1; - wx->queues_per_pool = 1; - - wx_set_rss_queues(wx); -} - -/** - * wx_acquire_msix_vectors - acquire MSI-X vectors - * @wx: board private structure - * - * Attempts to acquire a suitable range of MSI-X vector interrupts. Will - * return a negative error code if unable to acquire MSI-X vectors for any - * reason. - */ -static int wx_acquire_msix_vectors(struct wx *wx) -{ - struct irq_affinity affd = {0, }; - int nvecs, i; - - nvecs = min_t(int, num_online_cpus(), wx->mac.max_msix_vectors); - - wx->msix_entries = kcalloc(nvecs, - sizeof(struct msix_entry), - GFP_KERNEL); - if (!wx->msix_entries) - return -ENOMEM; - - nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs, - nvecs, - PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, - &affd); - if (nvecs < 0) { - wx_err(wx, "Failed to allocate MSI-X interrupts. 
Err: %d\n", nvecs); - kfree(wx->msix_entries); - wx->msix_entries = NULL; - return nvecs; - } - - for (i = 0; i < nvecs; i++) { - wx->msix_entries[i].entry = i; - wx->msix_entries[i].vector = pci_irq_vector(wx->pdev, i); - } - - /* one for msix_other */ - nvecs -= 1; - wx->num_q_vectors = nvecs; - wx->num_rx_queues = nvecs; - wx->num_tx_queues = nvecs; - - return 0; -} - -/** - * wx_set_interrupt_capability - set MSI-X or MSI if supported - * @wx: board private structure to initialize - * - * Attempt to configure the interrupts using the best available - * capabilities of the hardware and the kernel. - **/ -static int wx_set_interrupt_capability(struct wx *wx) -{ - struct pci_dev *pdev = wx->pdev; - int nvecs, ret; - - /* We will try to get MSI-X interrupts first */ - ret = wx_acquire_msix_vectors(wx); - if (ret == 0 || (ret == -ENOMEM)) - return ret; - - wx->num_rx_queues = 1; - wx->num_tx_queues = 1; - wx->num_q_vectors = 1; - - /* minmum one for queue, one for misc*/ - nvecs = 1; - nvecs = pci_alloc_irq_vectors(pdev, nvecs, - nvecs, PCI_IRQ_MSI | PCI_IRQ_LEGACY); - if (nvecs == 1) { - if (pdev->msi_enabled) - wx_err(wx, "Fallback to MSI.\n"); - else - wx_err(wx, "Fallback to LEGACY.\n"); - } else { - wx_err(wx, "Failed to allocate MSI/LEGACY interrupts. Error: %d\n", nvecs); - return nvecs; - } - - pdev->irq = pci_irq_vector(pdev, 0); - wx->num_q_vectors = 1; - - return 0; -} - -/** - * wx_cache_ring_rss - Descriptor ring to register mapping for RSS - * @wx: board private structure to initialize - * - * Cache the descriptor ring offsets for RSS, ATR, FCoE, and SR-IOV. - * - **/ -static void wx_cache_ring_rss(struct wx *wx) -{ - u16 i; - - for (i = 0; i < wx->num_rx_queues; i++) - wx->rx_ring[i]->reg_idx = i; - - for (i = 0; i < wx->num_tx_queues; i++) - wx->tx_ring[i]->reg_idx = i; -} - -static void wx_add_ring(struct wx_ring *ring, struct wx_ring_container *head) -{ - ring->next = head->ring; - head->ring = ring; - head->count++; -} - -/** - * wx_alloc_q_vector - Allocate memory for a single interrupt vector - * @wx: board private structure to initialize - * @v_count: q_vectors allocated on wx, used for ring interleaving - * @v_idx: index of vector in wx struct - * @txr_count: total number of Tx rings to allocate - * @txr_idx: index of first Tx ring to allocate - * @rxr_count: total number of Rx rings to allocate - * @rxr_idx: index of first Rx ring to allocate - * - * We allocate one q_vector. If allocation fails we return -ENOMEM. - **/ -static int wx_alloc_q_vector(struct wx *wx, - unsigned int v_count, unsigned int v_idx, - unsigned int txr_count, unsigned int txr_idx, - unsigned int rxr_count, unsigned int rxr_idx) -{ - struct wx_q_vector *q_vector; - int ring_count, default_itr; - struct wx_ring *ring; - - /* note this will allocate space for the ring structure as well! */ - ring_count = txr_count + rxr_count; - - q_vector = kzalloc(struct_size(q_vector, ring, ring_count), - GFP_KERNEL); - if (!q_vector) - return -ENOMEM; - - /* initialize NAPI */ - netif_napi_add(wx->netdev, &q_vector->napi, - wx_poll); - - /* tie q_vector and wx together */ - wx->q_vector[v_idx] = q_vector; - q_vector->wx = wx; - q_vector->v_idx = v_idx; - if (cpu_online(v_idx)) - q_vector->numa_node = cpu_to_node(v_idx); - - /* initialize pointer to rings */ - ring = q_vector->ring; - - if (wx->mac.type == wx_mac_sp) - default_itr = WX_12K_ITR; - else - default_itr = WX_7K_ITR; - /* initialize ITR */ - if (txr_count && !rxr_count) - /* tx only vector */ - q_vector->itr = wx->tx_itr_setting ? 
- default_itr : wx->tx_itr_setting; - else - /* rx or rx/tx vector */ - q_vector->itr = wx->rx_itr_setting ? - default_itr : wx->rx_itr_setting; - - while (txr_count) { - /* assign generic ring traits */ - ring->dev = &wx->pdev->dev; - ring->netdev = wx->netdev; - - /* configure backlink on ring */ - ring->q_vector = q_vector; - - /* update q_vector Tx values */ - wx_add_ring(ring, &q_vector->tx); - - /* apply Tx specific ring traits */ - ring->count = wx->tx_ring_count; - - ring->queue_index = txr_idx; - - /* assign ring to wx */ - wx->tx_ring[txr_idx] = ring; - - /* update count and index */ - txr_count--; - txr_idx += v_count; - - /* push pointer to next ring */ - ring++; - } - - while (rxr_count) { - /* assign generic ring traits */ - ring->dev = &wx->pdev->dev; - ring->netdev = wx->netdev; - - /* configure backlink on ring */ - ring->q_vector = q_vector; - - /* update q_vector Rx values */ - wx_add_ring(ring, &q_vector->rx); - - /* apply Rx specific ring traits */ - ring->count = wx->rx_ring_count; - ring->queue_index = rxr_idx; - - /* assign ring to wx */ - wx->rx_ring[rxr_idx] = ring; - - /* update count and index */ - rxr_count--; - rxr_idx += v_count; - - /* push pointer to next ring */ - ring++; - } - - return 0; -} - -/** - * wx_free_q_vector - Free memory allocated for specific interrupt vector - * @wx: board private structure to initialize - * @v_idx: Index of vector to be freed - * - * This function frees the memory allocated to the q_vector. In addition if - * NAPI is enabled it will delete any references to the NAPI struct prior - * to freeing the q_vector. - **/ -static void wx_free_q_vector(struct wx *wx, int v_idx) -{ - struct wx_q_vector *q_vector = wx->q_vector[v_idx]; - struct wx_ring *ring; - - wx_for_each_ring(ring, q_vector->tx) - wx->tx_ring[ring->queue_index] = NULL; - - wx_for_each_ring(ring, q_vector->rx) - wx->rx_ring[ring->queue_index] = NULL; - - wx->q_vector[v_idx] = NULL; - netif_napi_del(&q_vector->napi); - kfree_rcu(q_vector, rcu); -} - -/** - * wx_alloc_q_vectors - Allocate memory for interrupt vectors - * @wx: board private structure to initialize - * - * We allocate one q_vector per queue interrupt. If allocation fails we - * return -ENOMEM. - **/ -static int wx_alloc_q_vectors(struct wx *wx) -{ - unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0; - unsigned int rxr_remaining = wx->num_rx_queues; - unsigned int txr_remaining = wx->num_tx_queues; - unsigned int q_vectors = wx->num_q_vectors; - int rqpv, tqpv; - int err; - - for (; v_idx < q_vectors; v_idx++) { - rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); - tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); - err = wx_alloc_q_vector(wx, q_vectors, v_idx, - tqpv, txr_idx, - rqpv, rxr_idx); - - if (err) - goto err_out; - - /* update counts and index */ - rxr_remaining -= rqpv; - txr_remaining -= tqpv; - rxr_idx++; - txr_idx++; - } - - return 0; - -err_out: - wx->num_tx_queues = 0; - wx->num_rx_queues = 0; - wx->num_q_vectors = 0; - - while (v_idx--) - wx_free_q_vector(wx, v_idx); - - return -ENOMEM; -} - -/** - * wx_free_q_vectors - Free memory allocated for interrupt vectors - * @wx: board private structure to initialize - * - * This function frees the memory allocated to the q_vectors. In addition if - * NAPI is enabled it will delete any references to the NAPI struct prior - * to freeing the q_vector. 
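/* Illustrative sketch (not part of the original driver source): when
 * rings outnumber vectors, wx_alloc_q_vectors() above hands each
 * remaining vector DIV_ROUND_UP(remaining rings, remaining vectors), so
 * 8 Rx rings spread over 3 vectors come out as 3 + 3 + 2.
 */
#include <stdio.h>

int main(void)
{
	unsigned int remaining = 8, vectors = 3, v;

	for (v = 0; v < vectors; v++) {
		unsigned int share = (remaining + (vectors - v) - 1) / (vectors - v);

		printf("vector %u gets %u rings\n", v, share);
		remaining -= share;
	}
	return 0;
}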
- **/ -static void wx_free_q_vectors(struct wx *wx) -{ - int v_idx = wx->num_q_vectors; - - wx->num_tx_queues = 0; - wx->num_rx_queues = 0; - wx->num_q_vectors = 0; - - while (v_idx--) - wx_free_q_vector(wx, v_idx); -} - -void wx_reset_interrupt_capability(struct wx *wx) -{ - struct pci_dev *pdev = wx->pdev; - - if (!pdev->msi_enabled && !pdev->msix_enabled) - return; - - if (pdev->msix_enabled) { - kfree(wx->msix_entries); - wx->msix_entries = NULL; - } - pci_free_irq_vectors(wx->pdev); -} -EXPORT_SYMBOL(wx_reset_interrupt_capability); - -/** - * wx_clear_interrupt_scheme - Clear the current interrupt scheme settings - * @wx: board private structure to clear interrupt scheme on - * - * We go through and clear interrupt specific resources and reset the structure - * to pre-load conditions - **/ -void wx_clear_interrupt_scheme(struct wx *wx) -{ - wx_free_q_vectors(wx); - wx_reset_interrupt_capability(wx); -} -EXPORT_SYMBOL(wx_clear_interrupt_scheme); - -int wx_init_interrupt_scheme(struct wx *wx) -{ - int ret; - - /* Number of supported queues */ - wx_set_num_queues(wx); - - /* Set interrupt mode */ - ret = wx_set_interrupt_capability(wx); - if (ret) { - wx_err(wx, "Allocate irq vectors for failed.\n"); - return ret; - } - - /* Allocate memory for queues */ - ret = wx_alloc_q_vectors(wx); - if (ret) { - wx_err(wx, "Unable to allocate memory for queue vectors.\n"); - wx_reset_interrupt_capability(wx); - return ret; - } - - wx_cache_ring_rss(wx); - - return 0; -} -EXPORT_SYMBOL(wx_init_interrupt_scheme); - -irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data) -{ - struct wx_q_vector *q_vector = data; - - /* EIAM disabled interrupts (on this vector) for us */ - if (q_vector->rx.ring || q_vector->tx.ring) - napi_schedule_irqoff(&q_vector->napi); - - return IRQ_HANDLED; -} -EXPORT_SYMBOL(wx_msix_clean_rings); - -void wx_free_irq(struct wx *wx) -{ - struct pci_dev *pdev = wx->pdev; - int vector; - - if (!(pdev->msix_enabled)) { - free_irq(pdev->irq, wx); - return; - } - - for (vector = 0; vector < wx->num_q_vectors; vector++) { - struct wx_q_vector *q_vector = wx->q_vector[vector]; - struct msix_entry *entry = &wx->msix_entries[vector]; - - /* free only the irqs that were actually requested */ - if (!q_vector->rx.ring && !q_vector->tx.ring) - continue; - - free_irq(entry->vector, q_vector); - } - - if (wx->mac.type == wx_mac_em) - free_irq(wx->msix_entries[vector].vector, wx); -} -EXPORT_SYMBOL(wx_free_irq); - -/** - * wx_setup_isb_resources - allocate interrupt status resources - * @wx: board private structure - * - * Return 0 on success, negative on failure - **/ -int wx_setup_isb_resources(struct wx *wx) -{ - struct pci_dev *pdev = wx->pdev; - - wx->isb_mem = dma_alloc_coherent(&pdev->dev, - sizeof(u32) * 4, - &wx->isb_dma, - GFP_KERNEL); - if (!wx->isb_mem) { - wx_err(wx, "Alloc isb_mem failed\n"); - return -ENOMEM; - } - - return 0; -} -EXPORT_SYMBOL(wx_setup_isb_resources); - -/** - * wx_free_isb_resources - allocate all queues Rx resources - * @wx: board private structure - * - * Return 0 on success, negative on failure - **/ -void wx_free_isb_resources(struct wx *wx) -{ - struct pci_dev *pdev = wx->pdev; - - dma_free_coherent(&pdev->dev, sizeof(u32) * 4, - wx->isb_mem, wx->isb_dma); - wx->isb_mem = NULL; -} -EXPORT_SYMBOL(wx_free_isb_resources); - -u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx) -{ - u32 cur_tag = 0; - - cur_tag = wx->isb_mem[WX_ISB_HEADER]; - wx->isb_tag[idx] = cur_tag; - - return (__force u32)cpu_to_le32(wx->isb_mem[idx]); -} 
-EXPORT_SYMBOL(wx_misc_isb); - -/** - * wx_set_ivar - set the IVAR registers, mapping interrupt causes to vectors - * @wx: pointer to wx struct - * @direction: 0 for Rx, 1 for Tx, -1 for other causes - * @queue: queue to map the corresponding interrupt to - * @msix_vector: the vector to map to the corresponding queue - * - **/ -static void wx_set_ivar(struct wx *wx, s8 direction, - u16 queue, u16 msix_vector) -{ - u32 ivar, index; - - if (direction == -1) { - /* other causes */ - msix_vector |= WX_PX_IVAR_ALLOC_VAL; - index = 0; - ivar = rd32(wx, WX_PX_MISC_IVAR); - ivar &= ~(0xFF << index); - ivar |= (msix_vector << index); - wr32(wx, WX_PX_MISC_IVAR, ivar); - } else { - /* tx or rx causes */ - msix_vector |= WX_PX_IVAR_ALLOC_VAL; - index = ((16 * (queue & 1)) + (8 * direction)); - ivar = rd32(wx, WX_PX_IVAR(queue >> 1)); - ivar &= ~(0xFF << index); - ivar |= (msix_vector << index); - wr32(wx, WX_PX_IVAR(queue >> 1), ivar); - } -} - -/** - * wx_write_eitr - write EITR register in hardware specific way - * @q_vector: structure containing interrupt and ring information - * - * This function is made to be called by ethtool and by the driver - * when it needs to update EITR registers at runtime. Hardware - * specific quirks/differences are taken care of here. - */ -static void wx_write_eitr(struct wx_q_vector *q_vector) -{ - struct wx *wx = q_vector->wx; - int v_idx = q_vector->v_idx; - u32 itr_reg; - - if (wx->mac.type == wx_mac_sp) - itr_reg = q_vector->itr & WX_SP_MAX_EITR; - else - itr_reg = q_vector->itr & WX_EM_MAX_EITR; - - itr_reg |= WX_PX_ITR_CNT_WDIS; - - wr32(wx, WX_PX_ITR(v_idx), itr_reg); -} - -/** - * wx_configure_vectors - Configure vectors for hardware - * @wx: board private structure - * - * wx_configure_vectors sets up the hardware to properly generate MSI-X/MSI/LEGACY - * interrupts. - **/ -void wx_configure_vectors(struct wx *wx) -{ - struct pci_dev *pdev = wx->pdev; - u32 eitrsel = 0; - u16 v_idx; - - if (pdev->msix_enabled) { - /* Populate MSIX to EITR Select */ - wr32(wx, WX_PX_ITRSEL, eitrsel); - /* use EIAM to auto-mask when MSI-X interrupt is asserted - * this saves a register write for every interrupt - */ - wr32(wx, WX_PX_GPIE, WX_PX_GPIE_MODEL); - } else { - /* legacy interrupts, use EIAM to auto-mask when reading EICR, - * specifically only auto mask tx and rx interrupts. - */ - wr32(wx, WX_PX_GPIE, 0); - } - - /* Populate the IVAR table and set the ITR values to the - * corresponding register. 
- */ - for (v_idx = 0; v_idx < wx->num_q_vectors; v_idx++) { - struct wx_q_vector *q_vector = wx->q_vector[v_idx]; - struct wx_ring *ring; - - wx_for_each_ring(ring, q_vector->rx) - wx_set_ivar(wx, 0, ring->reg_idx, v_idx); - - wx_for_each_ring(ring, q_vector->tx) - wx_set_ivar(wx, 1, ring->reg_idx, v_idx); - - wx_write_eitr(q_vector); - } - - wx_set_ivar(wx, -1, 0, v_idx); - if (pdev->msix_enabled) - wr32(wx, WX_PX_ITR(v_idx), 1950); -} -EXPORT_SYMBOL(wx_configure_vectors); - -/** - * wx_clean_rx_ring - Free Rx Buffers per Queue - * @rx_ring: ring to free buffers from - **/ -static void wx_clean_rx_ring(struct wx_ring *rx_ring) -{ - struct wx_rx_buffer *rx_buffer; - u16 i = rx_ring->next_to_clean; - - rx_buffer = &rx_ring->rx_buffer_info[i]; - - /* Free all the Rx ring sk_buffs */ - while (i != rx_ring->next_to_alloc) { - if (rx_buffer->skb) { - struct sk_buff *skb = rx_buffer->skb; - - if (WX_CB(skb)->page_released) - page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); - - dev_kfree_skb(skb); - } - - /* Invalidate cache lines that may have been written to by - * device so that we avoid corrupting memory. - */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - WX_RX_BUFSZ, - DMA_FROM_DEVICE); - - /* free resources associated with mapping */ - page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); - - i++; - rx_buffer++; - if (i == rx_ring->count) { - i = 0; - rx_buffer = rx_ring->rx_buffer_info; - } - } - - rx_ring->next_to_alloc = 0; - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; -} - -/** - * wx_clean_all_rx_rings - Free Rx Buffers for all queues - * @wx: board private structure - **/ -void wx_clean_all_rx_rings(struct wx *wx) -{ - int i; - - for (i = 0; i < wx->num_rx_queues; i++) - wx_clean_rx_ring(wx->rx_ring[i]); -} -EXPORT_SYMBOL(wx_clean_all_rx_rings); - -/** - * wx_free_rx_resources - Free Rx Resources - * @rx_ring: ring to clean the resources from - * - * Free all receive software resources - **/ -static void wx_free_rx_resources(struct wx_ring *rx_ring) -{ - wx_clean_rx_ring(rx_ring); - kvfree(rx_ring->rx_buffer_info); - rx_ring->rx_buffer_info = NULL; - - /* if not set, then don't free */ - if (!rx_ring->desc) - return; - - dma_free_coherent(rx_ring->dev, rx_ring->size, - rx_ring->desc, rx_ring->dma); - - rx_ring->desc = NULL; - - if (rx_ring->page_pool) { - page_pool_destroy(rx_ring->page_pool); - rx_ring->page_pool = NULL; - } -} - -/** - * wx_free_all_rx_resources - Free Rx Resources for All Queues - * @wx: pointer to hardware structure - * - * Free all receive software resources - **/ -static void wx_free_all_rx_resources(struct wx *wx) -{ - int i; - - for (i = 0; i < wx->num_rx_queues; i++) - wx_free_rx_resources(wx->rx_ring[i]); -} - -/** - * wx_clean_tx_ring - Free Tx Buffers - * @tx_ring: ring to be cleaned - **/ -static void wx_clean_tx_ring(struct wx_ring *tx_ring) -{ - struct wx_tx_buffer *tx_buffer; - u16 i = tx_ring->next_to_clean; - - tx_buffer = &tx_ring->tx_buffer_info[i]; - - while (i != tx_ring->next_to_use) { - union wx_tx_desc *eop_desc, *tx_desc; - - /* Free all the Tx ring sk_buffs */ - dev_kfree_skb_any(tx_buffer->skb); - - /* unmap skb header data */ - dma_unmap_single(tx_ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - - /* check for eop_desc to determine the end of the packet */ - eop_desc = tx_buffer->next_to_watch; - tx_desc = WX_TX_DESC(tx_ring, i); - - /* unmap remaining buffers */ - while (tx_desc != eop_desc) { - 
tx_buffer++; - tx_desc++; - i++; - if (unlikely(i == tx_ring->count)) { - i = 0; - tx_buffer = tx_ring->tx_buffer_info; - tx_desc = WX_TX_DESC(tx_ring, 0); - } - - /* unmap any remaining paged data */ - if (dma_unmap_len(tx_buffer, len)) - dma_unmap_page(tx_ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - } - - /* move us one more past the eop_desc for start of next pkt */ - tx_buffer++; - i++; - if (unlikely(i == tx_ring->count)) { - i = 0; - tx_buffer = tx_ring->tx_buffer_info; - } - } - - netdev_tx_reset_queue(wx_txring_txq(tx_ring)); - - /* reset next_to_use and next_to_clean */ - tx_ring->next_to_use = 0; - tx_ring->next_to_clean = 0; -} - -/** - * wx_clean_all_tx_rings - Free Tx Buffers for all queues - * @wx: board private structure - **/ -void wx_clean_all_tx_rings(struct wx *wx) -{ - int i; - - for (i = 0; i < wx->num_tx_queues; i++) - wx_clean_tx_ring(wx->tx_ring[i]); -} -EXPORT_SYMBOL(wx_clean_all_tx_rings); - -/** - * wx_free_tx_resources - Free Tx Resources per Queue - * @tx_ring: Tx descriptor ring for a specific queue - * - * Free all transmit software resources - **/ -static void wx_free_tx_resources(struct wx_ring *tx_ring) -{ - wx_clean_tx_ring(tx_ring); - kvfree(tx_ring->tx_buffer_info); - tx_ring->tx_buffer_info = NULL; - - /* if not set, then don't free */ - if (!tx_ring->desc) - return; - - dma_free_coherent(tx_ring->dev, tx_ring->size, - tx_ring->desc, tx_ring->dma); - tx_ring->desc = NULL; -} - -/** - * wx_free_all_tx_resources - Free Tx Resources for All Queues - * @wx: pointer to hardware structure - * - * Free all transmit software resources - **/ -static void wx_free_all_tx_resources(struct wx *wx) -{ - int i; - - for (i = 0; i < wx->num_tx_queues; i++) - wx_free_tx_resources(wx->tx_ring[i]); -} - -void wx_free_resources(struct wx *wx) -{ - wx_free_isb_resources(wx); - wx_free_all_rx_resources(wx); - wx_free_all_tx_resources(wx); -} -EXPORT_SYMBOL(wx_free_resources); - -static int wx_alloc_page_pool(struct wx_ring *rx_ring) -{ - int ret = 0; - - struct page_pool_params pp_params = { - .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, - .order = 0, - .pool_size = rx_ring->size, - .nid = dev_to_node(rx_ring->dev), - .dev = rx_ring->dev, - .dma_dir = DMA_FROM_DEVICE, - .offset = 0, - .max_len = PAGE_SIZE, - }; - - rx_ring->page_pool = page_pool_create(&pp_params); - if (IS_ERR(rx_ring->page_pool)) { - ret = PTR_ERR(rx_ring->page_pool); - rx_ring->page_pool = NULL; - } - - return ret; -} - -/** - * wx_setup_rx_resources - allocate Rx resources (Descriptors) - * @rx_ring: rx descriptor ring (for a specific queue) to setup - * - * Returns 0 on success, negative on failure - **/ -static int wx_setup_rx_resources(struct wx_ring *rx_ring) -{ - struct device *dev = rx_ring->dev; - int orig_node = dev_to_node(dev); - int numa_node = NUMA_NO_NODE; - int size, ret; - - size = sizeof(struct wx_rx_buffer) * rx_ring->count; - - if (rx_ring->q_vector) - numa_node = rx_ring->q_vector->numa_node; - - rx_ring->rx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node); - if (!rx_ring->rx_buffer_info) - rx_ring->rx_buffer_info = kvmalloc(size, GFP_KERNEL); - if (!rx_ring->rx_buffer_info) - goto err; - - /* Round up to nearest 4K */ - rx_ring->size = rx_ring->count * sizeof(union wx_rx_desc); - rx_ring->size = ALIGN(rx_ring->size, 4096); - - set_dev_node(dev, numa_node); - rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, - &rx_ring->dma, GFP_KERNEL); - if (!rx_ring->desc) { - set_dev_node(dev, orig_node); - rx_ring->desc = 
dma_alloc_coherent(dev, rx_ring->size, - &rx_ring->dma, GFP_KERNEL); - } - - if (!rx_ring->desc) - goto err; - - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; - - ret = wx_alloc_page_pool(rx_ring); - if (ret < 0) { - dev_err(rx_ring->dev, "Page pool creation failed: %d\n", ret); - goto err_desc; - } - - return 0; - -err_desc: - dma_free_coherent(dev, rx_ring->size, rx_ring->desc, rx_ring->dma); -err: - kvfree(rx_ring->rx_buffer_info); - rx_ring->rx_buffer_info = NULL; - dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); - return -ENOMEM; -} - -/** - * wx_setup_all_rx_resources - allocate all queues Rx resources - * @wx: pointer to hardware structure - * - * If this function returns with an error, then it's possible one or - * more of the rings is populated (while the rest are not). It is the - * callers duty to clean those orphaned rings. - * - * Return 0 on success, negative on failure - **/ -static int wx_setup_all_rx_resources(struct wx *wx) -{ - int i, err = 0; - - for (i = 0; i < wx->num_rx_queues; i++) { - err = wx_setup_rx_resources(wx->rx_ring[i]); - if (!err) - continue; - - wx_err(wx, "Allocation for Rx Queue %u failed\n", i); - goto err_setup_rx; - } - - return 0; -err_setup_rx: - /* rewind the index freeing the rings as we go */ - while (i--) - wx_free_rx_resources(wx->rx_ring[i]); - return err; -} - -/** - * wx_setup_tx_resources - allocate Tx resources (Descriptors) - * @tx_ring: tx descriptor ring (for a specific queue) to setup - * - * Return 0 on success, negative on failure - **/ -static int wx_setup_tx_resources(struct wx_ring *tx_ring) -{ - struct device *dev = tx_ring->dev; - int orig_node = dev_to_node(dev); - int numa_node = NUMA_NO_NODE; - int size; - - size = sizeof(struct wx_tx_buffer) * tx_ring->count; - - if (tx_ring->q_vector) - numa_node = tx_ring->q_vector->numa_node; - - tx_ring->tx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node); - if (!tx_ring->tx_buffer_info) - tx_ring->tx_buffer_info = kvmalloc(size, GFP_KERNEL); - if (!tx_ring->tx_buffer_info) - goto err; - - /* round up to nearest 4K */ - tx_ring->size = tx_ring->count * sizeof(union wx_tx_desc); - tx_ring->size = ALIGN(tx_ring->size, 4096); - - set_dev_node(dev, numa_node); - tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, - &tx_ring->dma, GFP_KERNEL); - if (!tx_ring->desc) { - set_dev_node(dev, orig_node); - tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, - &tx_ring->dma, GFP_KERNEL); - } - - if (!tx_ring->desc) - goto err; - - tx_ring->next_to_use = 0; - tx_ring->next_to_clean = 0; - - return 0; - -err: - kvfree(tx_ring->tx_buffer_info); - tx_ring->tx_buffer_info = NULL; - dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); - return -ENOMEM; -} - -/** - * wx_setup_all_tx_resources - allocate all queues Tx resources - * @wx: pointer to private structure - * - * If this function returns with an error, then it's possible one or - * more of the rings is populated (while the rest are not). It is the - * callers duty to clean those orphaned rings. 
- * - * Return 0 on success, negative on failure - **/ -static int wx_setup_all_tx_resources(struct wx *wx) -{ - int i, err = 0; - - for (i = 0; i < wx->num_tx_queues; i++) { - err = wx_setup_tx_resources(wx->tx_ring[i]); - if (!err) - continue; - - wx_err(wx, "Allocation for Tx Queue %u failed\n", i); - goto err_setup_tx; - } - - return 0; -err_setup_tx: - /* rewind the index freeing the rings as we go */ - while (i--) - wx_free_tx_resources(wx->tx_ring[i]); - return err; -} - -int wx_setup_resources(struct wx *wx) -{ - int err; - - /* allocate transmit descriptors */ - err = wx_setup_all_tx_resources(wx); - if (err) - return err; - - /* allocate receive descriptors */ - err = wx_setup_all_rx_resources(wx); - if (err) - goto err_free_tx; - - err = wx_setup_isb_resources(wx); - if (err) - goto err_free_rx; - - return 0; - -err_free_rx: - wx_free_all_rx_resources(wx); -err_free_tx: - wx_free_all_tx_resources(wx); - - return err; -} -EXPORT_SYMBOL(wx_setup_resources); - -/** - * wx_get_stats64 - Get System Network Statistics - * @netdev: network interface device structure - * @stats: storage space for 64bit statistics - */ -void wx_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) -{ - struct wx *wx = netdev_priv(netdev); - int i; - - rcu_read_lock(); - for (i = 0; i < wx->num_rx_queues; i++) { - struct wx_ring *ring = READ_ONCE(wx->rx_ring[i]); - u64 bytes, packets; - unsigned int start; - - if (ring) { - do { - start = u64_stats_fetch_begin(&ring->syncp); - packets = ring->stats.packets; - bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry(&ring->syncp, start)); - stats->rx_packets += packets; - stats->rx_bytes += bytes; - } - } - - for (i = 0; i < wx->num_tx_queues; i++) { - struct wx_ring *ring = READ_ONCE(wx->tx_ring[i]); - u64 bytes, packets; - unsigned int start; - - if (ring) { - do { - start = u64_stats_fetch_begin(&ring->syncp); - packets = ring->stats.packets; - bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry(&ring->syncp, - start)); - stats->tx_packets += packets; - stats->tx_bytes += bytes; - } - } - - rcu_read_unlock(); -} -EXPORT_SYMBOL(wx_get_stats64); - -int wx_set_features(struct net_device *netdev, netdev_features_t features) -{ - netdev_features_t changed = netdev->features ^ features; - struct wx *wx = netdev_priv(netdev); - - if (changed & NETIF_F_RXHASH) - wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, - WX_RDB_RA_CTL_RSS_EN); - else - wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, 0); - - netdev->features = features; - - if (changed & - (NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_STAG_RX)) - wx_set_rx_mode(netdev); - - return 0; -} -EXPORT_SYMBOL(wx_set_features); - -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h deleted file mode 100644 index df1f4a5951f06ccb44e583ac6a3301838e999832..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h +++ /dev/null @@ -1,33 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * WangXun Gigabit PCI Express Linux driver - * Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. 
- */ - -#ifndef _WX_LIB_H_ -#define _WX_LIB_H_ - -void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count); -u16 wx_desc_unused(struct wx_ring *ring); -netdev_tx_t wx_xmit_frame(struct sk_buff *skb, - struct net_device *netdev); -void wx_napi_enable_all(struct wx *wx); -void wx_napi_disable_all(struct wx *wx); -void wx_reset_interrupt_capability(struct wx *wx); -void wx_clear_interrupt_scheme(struct wx *wx); -int wx_init_interrupt_scheme(struct wx *wx); -irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data); -void wx_free_irq(struct wx *wx); -int wx_setup_isb_resources(struct wx *wx); -void wx_free_isb_resources(struct wx *wx); -u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx); -void wx_configure_vectors(struct wx *wx); -void wx_clean_all_rx_rings(struct wx *wx); -void wx_clean_all_tx_rings(struct wx *wx); -void wx_free_resources(struct wx *wx); -int wx_setup_resources(struct wx *wx); -void wx_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats); -int wx_set_features(struct net_device *netdev, netdev_features_t features); - -#endif /* _NGBE_LIB_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h deleted file mode 100644 index c555af9ed51b29e1b8b069c9c020fb182a2aadfe..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ /dev/null @@ -1,965 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ - -#ifndef _WX_TYPE_H_ -#define _WX_TYPE_H_ - -#include -#include -#include -#include - -#define WX_NCSI_SUP 0x8000 -#define WX_NCSI_MASK 0x8000 -#define WX_WOL_SUP 0x4000 -#define WX_WOL_MASK 0x4000 - -/* MSI-X capability fields masks */ -#define WX_PCIE_MSIX_TBL_SZ_MASK 0x7FF -#define WX_PCI_LINK_STATUS 0xB2 - -/**************** Global Registers ****************************/ -/* chip control Registers */ -#define WX_MIS_PWR 0x10000 -#define WX_MIS_RST 0x1000C -#define WX_MIS_RST_LAN_RST(_i) BIT((_i) + 1) -#define WX_MIS_RST_SW_RST BIT(0) -#define WX_MIS_ST 0x10028 -#define WX_MIS_ST_MNG_INIT_DN BIT(0) -#define WX_MIS_SWSM 0x1002C -#define WX_MIS_SWSM_SMBI BIT(0) -#define WX_MIS_RST_ST 0x10030 -#define WX_MIS_RST_ST_RST_INI_SHIFT 8 -#define WX_MIS_RST_ST_RST_INIT (0xFF << WX_MIS_RST_ST_RST_INI_SHIFT) - -/* FMGR Registers */ -#define WX_SPI_CMD 0x10104 -#define WX_SPI_CMD_READ_DWORD 0x1 -#define WX_SPI_CLK_DIV 0x3 -#define WX_SPI_CMD_CMD(_v) FIELD_PREP(GENMASK(30, 28), _v) -#define WX_SPI_CMD_CLK(_v) FIELD_PREP(GENMASK(27, 25), _v) -#define WX_SPI_CMD_ADDR(_v) FIELD_PREP(GENMASK(23, 0), _v) -#define WX_SPI_DATA 0x10108 -#define WX_SPI_DATA_BYPASS BIT(31) -#define WX_SPI_DATA_OP_DONE BIT(0) -#define WX_SPI_STATUS 0x1010C -#define WX_SPI_STATUS_OPDONE BIT(0) -#define WX_SPI_STATUS_FLASH_BYPASS BIT(31) -#define WX_SPI_ILDR_STATUS 0x10120 - -/* Sensors for PVT(Process Voltage Temperature) */ -#define WX_TS_EN 0x10304 -#define WX_TS_EN_ENA BIT(0) -#define WX_TS_ALARM_THRE 0x1030C -#define WX_TS_DALARM_THRE 0x10310 -#define WX_TS_INT_EN 0x10314 -#define WX_TS_INT_EN_DALARM_INT_EN BIT(1) -#define WX_TS_INT_EN_ALARM_INT_EN BIT(0) -#define WX_TS_ALARM_ST 0x10318 -#define WX_TS_ALARM_ST_DALARM BIT(1) -#define WX_TS_ALARM_ST_ALARM BIT(0) - -/************************* Port Registers ************************************/ -/* port cfg Registers */ -#define WX_CFG_PORT_CTL 0x14400 -#define WX_CFG_PORT_CTL_DRV_LOAD BIT(3) -#define WX_CFG_PORT_CTL_QINQ BIT(2) -#define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* 
double vlan*/ -#define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4)) -#define WX_CFG_PORT_CTL_NUM_VT_MASK GENMASK(13, 12) /* number of TVs */ - - -/* GPIO Registers */ -#define WX_GPIO_DR 0x14800 -#define WX_GPIO_DR_0 BIT(0) /* SDP0 Data Value */ -#define WX_GPIO_DR_1 BIT(1) /* SDP1 Data Value */ -#define WX_GPIO_DDR 0x14804 -#define WX_GPIO_DDR_0 BIT(0) /* SDP0 IO direction */ -#define WX_GPIO_DDR_1 BIT(1) /* SDP1 IO direction */ -#define WX_GPIO_CTL 0x14808 -#define WX_GPIO_INTEN 0x14830 -#define WX_GPIO_INTEN_0 BIT(0) -#define WX_GPIO_INTEN_1 BIT(1) -#define WX_GPIO_INTMASK 0x14834 -#define WX_GPIO_INTTYPE_LEVEL 0x14838 -#define WX_GPIO_POLARITY 0x1483C -#define WX_GPIO_INTSTATUS 0x14844 -#define WX_GPIO_EOI 0x1484C -#define WX_GPIO_EXT 0x14850 - -/*********************** Transmit DMA registers **************************/ -/* transmit global control */ -#define WX_TDM_CTL 0x18000 -/* TDM CTL BIT */ -#define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */ -#define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4)) -#define WX_TDM_RP_IDX 0x1820C -#define WX_TDM_RP_RATE 0x18404 - -/***************************** RDB registers *********************************/ -/* receive packet buffer */ -#define WX_RDB_PB_CTL 0x19000 -#define WX_RDB_PB_CTL_RXEN BIT(31) /* Enable Receiver */ -#define WX_RDB_PB_CTL_DISABLED BIT(0) -#define WX_RDB_PB_SZ(_i) (0x19020 + ((_i) * 4)) -#define WX_RDB_PB_SZ_SHIFT 10 -/* statistic */ -#define WX_RDB_PFCMACDAL 0x19210 -#define WX_RDB_PFCMACDAH 0x19214 -/* ring assignment */ -#define WX_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4)) -#define WX_RDB_PL_CFG_L4HDR BIT(1) -#define WX_RDB_PL_CFG_L3HDR BIT(2) -#define WX_RDB_PL_CFG_L2HDR BIT(3) -#define WX_RDB_PL_CFG_TUN_TUNHDR BIT(4) -#define WX_RDB_PL_CFG_TUN_OUTL2HDR BIT(5) -#define WX_RDB_RA_CTL 0x194F4 -#define WX_RDB_RA_CTL_RSS_EN BIT(2) /* RSS Enable */ - -/******************************* PSR Registers *******************************/ -/* psr control */ -#define WX_PSR_CTL 0x15000 -/* Header split receive */ -#define WX_PSR_CTL_SW_EN BIT(18) -#define WX_PSR_CTL_RSC_ACK BIT(17) -#define WX_PSR_CTL_RSC_DIS BIT(16) -#define WX_PSR_CTL_PCSD BIT(13) -#define WX_PSR_CTL_IPPCSE BIT(12) -#define WX_PSR_CTL_BAM BIT(10) -#define WX_PSR_CTL_UPE BIT(9) -#define WX_PSR_CTL_MPE BIT(8) -#define WX_PSR_CTL_MFE BIT(7) -#define WX_PSR_CTL_MO_SHIFT 5 -#define WX_PSR_CTL_MO (0x3 << WX_PSR_CTL_MO_SHIFT) -#define WX_PSR_CTL_TPE BIT(4) -#define WX_PSR_MAX_SZ 0x15020 -#define WX_PSR_VLAN_CTL 0x15088 -#define WX_PSR_VLAN_CTL_CFIEN BIT(29) /* bit 29 */ -#define WX_PSR_VLAN_CTL_VFE BIT(30) /* bit 30 */ -/* mcasst/ucast overflow tbl */ -#define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4)) -#define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4)) - -/* VM L2 contorl */ -#define WX_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4)) -#define WX_PSR_VM_L2CTL_UPE BIT(4) /* unicast promiscuous */ -#define WX_PSR_VM_L2CTL_VACC BIT(6) /* accept nomatched vlan */ -#define WX_PSR_VM_L2CTL_AUPE BIT(8) /* accept untagged packets */ -#define WX_PSR_VM_L2CTL_ROMPE BIT(9) /* accept packets in MTA tbl */ -#define WX_PSR_VM_L2CTL_ROPE BIT(10) /* accept packets in UC tbl */ -#define WX_PSR_VM_L2CTL_BAM BIT(11) /* accept broadcast packets */ -#define WX_PSR_VM_L2CTL_MPE BIT(12) /* multicast promiscuous */ - -/* Management */ -#define WX_PSR_MNG_FLEX_SEL 0x1582C -#define WX_PSR_MNG_FLEX_DW_L(_i) (0x15A00 + ((_i) * 16)) -#define WX_PSR_MNG_FLEX_DW_H(_i) (0x15A04 + ((_i) * 16)) -#define WX_PSR_MNG_FLEX_MSK(_i) (0x15A08 + ((_i) * 16)) -#define WX_PSR_LAN_FLEX_SEL 0x15B8C -#define WX_PSR_LAN_FLEX_DW_L(_i) (0x15C00 
+ ((_i) * 16)) -#define WX_PSR_LAN_FLEX_DW_H(_i) (0x15C04 + ((_i) * 16)) -#define WX_PSR_LAN_FLEX_MSK(_i) (0x15C08 + ((_i) * 16)) - -#define WX_PSR_WKUP_CTL 0x15B80 -/* Wake Up Filter Control Bit */ -#define WX_PSR_WKUP_CTL_MAG BIT(1) /* Magic Packet Wakeup Enable */ - -/* vlan tbl */ -#define WX_PSR_VLAN_TBL(_i) (0x16000 + ((_i) * 4)) - -/* mac switcher */ -#define WX_PSR_MAC_SWC_AD_L 0x16200 -#define WX_PSR_MAC_SWC_AD_H 0x16204 -#define WX_PSR_MAC_SWC_AD_H_AD(v) FIELD_PREP(U16_MAX, v) -#define WX_PSR_MAC_SWC_AD_H_ADTYPE(v) FIELD_PREP(BIT(30), v) -#define WX_PSR_MAC_SWC_AD_H_AV BIT(31) -#define WX_PSR_MAC_SWC_VM_L 0x16208 -#define WX_PSR_MAC_SWC_VM_H 0x1620C -#define WX_PSR_MAC_SWC_IDX 0x16210 -#define WX_CLEAR_VMDQ_ALL 0xFFFFFFFFU - -/* vlan switch */ -#define WX_PSR_VLAN_SWC 0x16220 -#define WX_PSR_VLAN_SWC_VM_L 0x16224 -#define WX_PSR_VLAN_SWC_VM_H 0x16228 -#define WX_PSR_VLAN_SWC_IDX 0x16230 /* 64 vlan entries */ -/* VLAN pool filtering masks */ -#define WX_PSR_VLAN_SWC_VIEN BIT(31) /* filter is valid */ -#define WX_PSR_VLAN_SWC_ENTRIES 64 - -/********************************* RSEC **************************************/ -/* general rsec */ -#define WX_RSC_CTL 0x17000 -#define WX_RSC_CTL_SAVE_MAC_ERR BIT(6) -#define WX_RSC_CTL_CRC_STRIP BIT(2) -#define WX_RSC_CTL_RX_DIS BIT(1) -#define WX_RSC_ST 0x17004 -#define WX_RSC_ST_RSEC_RDY BIT(0) - -/****************************** TDB ******************************************/ -#define WX_TDB_PB_SZ(_i) (0x1CC00 + ((_i) * 4)) -#define WX_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ - -/****************************** TSEC *****************************************/ -/* Security Control Registers */ -#define WX_TSC_CTL 0x1D000 -#define WX_TSC_CTL_TX_DIS BIT(1) -#define WX_TSC_CTL_TSEC_DIS BIT(0) -#define WX_TSC_ST 0x1D004 -#define WX_TSC_ST_SECTX_RDY BIT(0) -#define WX_TSC_BUF_AE 0x1D00C -#define WX_TSC_BUF_AE_THR GENMASK(9, 0) - -/************************************** MNG ********************************/ -#define WX_MNG_SWFW_SYNC 0x1E008 -#define WX_MNG_SWFW_SYNC_SW_MB BIT(2) -#define WX_MNG_SWFW_SYNC_SW_FLASH BIT(3) -#define WX_MNG_MBOX 0x1E100 -#define WX_MNG_MBOX_CTL 0x1E044 -#define WX_MNG_MBOX_CTL_SWRDY BIT(0) -#define WX_MNG_MBOX_CTL_FWRDY BIT(2) - -/************************************* ETH MAC *****************************/ -#define WX_MAC_TX_CFG 0x11000 -#define WX_MAC_TX_CFG_TE BIT(0) -#define WX_MAC_TX_CFG_SPEED_MASK GENMASK(30, 29) -#define WX_MAC_TX_CFG_SPEED_10G FIELD_PREP(WX_MAC_TX_CFG_SPEED_MASK, 0) -#define WX_MAC_TX_CFG_SPEED_1G FIELD_PREP(WX_MAC_TX_CFG_SPEED_MASK, 3) -#define WX_MAC_RX_CFG 0x11004 -#define WX_MAC_RX_CFG_RE BIT(0) -#define WX_MAC_RX_CFG_JE BIT(8) -#define WX_MAC_PKT_FLT 0x11008 -#define WX_MAC_PKT_FLT_PR BIT(0) /* promiscuous mode */ -#define WX_MAC_WDG_TIMEOUT 0x1100C -#define WX_MAC_RX_FLOW_CTRL 0x11090 -#define WX_MAC_RX_FLOW_CTRL_RFE BIT(0) /* receive fc enable */ -/* MDIO Registers */ -#define WX_MSCA 0x11200 -#define WX_MSCA_RA(v) FIELD_PREP(U16_MAX, v) -#define WX_MSCA_PA(v) FIELD_PREP(GENMASK(20, 16), v) -#define WX_MSCA_DA(v) FIELD_PREP(GENMASK(25, 21), v) -#define WX_MSCC 0x11204 -#define WX_MSCC_CMD(v) FIELD_PREP(GENMASK(17, 16), v) - -enum WX_MSCA_CMD_value { - WX_MSCA_CMD_RSV = 0, - WX_MSCA_CMD_WRITE, - WX_MSCA_CMD_POST_READ, - WX_MSCA_CMD_READ, -}; - -#define WX_MSCC_SADDR BIT(18) -#define WX_MSCC_BUSY BIT(22) -#define WX_MDIO_CLK(v) FIELD_PREP(GENMASK(21, 19), v) -#define WX_MMC_CONTROL 0x11800 -#define WX_MMC_CONTROL_RSTONRD BIT(2) /* reset on read */ - -/********************************* BAR 
registers ***************************/ -/* Interrupt Registers */ -#define WX_BME_CTL 0x12020 -#define WX_PX_MISC_IC 0x100 -#define WX_PX_MISC_ICS 0x104 -#define WX_PX_MISC_IEN 0x108 -#define WX_PX_INTA 0x110 -#define WX_PX_GPIE 0x118 -#define WX_PX_GPIE_MODEL BIT(0) -#define WX_PX_IC(_i) (0x120 + (_i) * 4) -#define WX_PX_IMS(_i) (0x140 + (_i) * 4) -#define WX_PX_IMC(_i) (0x150 + (_i) * 4) -#define WX_PX_ISB_ADDR_L 0x160 -#define WX_PX_ISB_ADDR_H 0x164 -#define WX_PX_TRANSACTION_PENDING 0x168 -#define WX_PX_ITRSEL 0x180 -#define WX_PX_ITR(_i) (0x200 + (_i) * 4) -#define WX_PX_ITR_CNT_WDIS BIT(31) -#define WX_PX_MISC_IVAR 0x4FC -#define WX_PX_IVAR(_i) (0x500 + (_i) * 4) - -#define WX_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ -#define WX_7K_ITR 595 -#define WX_12K_ITR 336 -#define WX_SP_MAX_EITR 0x00000FF8U -#define WX_EM_MAX_EITR 0x00007FFCU - -/* transmit DMA Registers */ -#define WX_PX_TR_BAL(_i) (0x03000 + ((_i) * 0x40)) -#define WX_PX_TR_BAH(_i) (0x03004 + ((_i) * 0x40)) -#define WX_PX_TR_WP(_i) (0x03008 + ((_i) * 0x40)) -#define WX_PX_TR_RP(_i) (0x0300C + ((_i) * 0x40)) -#define WX_PX_TR_CFG(_i) (0x03010 + ((_i) * 0x40)) -/* Transmit Config masks */ -#define WX_PX_TR_CFG_ENABLE BIT(0) /* Ena specific Tx Queue */ -#define WX_PX_TR_CFG_TR_SIZE_SHIFT 1 /* tx desc number per ring */ -#define WX_PX_TR_CFG_SWFLSH BIT(26) /* Tx Desc. wr-bk flushing */ -#define WX_PX_TR_CFG_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ -#define WX_PX_TR_CFG_THRE_SHIFT 8 - -/* Receive DMA Registers */ -#define WX_PX_RR_BAL(_i) (0x01000 + ((_i) * 0x40)) -#define WX_PX_RR_BAH(_i) (0x01004 + ((_i) * 0x40)) -#define WX_PX_RR_WP(_i) (0x01008 + ((_i) * 0x40)) -#define WX_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40)) -#define WX_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40)) -/* PX_RR_CFG bit definitions */ -#define WX_PX_RR_CFG_VLAN BIT(31) -#define WX_PX_RR_CFG_SPLIT_MODE BIT(26) -#define WX_PX_RR_CFG_RR_THER_SHIFT 16 -#define WX_PX_RR_CFG_RR_HDR_SZ GENMASK(15, 12) -#define WX_PX_RR_CFG_RR_BUF_SZ GENMASK(11, 8) -#define WX_PX_RR_CFG_BHDRSIZE_SHIFT 6 /* 64byte resolution (>> 6) - * + at bit 8 offset (<< 12) - * = (<< 6) - */ -#define WX_PX_RR_CFG_BSIZEPKT_SHIFT 2 /* so many KBs */ -#define WX_PX_RR_CFG_RR_SIZE_SHIFT 1 -#define WX_PX_RR_CFG_RR_EN BIT(0) - -/* Number of 80 microseconds we wait for PCI Express master disable */ -#define WX_PCI_MASTER_DISABLE_TIMEOUT 80000 - -/****************** Manageablility Host Interface defines ********************/ -#define WX_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */ -#define WX_HI_COMMAND_TIMEOUT 1000 /* Process HI command limit */ - -#define FW_READ_SHADOW_RAM_CMD 0x31 -#define FW_READ_SHADOW_RAM_LEN 0x6 -#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */ -#define FW_NVM_DATA_OFFSET 3 -#define FW_MAX_READ_BUFFER_SIZE 244 -#define FW_RESET_CMD 0xDF -#define FW_RESET_LEN 0x2 -#define FW_CEM_HDR_LEN 0x4 -#define FW_CEM_CMD_RESERVED 0X0 -#define FW_CEM_MAX_RETRIES 3 -#define FW_CEM_RESP_STATUS_SUCCESS 0x1 - -#define WX_SW_REGION_PTR 0x1C - -#define WX_MAC_STATE_DEFAULT 0x1 -#define WX_MAC_STATE_MODIFIED 0x2 -#define WX_MAC_STATE_IN_USE 0x4 - -#define WX_MAX_RXD 8192 -#define WX_MAX_TXD 8192 - -#define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */ -#define VMDQ_P(p) p - -/* Supported Rx Buffer Sizes */ -#define WX_RXBUFFER_256 256 /* Used for skb receive header */ -#define WX_RXBUFFER_2K 2048 -#define WX_MAX_RXBUFFER 16384 /* largest size for single descriptor */ - -#if MAX_SKB_FRAGS < 8 -#define WX_RX_BUFSZ ALIGN(WX_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024) 
-#else -#define WX_RX_BUFSZ WX_RXBUFFER_2K -#endif - -#define WX_RX_BUFFER_WRITE 16 /* Must be power of 2 */ - -#define WX_MAX_DATA_PER_TXD BIT(14) -/* Tx Descriptors needed, worst case */ -#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), WX_MAX_DATA_PER_TXD) -#define DESC_NEEDED (MAX_SKB_FRAGS + 4) - -#define WX_CFG_PORT_ST 0x14404 - -/******************* Receive Descriptor bit definitions **********************/ -#define WX_RXD_STAT_DD BIT(0) /* Done */ -#define WX_RXD_STAT_EOP BIT(1) /* End of Packet */ -#define WX_RXD_STAT_VP BIT(5) /* IEEE VLAN Pkt */ -#define WX_RXD_STAT_L4CS BIT(7) /* L4 xsum calculated */ -#define WX_RXD_STAT_IPCS BIT(8) /* IP xsum calculated */ -#define WX_RXD_STAT_OUTERIPCS BIT(10) /* Cloud IP xsum calculated*/ - -#define WX_RXD_ERR_OUTERIPER BIT(26) /* CRC IP Header error */ -#define WX_RXD_ERR_RXE BIT(29) /* Any MAC Error */ -#define WX_RXD_ERR_TCPE BIT(30) /* TCP/UDP Checksum Error */ -#define WX_RXD_ERR_IPE BIT(31) /* IP Checksum Error */ - -/* RSS Hash results */ -#define WX_RXD_RSSTYPE_MASK GENMASK(3, 0) -#define WX_RXD_RSSTYPE_IPV4_TCP 0x00000001U -#define WX_RXD_RSSTYPE_IPV6_TCP 0x00000003U -#define WX_RXD_RSSTYPE_IPV4_SCTP 0x00000004U -#define WX_RXD_RSSTYPE_IPV6_SCTP 0x00000006U -#define WX_RXD_RSSTYPE_IPV4_UDP 0x00000007U -#define WX_RXD_RSSTYPE_IPV6_UDP 0x00000008U - -#define WX_RSS_L4_TYPES_MASK \ - ((1ul << WX_RXD_RSSTYPE_IPV4_TCP) | \ - (1ul << WX_RXD_RSSTYPE_IPV4_UDP) | \ - (1ul << WX_RXD_RSSTYPE_IPV4_SCTP) | \ - (1ul << WX_RXD_RSSTYPE_IPV6_TCP) | \ - (1ul << WX_RXD_RSSTYPE_IPV6_UDP) | \ - (1ul << WX_RXD_RSSTYPE_IPV6_SCTP)) -/* TUN */ -#define WX_PTYPE_TUN_IPV4 0x80 -#define WX_PTYPE_TUN_IPV6 0xC0 - -/* PKT for TUN */ -#define WX_PTYPE_PKT_IPIP 0x00 /* IP+IP */ -#define WX_PTYPE_PKT_IG 0x10 /* IP+GRE */ -#define WX_PTYPE_PKT_IGM 0x20 /* IP+GRE+MAC */ -#define WX_PTYPE_PKT_IGMV 0x30 /* IP+GRE+MAC+VLAN */ -/* PKT for !TUN */ -#define WX_PTYPE_PKT_MAC 0x10 -#define WX_PTYPE_PKT_IP 0x20 - -/* TYP for PKT=mac */ -#define WX_PTYPE_TYP_MAC 0x01 -/* TYP for PKT=ip */ -#define WX_PTYPE_PKT_IPV6 0x08 -#define WX_PTYPE_TYP_IPFRAG 0x01 -#define WX_PTYPE_TYP_IP 0x02 -#define WX_PTYPE_TYP_UDP 0x03 -#define WX_PTYPE_TYP_TCP 0x04 -#define WX_PTYPE_TYP_SCTP 0x05 - -#define WX_RXD_PKTTYPE(_rxd) \ - ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF) -#define WX_RXD_IPV6EX(_rxd) \ - ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 6) & 0x1) -/*********************** Transmit Descriptor Config Masks ****************/ -#define WX_TXD_STAT_DD BIT(0) /* Descriptor Done */ -#define WX_TXD_DTYP_DATA 0 /* Adv Data Descriptor */ -#define WX_TXD_PAYLEN_SHIFT 13 /* Desc PAYLEN shift */ -#define WX_TXD_EOP BIT(24) /* End of Packet */ -#define WX_TXD_IFCS BIT(25) /* Insert FCS */ -#define WX_TXD_RS BIT(27) /* Report Status */ - -/*********************** Adv Transmit Descriptor Config Masks ****************/ -#define WX_TXD_MAC_TSTAMP BIT(19) /* IEEE1588 time stamp */ -#define WX_TXD_DTYP_CTXT BIT(20) /* Adv Context Desc */ -#define WX_TXD_LINKSEC BIT(26) /* enable linksec */ -#define WX_TXD_VLE BIT(30) /* VLAN pkt enable */ -#define WX_TXD_TSE BIT(31) /* TCP Seg enable */ -#define WX_TXD_CC BIT(7) /* Check Context */ -#define WX_TXD_IPSEC BIT(8) /* enable ipsec esp */ -#define WX_TXD_L4CS BIT(9) -#define WX_TXD_IIPCS BIT(10) -#define WX_TXD_EIPCS BIT(11) -#define WX_TXD_PAYLEN_SHIFT 13 /* Adv desc PAYLEN shift */ -#define WX_TXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ -#define WX_TXD_TAG_TPID_SEL_SHIFT 11 - -#define WX_TXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ 
-#define WX_TXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ - -#define WX_TXD_OUTER_IPLEN_SHIFT 12 /* Adv ctxt OUTERIPLEN shift */ -#define WX_TXD_TUNNEL_LEN_SHIFT 21 /* Adv ctxt TUNNELLEN shift */ -#define WX_TXD_TUNNEL_TYPE_SHIFT 11 /* Adv Tx Desc Tunnel Type shift */ -#define WX_TXD_TUNNEL_UDP FIELD_PREP(BIT(WX_TXD_TUNNEL_TYPE_SHIFT), 0) -#define WX_TXD_TUNNEL_GRE FIELD_PREP(BIT(WX_TXD_TUNNEL_TYPE_SHIFT), 1) - -enum wx_tx_flags { - /* cmd_type flags */ - WX_TX_FLAGS_HW_VLAN = 0x01, - WX_TX_FLAGS_TSO = 0x02, - WX_TX_FLAGS_TSTAMP = 0x04, - - /* olinfo flags */ - WX_TX_FLAGS_CC = 0x08, - WX_TX_FLAGS_IPV4 = 0x10, - WX_TX_FLAGS_CSUM = 0x20, - WX_TX_FLAGS_OUTER_IPV4 = 0x100, - WX_TX_FLAGS_LINKSEC = 0x200, - WX_TX_FLAGS_IPSEC = 0x400, -}; - -/* VLAN info */ -#define WX_TX_FLAGS_VLAN_MASK GENMASK(31, 16) -#define WX_TX_FLAGS_VLAN_SHIFT 16 - -/* wx_dec_ptype.mac: outer mac */ -enum wx_dec_ptype_mac { - WX_DEC_PTYPE_MAC_IP = 0, - WX_DEC_PTYPE_MAC_L2 = 2, - WX_DEC_PTYPE_MAC_FCOE = 3, -}; - -/* wx_dec_ptype.[e]ip: outer&encaped ip */ -#define WX_DEC_PTYPE_IP_FRAG 0x4 -enum wx_dec_ptype_ip { - WX_DEC_PTYPE_IP_NONE = 0, - WX_DEC_PTYPE_IP_IPV4 = 1, - WX_DEC_PTYPE_IP_IPV6 = 2, - WX_DEC_PTYPE_IP_FGV4 = WX_DEC_PTYPE_IP_FRAG | WX_DEC_PTYPE_IP_IPV4, - WX_DEC_PTYPE_IP_FGV6 = WX_DEC_PTYPE_IP_FRAG | WX_DEC_PTYPE_IP_IPV6, -}; - -/* wx_dec_ptype.etype: encaped type */ -enum wx_dec_ptype_etype { - WX_DEC_PTYPE_ETYPE_NONE = 0, - WX_DEC_PTYPE_ETYPE_IPIP = 1, /* IP+IP */ - WX_DEC_PTYPE_ETYPE_IG = 2, /* IP+GRE */ - WX_DEC_PTYPE_ETYPE_IGM = 3, /* IP+GRE+MAC */ - WX_DEC_PTYPE_ETYPE_IGMV = 4, /* IP+GRE+MAC+VLAN */ -}; - -/* wx_dec_ptype.proto: payload proto */ -enum wx_dec_ptype_prot { - WX_DEC_PTYPE_PROT_NONE = 0, - WX_DEC_PTYPE_PROT_UDP = 1, - WX_DEC_PTYPE_PROT_TCP = 2, - WX_DEC_PTYPE_PROT_SCTP = 3, - WX_DEC_PTYPE_PROT_ICMP = 4, - WX_DEC_PTYPE_PROT_TS = 5, /* time sync */ -}; - -/* wx_dec_ptype.layer: payload layer */ -enum wx_dec_ptype_layer { - WX_DEC_PTYPE_LAYER_NONE = 0, - WX_DEC_PTYPE_LAYER_PAY2 = 1, - WX_DEC_PTYPE_LAYER_PAY3 = 2, - WX_DEC_PTYPE_LAYER_PAY4 = 3, -}; - -struct wx_dec_ptype { - u32 known:1; - u32 mac:2; /* outer mac */ - u32 ip:3; /* outer ip*/ - u32 etype:3; /* encaped type */ - u32 eip:3; /* encaped ip */ - u32 prot:4; /* payload proto */ - u32 layer:3; /* payload layer */ -}; - -/* macro to make the table lines short */ -#define WX_PTT(mac, ip, etype, eip, proto, layer)\ - {1, \ - WX_DEC_PTYPE_MAC_##mac, /* mac */\ - WX_DEC_PTYPE_IP_##ip, /* ip */ \ - WX_DEC_PTYPE_ETYPE_##etype, /* etype */\ - WX_DEC_PTYPE_IP_##eip, /* eip */\ - WX_DEC_PTYPE_PROT_##proto, /* proto */\ - WX_DEC_PTYPE_LAYER_##layer /* layer */} - -/* Host Interface Command Structures */ -struct wx_hic_hdr { - u8 cmd; - u8 buf_len; - union { - u8 cmd_resv; - u8 ret_status; - } cmd_or_resp; - u8 checksum; -}; - -struct wx_hic_hdr2_req { - u8 cmd; - u8 buf_lenh; - u8 buf_lenl; - u8 checksum; -}; - -struct wx_hic_hdr2_rsp { - u8 cmd; - u8 buf_lenl; - u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ - u8 checksum; -}; - -union wx_hic_hdr2 { - struct wx_hic_hdr2_req req; - struct wx_hic_hdr2_rsp rsp; -}; - -/* These need to be dword aligned */ -struct wx_hic_read_shadow_ram { - union wx_hic_hdr2 hdr; - u32 address; - u16 length; - u16 pad2; - u16 data; - u16 pad3; -}; - -struct wx_hic_reset { - struct wx_hic_hdr hdr; - u16 lan_id; - u16 reset_type; -}; - -/* Bus parameters */ -struct wx_bus_info { - u8 func; - u16 device; -}; - -struct wx_thermal_sensor_data { - s16 temp; - s16 alarm_thresh; - s16 dalarm_thresh; -}; - -enum 
wx_mac_type { - wx_mac_unknown = 0, - wx_mac_sp, - wx_mac_em -}; - -enum sp_media_type { - sp_media_unknown = 0, - sp_media_fiber, - sp_media_copper, - sp_media_backplane -}; - -enum em_mac_type { - em_mac_type_unknown = 0, - em_mac_type_mdi, - em_mac_type_rgmii -}; - -struct wx_mac_info { - enum wx_mac_type type; - bool set_lben; - u8 addr[ETH_ALEN]; - u8 perm_addr[ETH_ALEN]; - u32 mta_shadow[128]; - s32 mc_filter_type; - u32 mcft_size; - u32 vft_shadow[128]; - u32 vft_size; - u32 num_rar_entries; - u32 rx_pb_size; - u32 tx_pb_size; - u32 max_tx_queues; - u32 max_rx_queues; - - u16 max_msix_vectors; - struct wx_thermal_sensor_data sensor; -}; - -enum wx_eeprom_type { - wx_eeprom_uninitialized = 0, - wx_eeprom_spi, - wx_flash, - wx_eeprom_none /* No NVM support */ -}; - -struct wx_eeprom_info { - enum wx_eeprom_type type; - u32 semaphore_delay; - u16 word_size; - u16 sw_region_offset; -}; - -struct wx_addr_filter_info { - u32 num_mc_addrs; - u32 mta_in_use; - bool user_set_promisc; -}; - -struct wx_mac_addr { - u8 addr[ETH_ALEN]; - u16 state; /* bitmask */ - u64 pools; -}; - -enum wx_reset_type { - WX_LAN_RESET = 0, - WX_SW_RESET, - WX_GLOBAL_RESET -}; - -struct wx_cb { - dma_addr_t dma; - u16 append_cnt; /* number of skb's appended */ - bool page_released; - bool dma_released; -}; - -#define WX_CB(skb) ((struct wx_cb *)(skb)->cb) - -/* Transmit Descriptor */ -union wx_tx_desc { - struct { - __le64 buffer_addr; /* Address of descriptor's data buf */ - __le32 cmd_type_len; - __le32 olinfo_status; - } read; - struct { - __le64 rsvd; /* Reserved */ - __le32 nxtseq_seed; - __le32 status; - } wb; -}; - -/* Receive Descriptor */ -union wx_rx_desc { - struct { - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ - } read; - struct { - struct { - union { - __le32 data; - struct { - __le16 pkt_info; /* RSS, Pkt type */ - __le16 hdr_info; /* Splithdr, hdrlen */ - } hs_rss; - } lo_dword; - union { - __le32 rss; /* RSS Hash */ - struct { - __le16 ip_id; /* IP id */ - __le16 csum; /* Packet Checksum */ - } csum_ip; - } hi_dword; - } lower; - struct { - __le32 status_error; /* ext status/error */ - __le16 length; /* Packet length */ - __le16 vlan; /* VLAN tag */ - } upper; - } wb; /* writeback */ -}; - -struct wx_tx_context_desc { - __le32 vlan_macip_lens; - __le32 seqnum_seed; - __le32 type_tucmd_mlhl; - __le32 mss_l4len_idx; -}; - -/* if _flag is in _input, return _result */ -#define WX_SET_FLAG(_input, _flag, _result) \ - (((_flag) <= (_result)) ? 
\ - ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \ - ((u32)((_input) & (_flag)) / ((_flag) / (_result)))) - -#define WX_RX_DESC(R, i) \ - (&(((union wx_rx_desc *)((R)->desc))[i])) -#define WX_TX_DESC(R, i) \ - (&(((union wx_tx_desc *)((R)->desc))[i])) -#define WX_TX_CTXTDESC(R, i) \ - (&(((struct wx_tx_context_desc *)((R)->desc))[i])) - -/* wrapper around a pointer to a socket buffer, - * so a DMA handle can be stored along with the buffer - */ -struct wx_tx_buffer { - union wx_tx_desc *next_to_watch; - struct sk_buff *skb; - unsigned int bytecount; - unsigned short gso_segs; - DEFINE_DMA_UNMAP_ADDR(dma); - DEFINE_DMA_UNMAP_LEN(len); - __be16 protocol; - u32 tx_flags; -}; - -struct wx_rx_buffer { - struct sk_buff *skb; - dma_addr_t dma; - dma_addr_t page_dma; - struct page *page; - unsigned int page_offset; -}; - -struct wx_queue_stats { - u64 packets; - u64 bytes; -}; - -struct wx_rx_queue_stats { - u64 csum_good_cnt; - u64 csum_err; -}; - -/* iterator for handling rings in ring container */ -#define wx_for_each_ring(posm, headm) \ - for (posm = (headm).ring; posm; posm = posm->next) - -struct wx_ring_container { - struct wx_ring *ring; /* pointer to linked list of rings */ - unsigned int total_bytes; /* total bytes processed this int */ - unsigned int total_packets; /* total packets processed this int */ - u8 count; /* total number of rings in vector */ - u8 itr; /* current ITR setting for ring */ -}; -struct wx_ring { - struct wx_ring *next; /* pointer to next ring in q_vector */ - struct wx_q_vector *q_vector; /* backpointer to host q_vector */ - struct net_device *netdev; /* netdev ring belongs to */ - struct device *dev; /* device for DMA mapping */ - struct page_pool *page_pool; - void *desc; /* descriptor ring memory */ - union { - struct wx_tx_buffer *tx_buffer_info; - struct wx_rx_buffer *rx_buffer_info; - }; - u8 __iomem *tail; - dma_addr_t dma; /* phys. 
address of descriptor ring */ - unsigned int size; /* length in bytes */ - - u16 count; /* amount of descriptors */ - - u8 queue_index; /* needed for multiqueue queue management */ - u8 reg_idx; /* holds the special value that gets - * the hardware register offset - * associated with this ring, which is - * different for DCB and RSS modes - */ - u16 next_to_use; - u16 next_to_clean; - u16 next_to_alloc; - - struct wx_queue_stats stats; - struct u64_stats_sync syncp; - union { - struct wx_rx_queue_stats rx_stats; - }; -} ____cacheline_internodealigned_in_smp; - -struct wx_q_vector { - struct wx *wx; - int cpu; /* CPU for DCA */ - int numa_node; - u16 v_idx; /* index of q_vector within array, also used for - * finding the bit in EICR and friends that - * represents the vector for this ring - */ - u16 itr; /* Interrupt throttle rate written to EITR */ - struct wx_ring_container rx, tx; - struct napi_struct napi; - struct rcu_head rcu; /* to avoid race with update stats on free */ - - char name[IFNAMSIZ + 17]; - - /* for dynamic allocation of rings associated with this q_vector */ - struct wx_ring ring[] ____cacheline_internodealigned_in_smp; -}; - -enum wx_isb_idx { - WX_ISB_HEADER, - WX_ISB_MISC, - WX_ISB_VEC0, - WX_ISB_VEC1, - WX_ISB_MAX -}; - -struct wx { - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - - void *priv; - u8 __iomem *hw_addr; - struct pci_dev *pdev; - struct net_device *netdev; - struct wx_bus_info bus; - struct wx_mac_info mac; - enum em_mac_type mac_type; - enum sp_media_type media_type; - struct wx_eeprom_info eeprom; - struct wx_addr_filter_info addr_ctrl; - struct wx_mac_addr *mac_table; - u16 device_id; - u16 vendor_id; - u16 subsystem_device_id; - u16 subsystem_vendor_id; - u8 revision_id; - u16 oem_ssid; - u16 oem_svid; - u16 msg_enable; - bool adapter_stopped; - u16 tpid[8]; - char eeprom_id[32]; - char *driver_name; - enum wx_reset_type reset_type; - - /* PHY stuff */ - unsigned int link; - int speed; - int duplex; - struct phy_device *phydev; - - bool wol_hw_supported; - bool ncsi_enabled; - bool gpio_ctrl; - raw_spinlock_t gpio_lock; - - /* Tx fast path data */ - int num_tx_queues; - u16 tx_itr_setting; - u16 tx_work_limit; - - /* Rx fast path data */ - int num_rx_queues; - u16 rx_itr_setting; - u16 rx_work_limit; - - int num_q_vectors; /* current number of q_vectors for device */ - int max_q_vectors; /* upper limit of q_vectors for device */ - - u32 tx_ring_count; - u32 rx_ring_count; - - struct wx_ring *tx_ring[64] ____cacheline_aligned_in_smp; - struct wx_ring *rx_ring[64]; - struct wx_q_vector *q_vector[64]; - - unsigned int queues_per_pool; - struct msix_entry *msix_entries; - - /* misc interrupt status block */ - dma_addr_t isb_dma; - u32 *isb_mem; - u32 isb_tag[WX_ISB_MAX]; - -#define WX_MAX_RETA_ENTRIES 128 - u8 rss_indir_tbl[WX_MAX_RETA_ENTRIES]; - -#define WX_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ - u32 *rss_key; - u32 wol; - - u16 bd_number; -}; - -#define WX_INTR_ALL (~0ULL) -#define WX_INTR_Q(i) BIT(i) - -/* register operations */ -#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) -#define rd32(a, reg) readl((a)->hw_addr + (reg)) -#define rd32a(a, reg, offset) ( \ - rd32((a), (reg) + ((offset) << 2))) -#define wr32a(a, reg, off, val) \ - wr32((a), (reg) + ((off) << 2), (val)) - -static inline u32 -rd32m(struct wx *wx, u32 reg, u32 mask) -{ - u32 val; - - val = rd32(wx, reg); - return val & mask; -} - -static inline void -wr32m(struct wx *wx, u32 reg, u32 mask, u32 field) -{ - u32 val; - - val = rd32(wx, reg); - 
val = ((val & ~mask) | (field & mask)); - - wr32(wx, reg, val); -} - -/* On some domestic CPU platforms, sometimes IO is not synchronized with - * flushing memory, here use readl() to flush PCI read and write. - */ -#define WX_WRITE_FLUSH(H) rd32(H, WX_MIS_PWR) - -#define wx_err(wx, fmt, arg...) \ - dev_err(&(wx)->pdev->dev, fmt, ##arg) - -#define wx_dbg(wx, fmt, arg...) \ - dev_dbg(&(wx)->pdev->dev, fmt, ##arg) - -#endif /* _WX_TYPE_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/Makefile b/drivers/net/ethernet/wangxun/ngbe/Makefile index 61a13d98abe79e597762b76a44a725993eeb3ad7..d9f34ece1d0f0cf6d34a3c100b3ebb2bc84ce103 100644 --- a/drivers/net/ethernet/wangxun/ngbe/Makefile +++ b/drivers/net/ethernet/wangxun/ngbe/Makefile @@ -1,9 +1,16 @@ -# SPDX-License-Identifier: GPL-2.0 -# Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. -# -# Makefile for the Wangxun(R) GbE PCI Express ethernet driver -# - obj-$(CONFIG_NGBE) += ngbe.o -ngbe-objs := ngbe_main.o ngbe_hw.o ngbe_mdio.o ngbe_ethtool.o +ngbe-objs := ngbe_main.o \ + ngbe_hw.o \ + ngbe_phy.o \ + ngbe_ethtool.o \ + ngbe_lib.o \ + ngbe_mbx.o \ + ngbe_sriov.o \ + ngbe_pcierr.o \ + ngbe_param.o \ + ngbe_procfs.o \ + ngbe_ptp.o \ + ngbe_sysfs.o \ + ngbe_debugfs.o \ + ngbe_kcompat.o \ No newline at end of file diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe.h b/drivers/net/ethernet/wangxun/ngbe/ngbe.h new file mode 100644 index 0000000000000000000000000000000000000000..97556e1bf69252a888fc746ec4a36d7c066daf0d --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe.h @@ -0,0 +1,1245 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ */ + + +#ifndef _NGBE_H_ +#define _NGBE_H_ + +#ifndef NGBE_NO_LRO +#include +#else +#include +#endif + +#include +#include +#include + +#ifdef SIOCETHTOOL +#include +#endif +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) +#include +#endif + +#include "ngbe_type.h" +#include "ngbe_kcompat.h" + +#ifdef HAVE_XDP_BUFF_RXQ +#include +#endif + +#ifdef HAVE_NDO_BUSY_POLL +#include +#define BP_EXTENDED_STATS +#endif + +#ifdef HAVE_SCTP +#include +#endif + +#ifdef HAVE_INCLUDE_LINUX_MDIO_H +#include +#endif + +#ifdef HAVE_PTP_1588_CLOCK +#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#include +#endif /* HAVE_INCLUDE_TIMECOUNTER_H */ +#include +#include +#include +#endif /* HAVE_PTP_1588_CLOCK */ + +/* Ether Types */ +#define NGBE_ETH_P_LLDP 0x88CC +#define NGBE_ETH_P_CNM 0x22E7 + +/* TX/RX descriptor defines */ +#define NGBE_DEFAULT_TXD 512 /* default ring size */ +#define NGBE_DEFAULT_TX_WORK 256 +#define NGBE_MAX_TXD 8192 +#define NGBE_MIN_TXD 128 + +#define NGBE_DEFAULT_RXD 512 /* default ring size */ +#define NGBE_DEFAULT_RX_WORK 256 +#define NGBE_MAX_RXD 8192 +#define NGBE_MIN_RXD 128 + +#define NGBE_ETH_P_LLDP 0x88CC + +/* flow control */ +#define NGBE_MIN_FCRTL 0x40 +#define NGBE_MAX_FCRTL 0x7FF80 +#define NGBE_MIN_FCRTH 0x600 +#define NGBE_MAX_FCRTH 0x7FFF0 +#define NGBE_DEFAULT_FCPAUSE 0xFFFF +#define NGBE_MIN_FCPAUSE 0 +#define NGBE_MAX_FCPAUSE 0xFFFF + +/* Supported Rx Buffer Sizes */ +#define NGBE_RXBUFFER_256 256 /* Used for skb receive header */ +#define NGBE_RXBUFFER_2K 2048 +#define NGBE_RXBUFFER_3K 3072 +#define NGBE_RXBUFFER_4K 4096 +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT +#define NGBE_RXBUFFER_1536 1536 +#define NGBE_RXBUFFER_7K 7168 +#define NGBE_RXBUFFER_8K 8192 +#define NGBE_RXBUFFER_15K 15360 +#endif /* CONFIG_NGBE_DISABLE_PACKET_SPLIT */ +#define NGBE_MAX_RXBUFFER 16384 /* largest size for single descriptor */ + + +/* + * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we + * reserve 64 more, and skb_shared_info adds an additional 320 bytes more, + * this adds up to 448 bytes of extra data. + * + * Since netdev_alloc_skb now allocates a page fragment we can use a value + * of 256 and the resultant skb will have a truesize of 960 or less. + */ +#define NGBE_RX_HDR_SIZE NGBE_RXBUFFER_256 + +#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) + +/* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ +#define NGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#ifdef HAVE_STRUCT_DMA_ATTRS +#define NGBE_RX_DMA_ATTR NULL +#else +#define NGBE_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +#endif + +/* assume the kernel supports 8021p to avoid stripping vlan tags */ +#ifdef NGBE_DISABLE_8021P_SUPPORT +#ifndef HAVE_8021P_SUPPORT +#define HAVE_8021P_SUPPORT +#endif +#endif /* NGBE_DISABLE_8021P_SUPPORT */ + +enum ngbe_tx_flags { + /* cmd_type flags */ + NGBE_TX_FLAGS_HW_VLAN = 0x01, + NGBE_TX_FLAGS_TSO = 0x02, + NGBE_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + NGBE_TX_FLAGS_CC = 0x08, + NGBE_TX_FLAGS_IPV4 = 0x10, + NGBE_TX_FLAGS_CSUM = 0x20, + NGBE_TX_FLAGS_OUTER_IPV4 = 0x100, + NGBE_TX_FLAGS_LINKSEC = 0x200, + NGBE_TX_FLAGS_IPSEC = 0x400, + + /* software defined flags */ + NGBE_TX_FLAGS_SW_VLAN = 0x40, + NGBE_TX_FLAGS_FCOE = 0x80, +}; + +/* VLAN info */ +#define NGBE_TX_FLAGS_VLAN_MASK 0xffff0000 +#define NGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 +#define NGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 +#define NGBE_TX_FLAGS_VLAN_SHIFT 16 + +#define NGBE_MAX_RX_DESC_POLL 10 + +#define NGBE_MAX_VF_MC_ENTRIES 30 +#define NGBE_MAX_VF_FUNCTIONS 8 +#define MAX_EMULATION_MAC_ADDRS 16 +#define NGBE_MAX_PF_MACVLANS 15 +#define NGBE_VF_DEVICE_ID 0x1000 + +/* must account for pools assigned to VFs. */ +#ifdef CONFIG_PCI_IOV +#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset) +#else +#define VMDQ_P(p) (p) +#endif + +#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ + { \ + u32 current_counter = rd32(hw, reg); \ + if (current_counter < last_counter) \ + counter += 0x100000000LL; \ + last_counter = current_counter; \ + counter &= 0xFFFFFFFF00000000LL; \ + counter |= current_counter; \ + } + +#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ + { \ + u64 current_counter_lsb = rd32(hw, reg_lsb); \ + u64 current_counter_msb = rd32(hw, reg_msb); \ + u64 current_counter = (current_counter_msb << 32) | \ + current_counter_lsb; \ + if (current_counter < last_counter) \ + counter += 0x1000000000LL; \ + last_counter = current_counter; \ + counter &= 0xFFFFFFF000000000LL; \ + counter |= current_counter; \ + } +#ifdef HAVE_XDP_SUPPORT +DECLARE_STATIC_KEY_FALSE(ngbe_xdp_locking_key); +#endif + +#ifndef XDP_PACKET_HEADROOM +#define XDP_PACKET_HEADROOM 256 +#endif + +struct vf_stats { + u64 gprc; + u64 gorc; + u64 gptc; + u64 gotc; + u64 mprc; +}; + +struct vf_data_storage { + struct pci_dev *vfdev; + u8 IOMEM *b4_addr; + u32 b4_buf[16]; + unsigned char vf_mac_addresses[ETH_ALEN]; + u16 vf_mc_hashes[NGBE_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + u16 default_vf_vlan_id; + u16 vlans_enabled; + bool clear_to_send; + struct vf_stats vfstats; + struct vf_stats last_vfstats; + struct vf_stats saved_rst_vfstats; + bool pf_set_mac; + u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ + u16 pf_qos; + u16 min_tx_rate; + u16 max_tx_rate; + u16 vlan_count; + u8 spoofchk_enabled; +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN + bool rss_query_enabled; +#endif + u8 trusted; + int xcast_mode; + unsigned int vf_api; +}; + +struct vf_macvlans { + struct list_head l; + int vf; + bool free; + bool is_macvlan; + u8 vf_macvlan[ETH_ALEN]; +}; + +#ifndef NGBE_NO_LRO +#define NGBE_LRO_MAX 32 /*Maximum number of LRO descriptors*/ +#define NGBE_LRO_GLOBAL 10 + +struct ngbe_lro_stats { + u32 flushed; + u32 coal; +}; + +/* + * ngbe_lro_header - header format to be aggregated by LRO + * @iph: IP header without options + * @tcp: TCP header + * @ts: Optional TCP timestamp data in TCP options + * + * This structure relies on the check above that verifies that the header + * is IPv4 and does not contain any options. + */ +struct ngbe_lrohdr { + struct iphdr iph; + struct tcphdr th; + __be32 ts[0]; +}; + +struct ngbe_lro_list { + struct sk_buff_head active; + struct ngbe_lro_stats stats; +}; + +#endif /* NGBE_NO_LRO */ +#define NGBE_MAX_TXD_PWR 14 +#define NGBE_MAX_DATA_PER_TXD (1 << NGBE_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), NGBE_MAX_DATA_PER_TXD) +#ifndef MAX_SKB_FRAGS +#define DESC_NEEDED 4 +#elif (MAX_SKB_FRAGS < 16) +#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) +#else +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#endif + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +struct ngbe_tx_buffer { + union ngbe_tx_desc *next_to_watch; + unsigned long time_stamp; + union { + struct sk_buff *skb; + /* XDP uses address ptr on irq_clean */ +#ifdef HAVE_XDP_FRAME_STRUCT + struct xdp_frame *xdpf; +#else + void *data; +#endif + }; + unsigned int bytecount; + unsigned short gso_segs; + __be16 protocol; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct ngbe_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + dma_addr_t page_dma; + struct page *page; + unsigned int page_offset; +#endif +}; + +struct ngbe_queue_stats { + u64 packets; + u64 bytes; +#ifdef BP_EXTENDED_STATS + u64 yields; + u64 misses; + u64 cleaned; +#endif /* BP_EXTENDED_STATS */ +}; + +struct ngbe_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; +}; + +struct ngbe_rx_queue_stats { + u64 non_eop_descs; + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; + u64 csum_good_cnt; + u64 csum_err; +}; + +#define NGBE_TS_HDR_LEN 8 +enum ngbe_ring_state_t { +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + __NGBE_RX_3K_BUFFER, + __NGBE_RX_BUILD_SKB_ENABLED, +#endif + __NGBE_TX_XPS_INIT_DONE, + __NGBE_TX_DETECT_HANG, + __NGBE_HANG_CHECK_ARMED, + __NGBE_RX_HS_ENABLED, + __NGBE_TX_XDP_RING, +}; + +struct ngbe_fwd_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct net_device *vdev; + struct ngbe_adapter *adapter; + unsigned int tx_base_queue; + unsigned int rx_base_queue; + int index; /* pool index on PF */ +}; + +#define ring_uses_build_skb(ring) \ + test_bit(__NGBE_RX_BUILD_SKB_ENABLED, &(ring)->state) + + +#define ring_is_hs_enabled(ring) \ + test_bit(__NGBE_RX_HS_ENABLED, &(ring)->state) +#define set_ring_hs_enabled(ring) \ + set_bit(__NGBE_RX_HS_ENABLED, &(ring)->state) +#define clear_ring_hs_enabled(ring) \ + clear_bit(__NGBE_RX_HS_ENABLED, &(ring)->state) +#define check_for_tx_hang(ring) \ + test_bit(__NGBE_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + 
set_bit(__NGBE_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__NGBE_TX_DETECT_HANG, &(ring)->state) +#define ring_is_xdp(ring) \ + test_bit(__NGBE_TX_XDP_RING, &(ring)->state) +#define set_ring_xdp(ring) \ + set_bit(__NGBE_TX_XDP_RING, &(ring)->state) +#define clear_ring_xdp(ring) \ + clear_bit(__NGBE_TX_XDP_RING, &(ring)->state) + +struct ngbe_ring { + struct ngbe_ring *next; /* pointer to next ring in q_vector */ + struct ngbe_q_vector *q_vector; /* backpointer to host q_vector */ + struct net_device *netdev; /* netdev ring belongs to */ + struct bpf_prog *xdp_prog; + struct device *dev; /* device for DMA mapping */ + struct ngbe_fwd_adapter *accel; + void *desc; /* descriptor ring memory */ + union { + struct ngbe_tx_buffer *tx_buffer_info; + struct ngbe_rx_buffer *rx_buffer_info; + }; + spinlock_t tx_lock; /* used in XDP mode */ + unsigned long state; + u8 __iomem *tail; + dma_addr_t dma; /* phys. address of descriptor ring */ + unsigned int size; /* length in bytes */ + + u16 count; /* amount of descriptors */ + + u8 queue_index; /* needed for multiqueue queue management */ + u8 reg_idx; /* holds the special value that gets + * the hardware register offset + * associated with this ring, which is + * different for DCB and RSS modes + */ + u16 next_to_use; + u16 next_to_clean; + +#ifdef HAVE_PTP_1588_CLOCK + unsigned long last_rx_timestamp; + +#endif + u16 rx_buf_len; + union { +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + u16 next_to_alloc; +#endif + struct { + u8 atr_sample_rate; + u8 atr_count; + }; + }; + + u8 dcb_tc; + struct ngbe_queue_stats stats; +#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync syncp; +#endif + union { + struct ngbe_tx_queue_stats tx_stats; + struct ngbe_rx_queue_stats rx_stats; + }; +#ifdef HAVE_XDP_BUFF_RXQ + struct xdp_rxq_info xdp_rxq; +#endif +} ____cacheline_internodealigned_in_smp; + +enum ngbe_ring_f_enum { + RING_F_NONE = 0, + RING_F_VMDQ, /* SR-IOV uses the same ring feature */ + RING_F_RSS, + RING_F_ARRAY_SIZE /* must be last in enum set */ +}; + +#define TGB_MAX_RX_QUEUES 16 +#define NGBE_MAX_TX_QUEUES 16 +#define NGBE_MAX_XDP_QS NGBE_MAX_TX_QUEUES + + + +#define NGBE_MAX_RSS_INDICES 8 +#define NGBE_MAX_VMDQ_INDICES 8 +#define NGBE_MAX_FDIR_INDICES 8 +#define MAX_RX_QUEUES 8 +#define MAX_TX_QUEUES 8 +#define NGBE_MAX_L2A_QUEUES 4 +#define NGBE_BAD_L2A_QUEUE 3 + +#define NGBE_MAX_MACVLANS 8 + +struct ngbe_ring_feature { + u16 limit; /* upper limit on feature indices */ + u16 indices; /* current value of indices */ + u16 mask; /* Mask used for feature to ring mapping */ + u16 offset; /* offset to start of feature */ +}; + +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT +/* + * FCoE requires that all Rx buffers be over 2200 bytes in length. Since + * this is twice the size of a half page we need to double the page order + * for FCoE enabled Rx queues. 
+ */ +static inline unsigned int ngbe_rx_bufsz(struct ngbe_ring __maybe_unused *ring) +{ +#if MAX_SKB_FRAGS < 8 + return ALIGN(NGBE_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024); +#else + return NGBE_RXBUFFER_2K; +#endif +} + +static inline unsigned int ngbe_rx_pg_order(struct ngbe_ring __maybe_unused *ring) +{ + return 0; +} +#define ngbe_rx_pg_size(_ring) (PAGE_SIZE << ngbe_rx_pg_order(_ring)) + +#endif +struct ngbe_ring_container { + struct ngbe_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +/* iterator for handling rings in ring container */ +#define ngbe_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + +#define MAX_RX_PACKET_BUFFERS ((adapter->flags & NGBE_FLAG_DCB_ENABLED) \ + ? 8 : 1) +#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS + +/* MAX_MSIX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. + */ +struct ngbe_q_vector { + struct ngbe_adapter *adapter; + int cpu; /* CPU for DCA */ + u16 v_idx; /* index of q_vector within array, also used for + * finding the bit in EICR and friends that + * represents the vector for this ring */ + u16 itr; /* Interrupt throttle rate written to EITR */ + struct ngbe_ring_container rx, tx; + + struct napi_struct napi; +#ifndef HAVE_NETDEV_NAPI_LIST + struct net_device poll_dev; +#endif +#ifdef HAVE_IRQ_AFFINITY_HINT + cpumask_t affinity_mask; +#endif +#ifndef NGBE_NO_LRO + struct ngbe_lro_list lrolist; /* LRO list for queue vector*/ +#endif + int numa_node; + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 17]; + bool netpoll_rx; + +#ifdef HAVE_NDO_BUSY_POLL + atomic_t state; +#endif /* HAVE_NDO_BUSY_POLL */ + + /* for dynamic allocation of rings associated with this q_vector */ + struct ngbe_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + +#ifdef HAVE_NDO_BUSY_POLL +enum ngbe_qv_state_t { + NGBE_QV_STATE_IDLE = 0, + NGBE_QV_STATE_NAPI, + NGBE_QV_STATE_POLL, + NGBE_QV_STATE_DISABLE +}; + +static inline void ngbe_qv_init_lock(struct ngbe_q_vector *q_vector) +{ + /* reset state to idle */ + atomic_set(&q_vector->state, NGBE_QV_STATE_IDLE); +} + +/* called from the device poll routine to get ownership of a q_vector */ +static inline bool ngbe_qv_lock_napi(struct ngbe_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, NGBE_QV_STATE_IDLE, + NGBE_QV_STATE_NAPI); +#ifdef BP_EXTENDED_STATS + if (rc != NGBE_QV_STATE_IDLE) + q_vector->tx.ring->stats.yields++; +#endif + + return rc == NGBE_QV_STATE_IDLE; +} + +/* returns true is someone tried to get the qv while napi had it */ +static inline void ngbe_qv_unlock_napi(struct ngbe_q_vector *q_vector) +{ + WARN_ON(atomic_read(&q_vector->state) != NGBE_QV_STATE_NAPI); + + /* flush any outstanding Rx frames */ + if (q_vector->napi.gro_list) + napi_gro_flush(&q_vector->napi, false); + + /* reset state to idle */ + atomic_set(&q_vector->state, NGBE_QV_STATE_IDLE); +} + +/* called from ngbe_low_latency_poll() */ +static inline bool ngbe_qv_lock_poll(struct ngbe_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, NGBE_QV_STATE_IDLE, + NGBE_QV_STATE_POLL); +#ifdef BP_EXTENDED_STATS + if (rc != NGBE_QV_STATE_IDLE) + q_vector->tx.ring->stats.yields++; +#endif + return rc == 
NGBE_QV_STATE_IDLE; +} + +/* returns true if someone tried to get the qv while it was locked */ +static inline void ngbe_qv_unlock_poll(struct ngbe_q_vector *q_vector) +{ + WARN_ON(atomic_read(&q_vector->state) != NGBE_QV_STATE_POLL); + + /* reset state to idle */ + atomic_set(&q_vector->state, NGBE_QV_STATE_IDLE); +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool ngbe_qv_busy_polling(struct ngbe_q_vector *q_vector) +{ + return atomic_read(&q_vector->state) == NGBE_QV_STATE_POLL; +} + +/* false if QV is currently owned */ +static inline bool ngbe_qv_disable(struct ngbe_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, NGBE_QV_STATE_IDLE, + NGBE_QV_STATE_DISABLE); + + return rc == NGBE_QV_STATE_IDLE; +} + +#endif /* HAVE_NDO_BUSY_POLL */ +#ifdef NGBE_HWMON + +#define NGBE_HWMON_TYPE_TEMP 0 +#define NGBE_HWMON_TYPE_ALARMTHRESH 1 +#define NGBE_HWMON_TYPE_DALARMTHRESH 2 + +struct hwmon_attr { + struct device_attribute dev_attr; + struct ngbe_hw *hw; + struct ngbe_thermal_diode_data *sensor; + char name[19]; +}; + +struct hwmon_buff { + struct device *device; + struct hwmon_attr *hwmon_list; + unsigned int n_hwmon; +}; +#endif /* NGBE_HWMON */ + +/* + * microsecond values for various ITR rates shifted by 2 to fit itr register + * with the first 3 bits reserved 0 + */ +#define NGBE_70K_ITR 57 +#define NGBE_20K_ITR 200 +#define NGBE_4K_ITR 1024 +#define NGBE_7K_ITR 595 + +/* ngbe_test_staterr - tests bits in Rx descriptor status and error fields */ +static inline __le32 ngbe_test_staterr(union ngbe_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +/* ngbe_desc_unused - calculate if we have unused descriptors */ +static inline u16 ngbe_desc_unused(struct ngbe_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; +} + +#define NGBE_RX_DESC(R, i) \ + (&(((union ngbe_rx_desc *)((R)->desc))[i])) +#define NGBE_TX_DESC(R, i) \ + (&(((union ngbe_tx_desc *)((R)->desc))[i])) +#define NGBE_TX_CTXTDESC(R, i) \ + (&(((struct ngbe_tx_context_desc *)((R)->desc))[i])) + +#define NGBE_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */ +#define TCP_TIMER_VECTOR 0 +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR + TCP_TIMER_VECTOR) + +#define NGBE_MAX_MSIX_Q_VECTORS_EMERALD 9 + +struct ngbe_mac_addr { + u8 addr[ETH_ALEN]; + u16 state; /* bitmask */ + u64 pools; +}; + +#define NGBE_MAC_STATE_DEFAULT 0x1 +#define NGBE_MAC_STATE_MODIFIED 0x2 +#define NGBE_MAC_STATE_IN_USE 0x4 + +#ifdef NGBE_PROCFS +struct ngbe_therm_proc_data { + struct ngbe_hw *hw; + struct ngbe_thermal_diode_data *sensor_data; +}; +#endif + +/* + * Only for array allocations in our adapter struct. + * we can actually assign 64 queue vectors based on our extended-extended + * interrupt registers. 
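+ * In practice MAX_MSIX_Q_VECTORS is bound to
+ * NGBE_MAX_MSIX_Q_VECTORS_EMERALD (9) below, so that is the effective
+ * upper limit on the q_vector[] array in struct ngbe_adapter.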
+ */ +#define MAX_MSIX_Q_VECTORS NGBE_MAX_MSIX_Q_VECTORS_EMERALD +#define MAX_MSIX_COUNT NGBE_MAX_MSIX_VECTORS_EMERALD + +#define MIN_MSIX_Q_VECTORS 1 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) + +/* default to trying for four seconds */ +#define NGBE_TRY_LINK_TIMEOUT (4 * HZ) +#define NGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */ + +/** + * ngbe_adapter.flag + **/ +#define NGBE_FLAG_MSI_CAPABLE (u32)(1 << 0) +#define NGBE_FLAG_MSI_ENABLED (u32)(1 << 1) +#define NGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2) +#define NGBE_FLAG_MSIX_ENABLED (u32)(1 << 3) +#ifndef NGBE_NO_LLI +#define NGBE_FLAG_LLI_PUSH (u32)(1 << 4) +#endif + +#define NGBE_FLAG_TPH_ENABLED (u32)(1 << 6) +#define NGBE_FLAG_TPH_CAPABLE (u32)(1 << 7) +#define NGBE_FLAG_TPH_ENABLED_DATA (u32)(1 << 8) + +#define NGBE_FLAG_MQ_CAPABLE (u32)(1 << 9) +#define NGBE_FLAG_DCB_ENABLED (u32)(1 << 10) +#define NGBE_FLAG_VMDQ_ENABLED (u32)(1 << 11) +#define NGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 12) +#define NGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 13) +#define NGBE_FLAG_NEED_ANC_CHECK (u32)(1 << 14) +#define NGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 15) +#define NGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 16) +#define NGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 19) +#define NGBE_FLAG_SRIOV_ENABLED (u32)(1 << 20) +#define NGBE_FLAG_SRIOV_REPLICATION_ENABLE (u32)(1 << 21) +#define NGBE_FLAG_SRIOV_L2SWITCH_ENABLE (u32)(1 << 22) +#define NGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE (u32)(1 << 23) +#define NGBE_FLAG_RX_HWTSTAMP_ENABLED (u32)(1 << 24) +#define NGBE_FLAG_VXLAN_OFFLOAD_CAPABLE (u32)(1 << 25) +#define NGBE_FLAG_VXLAN_OFFLOAD_ENABLE (u32)(1 << 26) +#define NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER (u32)(1 << 27) +#define NGBE_FLAG_NEED_ETH_PHY_RESET (u32)(1 << 28) +#define NGBE_FLAG_RX_HS_ENABLED (u32)(1 << 30) +#define NGBE_FLAG_LINKSEC_ENABLED (u32)(1 << 31) +#define NGBE_FLAG_IPSEC_ENABLED (u32)(1 << 5) + +/* preset defaults */ +#define NGBE_FLAGS_SP_INIT (NGBE_FLAG_MSI_CAPABLE \ + | NGBE_FLAG_MSIX_CAPABLE \ + | NGBE_FLAG_MQ_CAPABLE \ + | NGBE_FLAG_SRIOV_CAPABLE) + +/** + * ngbe_adapter.flag2 + **/ +#ifndef NGBE_NO_HW_RSC +#define NGBE_FLAG2_RSC_CAPABLE (1U << 0) +#define NGBE_FLAG2_RSC_ENABLED (1U << 1) +#else +#define NGBE_FLAG2_RSC_CAPABLE (0U) +#define NGBE_FLAG2_RSC_ENABLED (0U) +#endif +#define NGBE_FLAG2_TEMP_SENSOR_CAPABLE (1U << 3) +#define NGBE_FLAG2_TEMP_SENSOR_EVENT (1U << 4) +#define NGBE_FLAG2_SEARCH_FOR_SFP (1U << 5) +#define NGBE_FLAG2_SFP_NEEDS_RESET (1U << 6) +#define NGBE_FLAG2_PF_RESET_REQUESTED (1U << 7) +#define NGBE_FLAG2_FDIR_REQUIRES_REINIT (1U << 8) +#define NGBE_FLAG2_RSS_FIELD_IPV4_UDP (1U << 9) +#define NGBE_FLAG2_RSS_FIELD_IPV6_UDP (1U << 10) +#define NGBE_FLAG2_RSS_ENABLED (1U << 12) +#define NGBE_FLAG2_PTP_PPS_ENABLED (1U << 11) +#define NGBE_FLAG2_EEE_CAPABLE (1U << 14) +#define NGBE_FLAG2_EEE_ENABLED (1U << 15) +#define NGBE_FLAG2_VXLAN_REREG_NEEDED (1U << 16) +#define NGBE_FLAG2_DEV_RESET_REQUESTED (1U << 18) +#define NGBE_FLAG2_RESET_INTR_RECEIVED (1U << 19) +#define NGBE_FLAG2_GLOBAL_RESET_REQUESTED (1U << 20) +#define NGBE_FLAG2_MNG_REG_ACCESS_DISABLED (1U << 22) +#define NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP (1U << 23) +#define NGBE_FLAG2_ECC_ERR_RESET (1U << 24) +#define NGBE_FLAG2_PCIE_NEED_RECOVER (1U << 31) + +#define NGBE_SET_FLAG(_input, _flag, _result) \ + ((_flag <= _result) ? 
\ + ((u32)(_input & _flag) * (_result / _flag)) : \ + ((u32)(_input & _flag) / (_flag / _result))) + +enum ngbe_isb_idx { + NGBE_ISB_HEADER, + NGBE_ISB_MISC, + NGBE_ISB_VEC0, + NGBE_ISB_VEC1, + NGBE_ISB_MAX +}; + +/* board specific private data structure */ +struct ngbe_adapter { +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) ||\ + defined(NETIF_F_HW_VLAN_STAG_TX) +#ifdef HAVE_VLAN_RX_REGISTER + struct vlan_group *vlgrp; /* must be first, see ngbe_receive_skb */ +#else /* HAVE_VLAN_RX_REGISTER */ + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; +#endif /* HAVE_VLAN_RX_REGISTER */ +#endif + /* OS defined structs */ + struct net_device *netdev; + struct bpf_prog *xdp_prog; + struct pci_dev *pdev; + + unsigned long state; + + /* Some features need tri-state capability, + * thus the additional *_CAPABLE flags. + */ + u32 flags; + u32 flags2; + u32 led_conf; + u32 gphy_efuse[2]; + + /* Tx fast path data */ + int num_tx_queues; + u16 tx_itr_setting; + u16 tx_work_limit; + + /* Rx fast path data */ + int num_rx_queues; + u16 rx_itr_setting; + u16 rx_work_limit; + + + /* XDP */ + int num_xdp_queues; + struct ngbe_ring *xdp_ring[NGBE_MAX_XDP_QS]; + + unsigned int num_vmdqs; /* does not include pools assigned to VFs */ + unsigned int queues_per_pool; + + /* TX */ + struct ngbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; + + u64 restart_queue; + u64 lsc_int; + u32 tx_timeout_count; + + /* RX */ + struct ngbe_ring *rx_ring[MAX_RX_QUEUES]; + u64 hw_csum_rx_error; + u64 hw_csum_rx_good; + u64 hw_rx_no_dma_resources; + u64 non_eop_descs; + u32 alloc_rx_page_failed; + u32 alloc_rx_buff_failed; + + struct ngbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; + +#ifdef HAVE_DCBNL_IEEE + struct ieee_pfc *ngbe_ieee_pfc; + struct ieee_ets *ngbe_ieee_ets; +#endif + enum ngbe_fc_mode last_lfc_mode; + int num_q_vectors; /* current number of q_vectors for device */ + int max_q_vectors; /* upper limit of q_vectors for device */ + struct ngbe_ring_feature ring_feature[RING_F_ARRAY_SIZE]; + u16 irq_remap_offset; + struct msix_entry *msix_entries; + u16 old_rss_limit; +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats net_stats; +#endif +#ifndef NGBE_NO_LRO + struct ngbe_lro_stats lro_stats; +#endif + +#ifdef ETHTOOL_TEST + u64 test_icr; + struct ngbe_ring test_tx_ring; + struct ngbe_ring test_rx_ring; +#endif + + /* structs defined in ngbe_hw.h */ + struct ngbe_hw hw; + u16 msg_enable; + struct ngbe_hw_stats stats; +#ifndef NGBE_NO_LLI + u32 lli_port; + u32 lli_size; + u32 lli_etype; + u32 lli_vlan_pri; +#endif /* NGBE_NO_LLI */ + + struct ngbe_queue_stats old_rx_qstats[MAX_RX_QUEUES]; + struct ngbe_queue_stats old_tx_qstats[MAX_TX_QUEUES]; + struct ngbe_tx_queue_stats old_tx_stats[MAX_TX_QUEUES]; + struct ngbe_rx_queue_stats old_rx_stats[MAX_RX_QUEUES]; + + u32 *config_space; + u64 tx_busy; + unsigned int tx_ring_count; + unsigned int xdp_ring_count; + unsigned int rx_ring_count; + + u32 link_speed; + bool link_up; + unsigned long sfp_poll_time; + unsigned long link_check_timeout; + + struct timer_list service_timer; + struct work_struct service_task; + struct timer_list link_check_timer; + + u32 atr_sample_rate; + u8 __iomem *io_addr; /* Mainly for iounmap use */ + u32 wol; + + u16 bd_number; + +#ifdef HAVE_BRIDGE_ATTRIBS + u16 bridge_mode; +#endif + + char eeprom_id[32]; + u16 eeprom_cap; + bool netdev_registered; + u32 interrupt_event; +#ifdef HAVE_ETHTOOL_SET_PHYS_ID + u32 led_reg; +#endif + +#ifdef HAVE_PTP_1588_CLOCK + struct ptp_clock *ptp_clock; + struct 
ptp_clock_info ptp_caps; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned long last_overflow_check; + unsigned long last_rx_ptp_check; + spinlock_t tmreg_lock; + struct cyclecounter hw_cc; + struct timecounter hw_tc; + u32 base_incval; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + void (*ptp_setup_sdp) (struct ngbe_adapter *); +#endif /* HAVE_PTP_1588_CLOCK */ + + DECLARE_BITMAP(active_vfs, NGBE_MAX_VF_FUNCTIONS); + unsigned int num_vfs; + struct vf_data_storage *vfinfo; + struct vf_macvlans vf_mvs; + struct vf_macvlans *mv_list; +#ifdef CONFIG_PCI_IOV + u32 timer_event_accumulator; + u32 vferr_refcount; +#endif + struct ngbe_mac_addr *mac_table; +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) + __le16 vxlan_port; +#endif /* HAVE_UDP_ENC_RX_OFFLAD || HAVE_VXLAN_RX_OFFLOAD */ +#ifdef HAVE_UDP_ENC_RX_OFFLOAD + __le16 geneve_port; +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ +#ifdef NGBE_SYSFS +#ifdef NGBE_HWMON + struct hwmon_buff ngbe_hwmon_buff; +#endif /* NGBE_HWMON */ +#else /* NGBE_SYSFS */ +#ifdef NGBE_PROCFS + struct proc_dir_entry *eth_dir; + struct proc_dir_entry *info_dir; + u64 old_lsc; + struct proc_dir_entry *therm_dir; + struct ngbe_therm_proc_data therm_data; +#endif /* NGBE_PROCFS */ +#endif /* NGBE_SYSFS */ + +#ifdef HAVE_NGBE_DEBUG_FS + struct dentry *ngbe_dbg_adapter; +#endif /*HAVE_NGBE_DEBUG_FS*/ + u8 default_up; +#ifdef HAVE_TX_MQ +#ifndef HAVE_NETDEV_SELECT_QUEUE + unsigned int indices; +#endif /* !HAVE_NETDEV_SELECT_QUEUE*/ +#endif /* HAVE_TX_MQ */ + unsigned long fwd_bitmask; /* bitmask indicating in use pools */ + unsigned long tx_timeout_last_recovery; + u32 tx_timeout_recovery_level; + +#define NGBE_MAX_RETA_ENTRIES 128 + u8 rss_indir_tbl[NGBE_MAX_RETA_ENTRIES]; +#define NGBE_RSS_KEY_SIZE 40 + u32 rss_key[NGBE_RSS_KEY_SIZE / sizeof(u32)]; + + void *ipsec; + + /* misc interrupt status block */ + dma_addr_t isb_dma; + u32 *isb_mem; + u32 isb_tag[NGBE_ISB_MAX]; + + u32 hang_cnt; + u64 eth_priv_flags; +#define NGBE_ETH_PRIV_FLAG_LLDP BIT(0) +}; + +static inline u32 ngbe_misc_isb(struct ngbe_adapter *adapter, + enum ngbe_isb_idx idx) +{ + u32 cur_tag = 0; + + cur_tag = adapter->isb_mem[NGBE_ISB_HEADER]; + adapter->isb_tag[idx] = cur_tag; + + return cpu_to_le32(adapter->isb_mem[idx]); +} + +static inline u8 ngbe_max_rss_indices(struct ngbe_adapter *adapter) +{ + if (adapter->xdp_prog) + return NGBE_MAX_RSS_INDICES / 2; + return NGBE_MAX_RSS_INDICES; +} + +enum ngbe_state_t { + __NGBE_TESTING, + __NGBE_RESETTING, + __NGBE_DOWN, + __NGBE_HANGING, + __NGBE_DISABLED, + __NGBE_REMOVING, + __NGBE_SERVICE_SCHED, + __NGBE_SERVICE_INITED, + __NGBE_IN_SFP_INIT, + __NGBE_NO_PHY_SET, +#ifdef HAVE_PTP_1588_CLOCK + __NGBE_PTP_RUNNING, + __NGBE_PTP_TX_IN_PROGRESS, +#endif +}; + +struct ngbe_cb { +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + union { /* Union defining head/tail partner */ + struct sk_buff *head; + struct sk_buff *tail; + }; +#endif + dma_addr_t dma; +#ifndef NGBE_NO_LRO + __be32 tsecr; /* timestamp echo response */ + u32 tsval; /* timestamp value in host order */ + u32 next_seq; /* next expected sequence number */ + u16 free; /* 65521 minus total size */ + u16 mss; /* size of data portion of packet */ +#endif /* NGBE_NO_LRO */ +#ifdef HAVE_VLAN_RX_REGISTER + u16 vid; /* VLAN tag */ +#endif + u16 append_cnt; /* number of skb's appended */ +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + bool page_released; + bool dma_released; 
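+ /* set by the Rx cleanup code once the page buffer or its DMA mapping
+ * has been returned, so it is not released twice */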
+#endif +}; +#define NGBE_CB(skb) ((struct ngbe_cb *)(skb)->cb) + +/* ESX ngbe CIM IOCTL definition */ + +#ifdef NGBE_SYSFS +void ngbe_sysfs_exit(struct ngbe_adapter *adapter); +int ngbe_sysfs_init(struct ngbe_adapter *adapter); +#endif /* NGBE_SYSFS */ +#ifdef NGBE_PROCFS +void ngbe_procfs_exit(struct ngbe_adapter *adapter); +int ngbe_procfs_init(struct ngbe_adapter *adapter); +int ngbe_procfs_topdir_init(void); +void ngbe_procfs_topdir_exit(void); +#endif /* NGBE_PROCFS */ + +/* needed by ngbe_main.c */ +int ngbe_validate_mac_addr(u8 *mc_addr); +void ngbe_check_options(struct ngbe_adapter *adapter); +void ngbe_assign_netdev_ops(struct net_device *netdev); + +/* needed by ngbe_ethtool.c */ +extern char ngbe_driver_name[]; +extern const char ngbe_driver_version[]; + +void ngbe_irq_disable(struct ngbe_adapter *adapter); +void ngbe_irq_enable(struct ngbe_adapter *adapter, bool queues, bool flush); +int ngbe_open(struct net_device *netdev); +int ngbe_close(struct net_device *netdev); +void ngbe_up(struct ngbe_adapter *adapter); +void ngbe_down(struct ngbe_adapter *adapter); +void ngbe_reinit_locked(struct ngbe_adapter *adapter); +void ngbe_reset(struct ngbe_adapter *adapter); +void ngbe_set_ethtool_ops(struct net_device *netdev); +int ngbe_setup_rx_resources(struct ngbe_ring *); +int ngbe_setup_tx_resources(struct ngbe_ring *); +void ngbe_free_rx_resources(struct ngbe_ring *); +void ngbe_free_tx_resources(struct ngbe_ring *); +void ngbe_configure_rx_ring(struct ngbe_adapter *, + struct ngbe_ring *); +void ngbe_configure_tx_ring(struct ngbe_adapter *, + struct ngbe_ring *); +void ngbe_update_stats(struct ngbe_adapter *adapter); +int ngbe_init_interrupt_scheme(struct ngbe_adapter *adapter); +void ngbe_reset_interrupt_capability(struct ngbe_adapter *adapter); +void ngbe_set_interrupt_capability(struct ngbe_adapter *adapter); +void ngbe_clear_interrupt_scheme(struct ngbe_adapter *adapter); +netdev_tx_t ngbe_xmit_frame_ring(struct sk_buff *, + struct ngbe_adapter *, + struct ngbe_ring *); +void ngbe_unmap_and_free_tx_resource(struct ngbe_ring *, + struct ngbe_tx_buffer *); +void ngbe_alloc_rx_buffers(struct ngbe_ring *, u16); + +void ngbe_set_rx_mode(struct net_device *netdev); +int ngbe_write_mc_addr_list(struct net_device *netdev); +int ngbe_setup_tc(struct net_device *dev, u8 tc, bool save_stats); +void ngbe_tx_ctxtdesc(struct ngbe_ring *, u32, u32, u32, u32); +void ngbe_do_reset(struct net_device *netdev); +void ngbe_write_eitr(struct ngbe_q_vector *q_vector); +int ngbe_poll(struct napi_struct *napi, int budget); +void ngbe_disable_rx_queue(struct ngbe_adapter *adapter, + struct ngbe_ring *); +void ngbe_vlan_strip_enable(struct ngbe_adapter *adapter); +void ngbe_vlan_strip_disable(struct ngbe_adapter *adapter); +#ifdef ETHTOOL_OPS_COMPAT +int ethtool_ioctl(struct ifreq *ifr); +#endif +void ngbe_print_tx_hang_status(struct ngbe_adapter *adapter); + +#ifdef HAVE_NGBE_DEBUG_FS +void ngbe_dbg_adapter_init(struct ngbe_adapter *adapter); +void ngbe_dbg_adapter_exit(struct ngbe_adapter *adapter); +void ngbe_dbg_init(void); +void ngbe_dbg_exit(void); +void ngbe_dump(struct ngbe_adapter *adapter); +#endif /* HAVE_NGBE_DEBUG_FS */ + +static inline struct netdev_queue *txring_txq(const struct ngbe_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} + +int ngbe_wol_supported(struct ngbe_adapter *adapter); +int ngbe_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd); +int ngbe_write_uc_addr_list(struct net_device *netdev, int pool); +void 
ngbe_full_sync_mac_table(struct ngbe_adapter *adapter); +int ngbe_add_mac_filter(struct ngbe_adapter *adapter, + u8 *addr, u16 pool); +int ngbe_del_mac_filter(struct ngbe_adapter *adapter, + u8 *addr, u16 pool); +int ngbe_available_rars(struct ngbe_adapter *adapter); +#ifndef HAVE_VLAN_RX_REGISTER +void ngbe_vlan_mode(struct net_device *, u32); +#endif + +#ifdef HAVE_PTP_1588_CLOCK +void ngbe_ptp_init(struct ngbe_adapter *adapter); +void ngbe_ptp_stop(struct ngbe_adapter *adapter); +void ngbe_ptp_suspend(struct ngbe_adapter *adapter); +void ngbe_ptp_overflow_check(struct ngbe_adapter *adapter); +void ngbe_ptp_rx_hang(struct ngbe_adapter *adapter); +void ngbe_ptp_rx_hwtstamp(struct ngbe_adapter *adapter, struct sk_buff *skb); +int ngbe_ptp_set_ts_config(struct ngbe_adapter *adapter, struct ifreq *ifr); +int ngbe_ptp_get_ts_config(struct ngbe_adapter *adapter, struct ifreq *ifr); +void ngbe_ptp_start_cyclecounter(struct ngbe_adapter *adapter); +void ngbe_ptp_reset(struct ngbe_adapter *adapter); +void ngbe_ptp_check_pps_event(struct ngbe_adapter *adapter); +#endif /* HAVE_PTP_1588_CLOCK */ +#ifdef CONFIG_PCI_IOV +void ngbe_sriov_reinit(struct ngbe_adapter *adapter); +#endif + +void ngbe_set_rx_drop_en(struct ngbe_adapter *adapter); + +u32 ngbe_rss_indir_tbl_entries(struct ngbe_adapter *adapter); +void ngbe_store_reta(struct ngbe_adapter *adapter); + +/** + * interrupt masking operations. each bit in PX_ICn correspond to a interrupt. + * disable a interrupt by writing to PX_IMS with the corresponding bit=1 + * enable a interrupt by writing to PX_IMC with the corresponding bit=1 + * trigger a interrupt by writing to PX_ICS with the corresponding bit=1 + **/ +//#define NGBE_INTR_ALL (~0ULL) +#define NGBE_INTR_ALL 0x1FF +#define NGBE_INTR_MISC(A) (1ULL << (A)->num_q_vectors) +#define NGBE_INTR_MISC_VMDQ(A) (1ULL << ((A)->num_q_vectors + (A)->ring_feature[RING_F_VMDQ].offset)) +#define NGBE_INTR_QALL(A) (NGBE_INTR_MISC(A) - 1) +#define NGBE_INTR_Q(i) (1ULL << (i)) +static inline void ngbe_intr_enable(struct ngbe_hw *hw, u64 qmask) +{ + u32 mask; + + mask = (qmask & 0xFFFFFFFF); + if (mask) { + wr32(hw, NGBE_PX_IMC, mask); + } +} + +static inline void ngbe_intr_disable(struct ngbe_hw *hw, u64 qmask) +{ + u32 mask; + + mask = (qmask & 0xFFFFFFFF); + if (mask) + wr32(hw, NGBE_PX_IMS, mask); +} + +static inline void ngbe_intr_trigger(struct ngbe_hw *hw, u64 qmask) +{ + u32 mask; + + mask = (qmask & 0xFFFFFFFF); + if (mask) + wr32(hw, NGBE_PX_ICS, mask); +} + +#define NGBE_RING_SIZE(R) ((R)->count < NGBE_MAX_TXD ? (R)->count / 128 : 0) + +#endif /* _NGBE_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_debugfs.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..9a203a655c5c7b109a171bc77694002fec0cfe45 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_debugfs.c @@ -0,0 +1,778 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + + +#include "ngbe.h" + +#ifdef HAVE_NGBE_DEBUG_FS +#include +#include + +static struct dentry *ngbe_dbg_root; +static int ngbe_data_mode; + +#define NGBE_DATA_FUNC(dm) ((dm) & ~0xFFFF) +#define NGBE_DATA_ARGS(dm) ((dm) & 0xFFFF) +enum ngbe_data_func { + NGBE_FUNC_NONE = (0 << 16), + NGBE_FUNC_DUMP_BAR = (1 << 16), + NGBE_FUNC_DUMP_RDESC = (2 << 16), + NGBE_FUNC_DUMP_TDESC = (3 << 16), + NGBE_FUNC_FLASH_READ = (4 << 16), + NGBE_FUNC_FLASH_WRITE = (5 << 16), +}; + +/** + * data operation + **/ +static ssize_t +ngbe_simple_read_from_pcibar(struct ngbe_adapter *adapter, int res, + void __user *buf, size_t size, loff_t *ppos) +{ + loff_t pos = *ppos; + u32 miss, len, limit = pci_resource_len(adapter->pdev, res); + + if (pos < 0) + return 0; + + limit = (pos + size <= limit ? pos + size : limit); + for (miss = 0; pos < limit && !miss; buf += len, pos += len) { + u32 val = 0, reg = round_down(pos, 4); + u32 off = pos - reg; + + len = (reg + 4 <= limit ? 4 - off : 4 - off - (limit - reg - 4)); + val = ngbe_rd32(adapter->io_addr + reg); + miss = copy_to_user(buf, &val + off, len); + } + + size = pos - *ppos - miss; + *ppos += size; + + return size; +} + +static ssize_t +ngbe_simple_read_from_flash(struct ngbe_adapter *adapter, + void __user *buf, size_t size, loff_t *ppos) +{ + struct ngbe_hw *hw = &adapter->hw; + loff_t pos = *ppos; + size_t ret = 0; + loff_t rpos, rtail; + void __user *to = buf; + size_t available = adapter->hw.flash.dword_size << 2; + + if (pos < 0) + return -EINVAL; + if (pos >= available || !size) + return 0; + if (size > available - pos) + size = available - pos; + + rpos = round_up(pos, 4); + rtail = round_down(pos + size, 4); + if (rtail < rpos) + return 0; + + to += rpos - pos; + while (rpos <= rtail) { + u32 value = ngbe_rd32(adapter->io_addr + rpos); + if (hw->flash.ops.write_buffer(hw, rpos>>2, 1, &value)) { + ret = size; + break; + } + if (4 == copy_to_user(to, &value, 4)) { + ret = size; + break; + } + to += 4; + rpos += 4; + } + + if (ret == size) + return -EFAULT; + size -= ret; + *ppos = pos + size; + return size; +} + +static ssize_t +ngbe_simple_write_to_flash(struct ngbe_adapter *adapter, + const void __user *from, size_t size, loff_t *ppos, size_t available) +{ + return size; +} + +static ssize_t +ngbe_dbg_data_ops_read(struct file *filp, char __user *buffer, + size_t size, loff_t *ppos) +{ + struct ngbe_adapter *adapter = filp->private_data; + u32 func = NGBE_DATA_FUNC(ngbe_data_mode); + + rmb(); + + switch (func) { + case NGBE_FUNC_DUMP_BAR: { + u32 bar = NGBE_DATA_ARGS(ngbe_data_mode); + + return ngbe_simple_read_from_pcibar(adapter, bar, buffer, size, + ppos); + } + case NGBE_FUNC_FLASH_READ: { + return ngbe_simple_read_from_flash(adapter, buffer, size, ppos); + } + case NGBE_FUNC_DUMP_RDESC: { + struct ngbe_ring *ring; + u32 queue = NGBE_DATA_ARGS(ngbe_data_mode); + + if (queue >= adapter->num_rx_queues) + return 0; + queue += VMDQ_P(0) * adapter->queues_per_pool; + ring = adapter->rx_ring[queue]; + + return simple_read_from_buffer(buffer, size, ppos, + ring->desc, ring->size); + } + case NGBE_FUNC_DUMP_TDESC: { + struct ngbe_ring *ring; + u32 queue = NGBE_DATA_ARGS(ngbe_data_mode); + + if (queue >= adapter->num_tx_queues) + return 0; + queue += VMDQ_P(0) * adapter->queues_per_pool; + ring = adapter->tx_ring[queue]; + + return simple_read_from_buffer(buffer, size, ppos, + ring->desc, ring->size); + } + default: + break; + } + + return 
0; +} + +static ssize_t +ngbe_dbg_data_ops_write(struct file *filp, + const char __user *buffer, + size_t size, loff_t *ppos) +{ + struct ngbe_adapter *adapter = filp->private_data; + u32 func = NGBE_DATA_FUNC(ngbe_data_mode); + + rmb(); + + switch (func) { + case NGBE_FUNC_FLASH_WRITE: { + u32 size = NGBE_DATA_ARGS(ngbe_data_mode); + + if (size > adapter->hw.flash.dword_size << 2) + size = adapter->hw.flash.dword_size << 2; + + return ngbe_simple_write_to_flash(adapter, buffer, size, ppos, size); + } + default: + break; + } + + return size; +} +static struct file_operations ngbe_dbg_data_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ngbe_dbg_data_ops_read, + .write = ngbe_dbg_data_ops_write, +}; + +/** + * reg_ops operation + **/ +static char ngbe_dbg_reg_ops_buf[256] = ""; +static ssize_t +ngbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ngbe_adapter *adapter = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: mode=0x%08x\n%s\n", + adapter->netdev->name, ngbe_data_mode, + ngbe_dbg_reg_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +static ssize_t +ngbe_dbg_reg_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ngbe_adapter *adapter = filp->private_data; + char *pc = ngbe_dbg_reg_ops_buf; + int len; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(ngbe_dbg_reg_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(ngbe_dbg_reg_ops_buf, + sizeof(ngbe_dbg_reg_ops_buf)-1, + ppos, + buffer, + count); + if (len < 0) + return len; + + pc[len] = '\0'; + + if (strncmp(pc, "dump", 4) == 0) { + u32 mode = 0; + u16 args; + + pc += 4; + pc += strspn(pc, " \t"); + + if (!strncmp(pc, "bar", 3)) { + pc += 3; + mode = NGBE_FUNC_DUMP_BAR; + } else if (!strncmp(pc, "rdesc", 5)) { + pc += 5; + mode = NGBE_FUNC_DUMP_RDESC; + } else if (!strncmp(pc, "tdesc", 5)) { + pc += 5; + mode = NGBE_FUNC_DUMP_TDESC; + } else { + ngbe_dump(adapter); + } + + if (mode && 1 == sscanf(pc, "%hu", &args)) { + mode |= args; + } + + ngbe_data_mode = mode; + } else if (strncmp(pc, "flash", 4) == 0) { + u32 mode = 0; + u16 args; + + pc += 5; + pc += strspn(pc, " \t"); + if (!strncmp(pc, "read", 3)) { + pc += 4; + mode = NGBE_FUNC_FLASH_READ; + } else if (!strncmp(pc, "write", 5)) { + pc += 5; + mode = NGBE_FUNC_FLASH_WRITE; + } + + if (mode && 1 == sscanf(pc, "%hu", &args)) { + mode |= args; + } + + ngbe_data_mode = mode; + } else if (strncmp(ngbe_dbg_reg_ops_buf, "write", 5) == 0) { + u32 reg, value; + int cnt; + cnt = sscanf(&ngbe_dbg_reg_ops_buf[5], "%x %x", ®, &value); + if (cnt == 2) { + wr32(&adapter->hw, reg, value); + e_dev_info("write: 0x%08x = 0x%08x\n", reg, value); + } else { + e_dev_info("write \n"); + } + } else if (strncmp(ngbe_dbg_reg_ops_buf, "read", 4) == 0) { + u32 reg, value; + int cnt; + cnt = sscanf(&ngbe_dbg_reg_ops_buf[4], "%x", ®); + if (cnt == 1) { + value = rd32(&adapter->hw, reg); + e_dev_info("read 0x%08x = 0x%08x\n", reg, value); + } else { + e_dev_info("read \n"); + } + } else { + e_dev_info("Unknown command %s\n", ngbe_dbg_reg_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" read \n"); + e_dev_info(" write \n"); + } + return count; +} + +static const 
struct file_operations ngbe_dbg_reg_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ngbe_dbg_reg_ops_read, + .write = ngbe_dbg_reg_ops_write, +}; + +/** + * netdev_ops operation + **/ +static char ngbe_dbg_netdev_ops_buf[256] = ""; +static ssize_t +ngbe_dbg_netdev_ops_read(struct file *filp, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ngbe_adapter *adapter = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: mode=0x%08x\n%s\n", + adapter->netdev->name, ngbe_data_mode, + ngbe_dbg_netdev_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +static ssize_t +ngbe_dbg_netdev_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ngbe_adapter *adapter = filp->private_data; + int len; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(ngbe_dbg_netdev_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(ngbe_dbg_netdev_ops_buf, + sizeof(ngbe_dbg_netdev_ops_buf)-1, + ppos, + buffer, + count); + if (len < 0) + return len; + + ngbe_dbg_netdev_ops_buf[len] = '\0'; + + if (strncmp(ngbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { +#if defined(HAVE_TX_TIMEOUT_TXQUEUE) + adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev, 0); +#elif defined(HAVE_NET_DEVICE_OPS) + adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev); +#else + adapter->netdev->tx_timeout(adapter->netdev); +#endif /* HAVE_NET_DEVICE_OPS */ + e_dev_info("tx_timeout called\n"); + } else { + e_dev_info("Unknown command: %s\n", ngbe_dbg_netdev_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" tx_timeout\n"); + } + return count; +} + +static struct file_operations ngbe_dbg_netdev_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ngbe_dbg_netdev_ops_read, + .write = ngbe_dbg_netdev_ops_write, +}; + +/** + * ngbe_dbg_adapter_init - setup the debugfs directory for the adapter + * @adapter: the adapter that is starting up + **/ +void ngbe_dbg_adapter_init(struct ngbe_adapter *adapter) +{ + const char *name = pci_name(adapter->pdev); + struct dentry *pfile; + + adapter->ngbe_dbg_adapter = debugfs_create_dir(name, ngbe_dbg_root); + if (!adapter->ngbe_dbg_adapter) { + e_dev_err("debugfs entry for %s failed\n", name); + return; + } + + pfile = debugfs_create_file("data", 0600, + adapter->ngbe_dbg_adapter, adapter, + &ngbe_dbg_data_ops_fops); + if (!pfile) + e_dev_err("debugfs netdev_ops for %s failed\n", name); + + pfile = debugfs_create_file("reg_ops", 0600, + adapter->ngbe_dbg_adapter, adapter, + &ngbe_dbg_reg_ops_fops); + if (!pfile) + e_dev_err("debugfs reg_ops for %s failed\n", name); + + pfile = debugfs_create_file("netdev_ops", 0600, + adapter->ngbe_dbg_adapter, adapter, + &ngbe_dbg_netdev_ops_fops); + if (!pfile) + e_dev_err("debugfs netdev_ops for %s failed\n", name); +} + +/** + * ngbe_dbg_adapter_exit - clear out the adapter's debugfs entries + * @pf: the pf that is stopping + **/ +void ngbe_dbg_adapter_exit(struct ngbe_adapter *adapter) +{ + if (adapter->ngbe_dbg_adapter) + debugfs_remove_recursive(adapter->ngbe_dbg_adapter); + adapter->ngbe_dbg_adapter = NULL; +} + +/** + * ngbe_dbg_init - start up debugfs for the driver + **/ +void ngbe_dbg_init(void) +{ + ngbe_dbg_root = 
debugfs_create_dir(ngbe_driver_name, NULL); + if (ngbe_dbg_root == NULL) + pr_err("init of debugfs failed\n"); +} + +/** + * ngbe_dbg_exit - clean out the driver's debugfs entries + **/ +void ngbe_dbg_exit(void) +{ + debugfs_remove_recursive(ngbe_dbg_root); +} + +#endif /* HAVE_NGBE_DEBUG_FS */ + +struct ngbe_reg_info { + u32 offset; + u32 length; + char *name; +}; + +static struct ngbe_reg_info ngbe_reg_info_tbl[] = { + + /* General Registers */ + {NGBE_CFG_PORT_CTL, 1, "CTRL"}, + {NGBE_CFG_PORT_ST, 1, "STATUS"}, + + /* RX Registers */ + {NGBE_PX_RR_CFG(0), 1, "SRRCTL"}, + {NGBE_PX_RR_RP(0), 1, "RDH"}, + {NGBE_PX_RR_WP(0), 1, "RDT"}, + {NGBE_PX_RR_CFG(0), 1, "RXDCTL"}, + {NGBE_PX_RR_BAL(0), 1, "RDBAL"}, + {NGBE_PX_RR_BAH(0), 1, "RDBAH"}, + + /* TX Registers */ + {NGBE_PX_TR_BAL(0), 1, "TDBAL"}, + {NGBE_PX_TR_BAH(0), 1, "TDBAH"}, + {NGBE_PX_TR_RP(0), 1, "TDH"}, + {NGBE_PX_TR_WP(0), 1, "TDT"}, + {NGBE_PX_TR_CFG(0), 1, "TXDCTL"}, + + /* MACVLAN */ + {NGBE_PSR_MAC_SWC_VM, 128, "PSR_MAC_SWC_VM"}, + {NGBE_PSR_MAC_SWC_AD_L, 32, "PSR_MAC_SWC_AD"}, + {NGBE_PSR_VLAN_TBL(0), 128, "PSR_VLAN_TBL"}, + + /* List Terminator */ + { .name = NULL } +}; + +/** + * ngbe_regdump - register printout routine + **/ +static void +ngbe_regdump(struct ngbe_hw *hw, struct ngbe_reg_info *reg_info) +{ +#if 0 + int i, n = 0; + u32 buffer[32*8]; + + switch (reg_info->offset) { + case NGBE_PSR_MAC_SWC_AD_L: + for (i = 0; i < reg_info->length; i++) { + wr32(hw, NGBE_PSR_MAC_SWC_IDX, i); + buffer[n++] = + rd32(hw, NGBE_PSR_MAC_SWC_AD_H); + buffer[n++] = + rd32(hw, NGBE_PSR_MAC_SWC_AD_L); + } + break; + default: + for (i = 0; i < reg_info->length; i++) { + buffer[n++] = rd32(hw, + reg_info->offset + 4*i); + } + break; + } + for (i = 0; n && i < 32; i++) { + pr_info("%-20s[%02x-%02x]", reg_info->name, i*8, i*8 + 7); + for (j = 0; n && j < 8; j++, n--) + pr_cont(" %08x", buffer[i*8 + j]); + pr_cont("\n"); + } + BUG_ON(n); +#endif +} + +/** + * ngbe_dump - Print registers, tx-rings and rx-rings + **/ +void ngbe_dump(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_reg_info *reg_info; + int n = 0; + struct ngbe_ring *tx_ring; + struct ngbe_tx_buffer *tx_buffer; + union ngbe_tx_desc *tx_desc; + struct my_u0 { u64 a; u64 b; } *u0; + struct ngbe_ring *rx_ring; +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + union ngbe_rx_desc *rx_desc; + struct ngbe_rx_buffer *rx_buffer_info; + u32 staterr; +#endif + int i = 0; + + if (!netif_msg_hw(adapter)) + return; + + /* Print Registers */ + dev_info(&adapter->pdev->dev, "Register Dump\n"); + pr_info(" Register Name Value\n"); + for (reg_info = ngbe_reg_info_tbl; reg_info->name; reg_info++) { + ngbe_regdump(hw, reg_info); + } + + /* Print TX Ring Summary */ + if (!netdev || !netif_running(netdev)) + return; + + dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); + pr_info(" %s %s %s %s\n", + "Queue [NTU] [NTC] [bi(ntc)->dma ]", + "leng", "ntw", "timestamp"); + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + tx_buffer->next_to_watch, + (u64)tx_buffer->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * 
Transmit Descriptor (Read) + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 |PAYLEN |POPTS|CC|IDX |STA |DCMD |DTYP |MAC |RSV |DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 23 20 19 18 17 16 15 0 + * + * Transmit Descriptor (Write-Back) + * +--------------------------------------------------------------+ + * 0 | RSV [63:0] | + * +--------------------------------------------------------------+ + * 8 | RSV | STA | RSV | + * +--------------------------------------------------------------+ + * 63 36 35 32 31 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("%s%s %s %s %s %s\n", + "T [desc] [address 63:0 ] ", + "[PlPOIdStDDt Ln] [bi->dma ] ", + "leng", "ntw", "timestamp", "bi->skb"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + tx_desc = NGBE_TX_DESC(tx_ring, i); + tx_buffer = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (dma_unmap_len(tx_buffer, len) > 0) { + pr_info("T [0x%03X] %016llX %016llX %016llX " + "%08X %p %016llX %p", + i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + tx_buffer->next_to_watch, + (u64)tx_buffer->time_stamp, + tx_buffer->skb); + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + pr_cont(" NTC/U\n"); + else if (i == tx_ring->next_to_use) + pr_cont(" NTU\n"); + else if (i == tx_ring->next_to_clean) + pr_cont(" NTC\n"); + else + pr_cont("\n"); + + if (netif_msg_pktdata(adapter) && + tx_buffer->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + tx_buffer->skb->data, + dma_unmap_len(tx_buffer, len), + true); + } + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); + pr_info("Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("%5d %5X %5X\n", + n, rx_ring->next_to_use, rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + return; + + dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); + + /* Receive Descriptor Formats + * + * Receive Descriptor (Read) + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * Receive Descriptor (Write-Back) + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS | + * |/ RTT / PCoE_PARAM | | | CNT | Type | Type | + * |/ Flow Dir Flt ID | | | | | | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); + 
pr_info("------------------------------------\n"); + pr_info("%s%s%s", + "R [desc] [ PktBuf A0] ", + "[ HeadBuf DD] [bi->dma ] [bi->skb ] ", + "<-- Adv Rx Read format\n"); + pr_info("%s%s%s", + "RWB[desc] [PcsmIpSHl PtRs] ", + "[vl er S cks ln] ---------------- [bi->skb ] ", + "<-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = NGBE_RX_DESC(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + if (staterr & NGBE_RXD_STAT_DD) { + /* Descriptor Done */ + pr_info("RWB[0x%03X] %016llX " + "%016llX ---------------- %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + rx_buffer_info->skb); + } else { + pr_info("R [0x%03X] %016llX " + "%016llX %016llX %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)rx_buffer_info->page_dma, + rx_buffer_info->skb); + + if (netif_msg_pktdata(adapter) && + rx_buffer_info->page_dma) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + page_address(rx_buffer_info->page) + + rx_buffer_info->page_offset, + ngbe_rx_bufsz(rx_ring), true); + } + } + + if (i == rx_ring->next_to_use) + pr_cont(" NTU\n"); + else if (i == rx_ring->next_to_clean) + pr_cont(" NTC\n"); + else + pr_cont("\n"); + + } + } +#endif +} + diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index ec0e869e9aacfe4708d082f05e9ac7f693c8bed6..801d2444e91e85fe20d0ff5f5bec43b80c1746ea 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -1,57 +1,3921 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + */ +/* ethtool support for ngbe */ + +#include +#include #include -#include #include +#include +#include +#include +#include +#ifdef SIOCETHTOOL +#include + +#include "ngbe.h" +#include "ngbe_hw.h" +#include "ngbe_phy.h" +#ifdef HAVE_ETHTOOL_GET_TS_INFO +#include +#endif + +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif + +#define NGBE_ALL_RAR_ENTRIES 16 + +#ifdef ETHTOOL_OPS_COMPAT +#include "kcompat_ethtool.c" +#endif + +#ifdef HAVE_XDP_SUPPORT +#include +#endif + +#ifdef ETHTOOL_GSTATS +struct ngbe_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define NGBE_NETDEV_STAT(_net_stat) { \ + .stat_string = #_net_stat, \ + .sizeof_stat = sizeof_field(struct net_device_stats, _net_stat), \ + .stat_offset = offsetof(struct net_device_stats, _net_stat) \ +} +static const struct ngbe_stats ngbe_gstrings_net_stats[] = { + NGBE_NETDEV_STAT(rx_packets), + NGBE_NETDEV_STAT(tx_packets), + NGBE_NETDEV_STAT(rx_bytes), + NGBE_NETDEV_STAT(tx_bytes), + NGBE_NETDEV_STAT(rx_errors), + NGBE_NETDEV_STAT(tx_errors), + NGBE_NETDEV_STAT(rx_dropped), + NGBE_NETDEV_STAT(tx_dropped), + NGBE_NETDEV_STAT(collisions), + NGBE_NETDEV_STAT(rx_over_errors), + NGBE_NETDEV_STAT(rx_crc_errors), + NGBE_NETDEV_STAT(rx_frame_errors), + NGBE_NETDEV_STAT(rx_fifo_errors), + NGBE_NETDEV_STAT(rx_missed_errors), + NGBE_NETDEV_STAT(tx_aborted_errors), + NGBE_NETDEV_STAT(tx_carrier_errors), + NGBE_NETDEV_STAT(tx_fifo_errors), + NGBE_NETDEV_STAT(tx_heartbeat_errors), +}; + +#define NGBE_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(struct ngbe_adapter, _stat), \ + .stat_offset = offsetof(struct ngbe_adapter, _stat) \ +} +static struct ngbe_stats ngbe_gstrings_stats[] = { + NGBE_STAT("rx_pkts_nic", stats.gprc), + NGBE_STAT("tx_pkts_nic", stats.gptc), + NGBE_STAT("rx_bytes_nic", stats.gorc), + NGBE_STAT("tx_bytes_nic", stats.gotc), + NGBE_STAT("lsc_int", lsc_int), + NGBE_STAT("tx_busy", tx_busy), + NGBE_STAT("non_eop_descs", non_eop_descs), + NGBE_STAT("rx_broadcast", stats.bprc), + NGBE_STAT("tx_broadcast", stats.bptc), + NGBE_STAT("rx_multicast", stats.mprc), + NGBE_STAT("tx_multicast", stats.mptc), + NGBE_STAT("rx_no_buffer_count", stats.rnbc[0]), + NGBE_STAT("tx_timeout_count", tx_timeout_count), + NGBE_STAT("tx_restart_queue", restart_queue), + NGBE_STAT("rx_long_length_count", stats.roc), + NGBE_STAT("rx_short_length_count", stats.ruc), + NGBE_STAT("tx_flow_control_xon", stats.lxontxc), + NGBE_STAT("rx_flow_control_xon", stats.lxonrxc), + NGBE_STAT("tx_flow_control_xoff", stats.lxofftxc), + NGBE_STAT("rx_flow_control_xoff", stats.lxoffrxc), + NGBE_STAT("rx_csum_offload_good_count", hw_csum_rx_good), + NGBE_STAT("rx_csum_offload_errors", hw_csum_rx_error), + NGBE_STAT("alloc_rx_page_failed", alloc_rx_page_failed), + NGBE_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), +#ifndef NGBE_NO_LRO + NGBE_STAT("lro_aggregated", lro_stats.coal), + NGBE_STAT("lro_flushed", lro_stats.flushed), +#endif /* NGBE_NO_LRO */ + NGBE_STAT("rx_no_dma_resources", hw_rx_no_dma_resources), + NGBE_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + NGBE_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + NGBE_STAT("os2bmc_tx_by_host", stats.o2bspc), + NGBE_STAT("os2bmc_rx_by_host", stats.b2ogprc), +#ifdef HAVE_PTP_1588_CLOCK + NGBE_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + NGBE_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), +#endif /* HAVE_PTP_1588_CLOCK */ +}; + +/* ngbe allocates num_tx_queues and num_rx_queues symmetrically so + * we set the num_rx_queues to 
evaluate to num_tx_queues. This is + * used because we do not have a good way to get the max number of + * rx queues with CONFIG_RPS disabled. + */ +#ifdef HAVE_TX_MQ +#ifdef HAVE_NETDEV_SELECT_QUEUE +#define NGBE_NUM_RX_QUEUES netdev->num_tx_queues +#define NGBE_NUM_TX_QUEUES netdev->num_tx_queues +#else +#define NGBE_NUM_RX_QUEUES adapter->indices +#define NGBE_NUM_TX_QUEUES adapter->indices +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#else /* HAVE_TX_MQ */ +#define NGBE_NUM_RX_QUEUES 1 +#define NGBE_NUM_TX_QUEUES ( \ + ((struct ngbe_adapter *)netdev_priv(netdev))->num_tx_queues) +#endif /* HAVE_TX_MQ */ + +#define NGBE_QUEUE_STATS_LEN ( \ + (NGBE_NUM_TX_QUEUES + NGBE_NUM_RX_QUEUES) * \ + (sizeof(struct ngbe_queue_stats) / sizeof(u64))) +#define NGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ngbe_gstrings_stats) +#define NGBE_NETDEV_STATS_LEN ARRAY_SIZE(ngbe_gstrings_net_stats) +#define NGBE_PB_STATS_LEN ( \ + (sizeof(((struct ngbe_adapter *)0)->stats.pxonrxc) + \ + sizeof(((struct ngbe_adapter *)0)->stats.pxontxc) + \ + sizeof(((struct ngbe_adapter *)0)->stats.pxoffrxc) + \ + sizeof(((struct ngbe_adapter *)0)->stats.pxofftxc)) \ + / sizeof(u64)) +#define NGBE_VF_STATS_LEN \ + ((((struct ngbe_adapter *)netdev_priv(netdev))->num_vfs) * \ + (sizeof(struct vf_stats) / sizeof(u64))) +#define NGBE_STATS_LEN (NGBE_GLOBAL_STATS_LEN + \ + NGBE_NETDEV_STATS_LEN + \ + NGBE_PB_STATS_LEN + \ + NGBE_QUEUE_STATS_LEN + \ + NGBE_VF_STATS_LEN) + +#endif /* ETHTOOL_GSTATS */ +#ifdef ETHTOOL_TEST +static const char ngbe_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; +#define NGBE_TEST_LEN (sizeof(ngbe_gstrings_test) / ETH_GSTRING_LEN) +#endif /* ETHTOOL_TEST */ + +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT +struct ngbe_priv_flags { + char flag_string[ETH_GSTRING_LEN]; + u64 flag; + bool read_only; +}; + +#define NGBE_PRIV_FLAG(_name, _flag, _read_only) { \ + .flag_string = _name, \ + .flag = _flag, \ + .read_only = _read_only, \ +} + +static const struct ngbe_priv_flags ngbe_gstrings_priv_flags[] = { + NGBE_PRIV_FLAG("lldp", NGBE_ETH_PRIV_FLAG_LLDP, 0), +}; + +#define NGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ngbe_gstrings_priv_flags) + +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + +#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +static int ngbe_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 supported_link = 0; + u32 link_speed = 0; + bool autoneg = false; + bool link_up = 0; + u16 yt_mode = 0; + unsigned long flags; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); + + /* set the supported link speeds */ + if ((hw->phy.type == ngbe_phy_m88e1512_sfi) || + (hw->phy.type == ngbe_phy_internal_yt8521s_sfi)) { + if (supported_link & NGBE_LINK_SPEED_1GB_FULL) +#ifndef HAVE_NOT_SUPPORTED_1000baseX_Full + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseX_Full); +#else + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); +#endif + + if (supported_link & NGBE_LINK_SPEED_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); + } else if (hw->phy.type == ngbe_phy_yt8521s_sfi) { + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_ext_yt8521s(hw, 0xA001, 0, &yt_mode); + 
spin_unlock_irqrestore(&hw->phy_lock, flags); + if((yt_mode & 7) == 0) {//utp_to_rgmii + if (supported_link & NGBE_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + + if (supported_link & NGBE_LINK_SPEED_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); + + if (supported_link & NGBE_LINK_SPEED_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); + } else { + if (supported_link & NGBE_LINK_SPEED_1GB_FULL) +#ifndef HAVE_NOT_SUPPORTED_1000baseX_Full + ethtool_link_ksettings_add_link_mode(cmd, supported,1000baseX_Full); +#else + ethtool_link_ksettings_add_link_mode(cmd, supported,1000baseT_Full); +#endif + + if (supported_link & NGBE_LINK_SPEED_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported,100baseT_Full); + } + } else if ((hw->phy.type == ngbe_phy_internal) || + (hw->phy.type == ngbe_phy_m88e1512)) { + if (supported_link & NGBE_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + + if (supported_link & NGBE_LINK_SPEED_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); + + if (supported_link & NGBE_LINK_SPEED_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); + + } else { + if (supported_link & NGBE_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + } + + /* set the advertised speeds */ + if ((hw->phy.type == ngbe_phy_m88e1512_sfi) || + (hw->phy.type == ngbe_phy_internal_yt8521s_sfi)) { + if (hw->phy.autoneg_advertised) { + if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + + if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_1GB_FULL) +#ifndef HAVE_NOT_SUPPORTED_1000baseX_Full + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseX_Full); +#else + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); +#endif + + } else { + if (hw->phy.force_speed & NGBE_LINK_SPEED_1GB_FULL) +#ifndef HAVE_NOT_SUPPORTED_1000baseX_Full + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseX_Full); +#else + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); +#endif + if (hw->phy.force_speed & NGBE_LINK_SPEED_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + + if (hw->phy.force_speed & NGBE_LINK_SPEED_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); + } + }else if (hw->phy.type == ngbe_phy_yt8521s_sfi) { + if ((yt_mode & 7) == 0) { + if (hw->phy.autoneg_advertised) { + if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); + + if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + + if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); + } else { + if (hw->phy.force_speed & NGBE_LINK_SPEED_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); + + if (hw->phy.force_speed & NGBE_LINK_SPEED_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + + if (hw->phy.force_speed & NGBE_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); + } + } else { + if (hw->phy.autoneg_advertised) { + if (hw->phy.autoneg_advertised & 
NGBE_LINK_SPEED_1GB_FULL) +#ifndef HAVE_NOT_SUPPORTED_1000baseX_Full + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseX_Full); +#else + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); +#endif + + if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + + if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); + } else { + if (hw->phy.force_speed & NGBE_LINK_SPEED_1GB_FULL) +#ifndef HAVE_NOT_SUPPORTED_1000baseX_Full + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseX_Full); +#else + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); +#endif + + if (hw->phy.force_speed & NGBE_LINK_SPEED_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + + if (hw->phy.force_speed & NGBE_LINK_SPEED_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); + } + } + } else { + if (hw->phy.autoneg_advertised) { + if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); + + if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + + if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); + } else { + if (hw->phy.force_speed & NGBE_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); + if (hw->phy.force_speed & NGBE_LINK_SPEED_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + if (hw->phy.force_speed & NGBE_LINK_SPEED_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); + } + } + + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + if (autoneg) { + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + cmd->base.autoneg = AUTONEG_ENABLE; + } else + cmd->base.autoneg = AUTONEG_DISABLE; + + /* Determine the remaining settings based on the PHY type. 
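+ * The internal PHY and Marvell 88E1512 copper PHYs report a TP port,
+ * the SFI variants report FIBRE, and for yt8521s the port type follows
+ * the mode read from extended register 0xA001 above (mode 0 means
+ * UTP-to-RGMII, i.e. twisted pair).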
*/ + switch (adapter->hw.phy.type) { + case ngbe_phy_internal: + case ngbe_phy_m88e1512: + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + cmd->base.port = PORT_TP; + break; + case ngbe_phy_yt8521s_sfi: + if((yt_mode & 7) == 0) {//utp_to_rgmii + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + cmd->base.port = PORT_TP; + } else { + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_FIBRE; + } + break; + case ngbe_phy_internal_yt8521s_sfi: + case ngbe_phy_m88e1512_sfi: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_FIBRE; + break; + default: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_OTHER; + break; + } + + if (!in_interrupt()) { + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + } else { + /* + * this case is a special workaround for RHEL5 bonding + * that calls this routine from interrupt context + */ + link_speed = adapter->link_speed; + link_up = adapter->link_up; + } + + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + + switch (hw->fc.requested_mode) { + case ngbe_fc_full: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + break; + case ngbe_fc_rx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Asym_Pause); + break; + case ngbe_fc_tx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Asym_Pause); + break; + default: + ethtool_link_ksettings_del_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_del_link_mode(cmd, advertising, Asym_Pause); + } + + if (link_up) { + switch (link_speed) { + case NGBE_LINK_SPEED_1GB_FULL: + cmd->base.speed = SPEED_1000; + break; + case NGBE_LINK_SPEED_100_FULL: + cmd->base.speed = SPEED_100; + break; + case NGBE_LINK_SPEED_10_FULL: + cmd->base.speed = SPEED_10; + break; + default: + break; + } + cmd->base.duplex = DUPLEX_FULL; + } else { + cmd->base.speed = -1; + cmd->base.duplex = -1; + } + + return 0; +} +#else /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ +int ngbe_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 supported_link = 0; + u32 link_speed = 0; + bool autoneg = false; + bool link_up = 0; + u16 value = 0; + unsigned long flags; + + hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); + + /* set the supported link speeds */ + if (supported_link & NGBE_LINK_SPEED_1GB_FULL) + ecmd->supported |= SUPPORTED_1000baseT_Full; + if (supported_link & NGBE_LINK_SPEED_100_FULL) + ecmd->supported |= SUPPORTED_100baseT_Full; + if (supported_link & NGBE_LINK_SPEED_10_FULL) + ecmd->supported |= SUPPORTED_10baseT_Full; + + /* set the advertised speeds */ + if (hw->phy.autoneg_advertised) { + if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_100_FULL) + ecmd->advertising |= ADVERTISED_100baseT_Full; + if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_1GB_FULL) { + if (ecmd->supported & SUPPORTED_1000baseKX_Full) + ecmd->advertising |= ADVERTISED_1000baseKX_Full; + else + ecmd->advertising |= ADVERTISED_1000baseT_Full; + } + if 
(hw->phy.autoneg_advertised & NGBE_LINK_SPEED_10_FULL) + ecmd->advertising |= ADVERTISED_10baseT_Full; + } else { + if (hw->phy.force_speed & NGBE_LINK_SPEED_1GB_FULL) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (hw->phy.force_speed & NGBE_LINK_SPEED_100_FULL) + ecmd->advertising |= ADVERTISED_100baseT_Full; + if (hw->phy.force_speed & NGBE_LINK_SPEED_10_FULL) + ecmd->advertising |= ADVERTISED_10baseT_Full; + } + + ecmd->supported |= SUPPORTED_Autoneg; + if (autoneg) { + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->autoneg = AUTONEG_ENABLE; + } else + ecmd->autoneg = AUTONEG_DISABLE; + + ecmd->transceiver = XCVR_EXTERNAL; + + /* Determine the remaining settings based on the PHY type. */ + switch (adapter->hw.phy.type) { + case ngbe_phy_internal: + case ngbe_phy_m88e1512: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + break; + case ngbe_phy_sfp_passive_tyco: + case ngbe_phy_sfp_passive_unknown: + case ngbe_phy_sfp_ftl: + case ngbe_phy_sfp_avago: + case ngbe_phy_sfp_intel: + case ngbe_phy_sfp_unknown: + switch (adapter->hw.phy.sfp_type) { + /* SFP+ devices, further checking needed */ + case ngbe_sfp_type_da_cu: + case ngbe_sfp_type_da_cu_core0: + case ngbe_sfp_type_da_cu_core1: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_DA; + break; + case ngbe_sfp_type_sr: + case ngbe_sfp_type_lr: + case ngbe_sfp_type_srlr_core0: + case ngbe_sfp_type_srlr_core1: + case ngbe_sfp_type_1g_sx_core0: + case ngbe_sfp_type_1g_sx_core1: + case ngbe_sfp_type_1g_lx_core0: + case ngbe_sfp_type_1g_lx_core1: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + break; + case ngbe_sfp_type_not_present: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_NONE; + break; + case ngbe_sfp_type_1g_cu_core0: + case ngbe_sfp_type_1g_cu_core1: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + break; + case ngbe_sfp_type_unknown: + default: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_OTHER; + break; + } + break; + case ngbe_phy_yt8521s_sfi: + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_ext_yt8521s(hw, 0xA001, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + if((value & 7) == 0) {/* utp_to_rgmii */ + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + } else { + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + } + break; + case ngbe_phy_internal_yt8521s_sfi: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + break; + case ngbe_phy_unknown: + case ngbe_phy_generic: + case ngbe_phy_sfp_unsupported: + default: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_OTHER; + break; + } + + if (!in_interrupt()) { + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + } else { + /* + * this case is a special workaround for RHEL5 bonding + * that calls this routine from interrupt context + */ + link_speed = adapter->link_speed; + link_up = adapter->link_up; + } + + ecmd->supported |= SUPPORTED_Pause; + + switch (hw->fc.requested_mode) { + case ngbe_fc_full: + ecmd->advertising |= ADVERTISED_Pause; + break; + case ngbe_fc_rx_pause: + ecmd->advertising |= ADVERTISED_Pause | + 
ADVERTISED_Asym_Pause; + break; + case ngbe_fc_tx_pause: + ecmd->advertising |= ADVERTISED_Asym_Pause; + break; + default: + ecmd->advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + } + + if (link_up) { + switch (link_speed) { + case NGBE_LINK_SPEED_1GB_FULL: + ecmd->speed = SPEED_1000; + break; + case NGBE_LINK_SPEED_100_FULL: + ecmd->speed = SPEED_100; + break; + case NGBE_LINK_SPEED_10_FULL: + ecmd->speed = SPEED_10; + break; + default: + break; + } + ecmd->duplex = DUPLEX_FULL; + } else { + ecmd->speed = -1; + ecmd->duplex = -1; + } + + return 0; +} +#endif /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ + +#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +static int ngbe_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 advertised; + int err = 0; + struct ethtool_link_ksettings temp_ks; + + if (!netif_running(netdev)) + return -EINVAL; + + if ((hw->phy.media_type == ngbe_media_type_copper) || + (hw->phy.multispeed_fiber)) { + memcpy(&temp_ks, cmd, sizeof(struct ethtool_link_ksettings)); + /* To be compatible with test cases */ + if ((hw->phy.type == ngbe_phy_m88e1512_sfi) || + (hw->phy.type == ngbe_phy_yt8521s_sfi) || + (hw->phy.type == ngbe_phy_internal_yt8521s_sfi)) { + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseT_Full)) { + ethtool_link_ksettings_add_link_mode(&temp_ks, supported, + 1000baseT_Full); +#ifndef HAVE_NOT_SUPPORTED_1000baseX_Full + ethtool_link_ksettings_del_link_mode(&temp_ks, supported, + 1000baseX_Full); +#endif + } + } + + /* + * this function does not support duplex forcing, but can + * limit the advertising of the adapter to the specified speed + */ + if (!bitmap_subset(cmd->link_modes.advertising, + temp_ks.link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS)) + return -EINVAL; + + advertised = 0; + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = true; +#ifndef HAVE_NOT_SUPPORTED_1000baseX_Full + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseT_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseX_Full)) +#else + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseT_Full)) +#endif + advertised |= NGBE_LINK_SPEED_1GB_FULL; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 100baseT_Full)) + advertised |= NGBE_LINK_SPEED_100_FULL; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10baseT_Full)) + advertised |= NGBE_LINK_SPEED_10_FULL; + } else { + if (cmd->base.duplex == DUPLEX_HALF) { + e_err(probe, "unsupported duplex\n"); + return -EINVAL; + } + + switch (cmd->base.speed) { + case SPEED_10: + advertised = NGBE_LINK_SPEED_10_FULL; + break; + case SPEED_100: + advertised = NGBE_LINK_SPEED_100_FULL; + break; + case SPEED_1000: + advertised = NGBE_LINK_SPEED_1GB_FULL; + break; + default: + e_err(probe, "unsupported speed\n"); + return -EINVAL; + } + + hw->mac.autoneg = false; + } + + hw->mac.autotry_restart = true; + hw->phy.ops.setup_link(hw, advertised, true); + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->phy.autoneg_advertised = advertised; + } else { + hw->phy.autoneg_advertised = NGBE_LINK_SPEED_UNKNOWN; + hw->phy.force_speed = advertised; + } + } else { + /* in this case we currently only support 1Gb/FULL */ + u32 speed = cmd->base.speed; + if ((cmd->base.autoneg == AUTONEG_ENABLE) || + (!ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseT_Full)) || + 
(speed + cmd->base.duplex != SPEED_1000 + DUPLEX_FULL)) + return -EINVAL; + } + + return err; +} + +#else /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ +static int ngbe_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 advertised; + int err = 0; + + if (!netif_running(netdev)) + return -EINVAL; + + if ((hw->phy.media_type == ngbe_media_type_copper) || + (hw->phy.multispeed_fiber)) { + /* + * this function does not support duplex forcing, but can + * limit the advertising of the adapter to the specified speed + */ + if (ecmd->advertising & ~ecmd->supported) { + return -EINVAL; + } + + advertised = 0; + + if (ecmd->autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = true; + if (ecmd->advertising & ADVERTISED_1000baseT_Full) + advertised |= NGBE_LINK_SPEED_1GB_FULL; + + if (ecmd->advertising & ADVERTISED_100baseT_Full) + advertised |= NGBE_LINK_SPEED_100_FULL; + + if (ecmd->advertising & ADVERTISED_10baseT_Full) + advertised |= NGBE_LINK_SPEED_10_FULL; + } else { + if (ecmd->duplex == DUPLEX_HALF) { + e_err(probe, "unsupported duplex\n"); + return -EINVAL; + } + + switch (ecmd->speed) { + case SPEED_10: + advertised = NGBE_LINK_SPEED_10_FULL; + break; + case SPEED_100: + advertised = NGBE_LINK_SPEED_100_FULL; + break; + case SPEED_1000: + advertised = NGBE_LINK_SPEED_1GB_FULL; + break; + default: + e_err(probe, "unsupported speed\n"); + return -EINVAL; + } + hw->mac.autoneg = false; + } + + hw->mac.autotry_restart = true; + hw->phy.ops.setup_link(hw, advertised, true); + if (ecmd->autoneg == AUTONEG_ENABLE) { + hw->phy.autoneg_advertised = advertised; + } else { + hw->phy.autoneg_advertised = NGBE_LINK_SPEED_UNKNOWN; + hw->phy.force_speed = advertised; + } + } else { + /* in this case we currently only support 1Gb/FULL */ + u32 speed = ethtool_cmd_speed(ecmd); + if ((ecmd->autoneg == AUTONEG_ENABLE) || + (ecmd->advertising != ADVERTISED_1000baseT_Full) || + (speed + ecmd->duplex != SPEED_1000 + DUPLEX_FULL)) + return -EINVAL; + } + + return err; +} +#endif /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ + +static void ngbe_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + + if (!hw->fc.disable_fc_autoneg) + pause->autoneg = 1; + else + pause->autoneg = 0; + + if (hw->fc.current_mode == ngbe_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == ngbe_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == ngbe_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int ngbe_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_fc_info fc = hw->fc; + + fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); + + if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) + fc.requested_mode = ngbe_fc_full; + else if (pause->rx_pause) + fc.requested_mode = ngbe_fc_rx_pause; + else if (pause->tx_pause) + fc.requested_mode = ngbe_fc_tx_pause; + else + fc.requested_mode = ngbe_fc_none; + + /* if the thing changed then we'll update and use new autoneg */ + if (memcmp(&fc, &hw->fc, sizeof(struct ngbe_fc_info))) { + hw->fc = fc; + if (netif_running(netdev)) + ngbe_reinit_locked(adapter); + else + ngbe_reset(adapter); + } + + return 0;
+} + +static u32 ngbe_get_msglevel(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void ngbe_set_msglevel(struct net_device *netdev, u32 data) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int ngbe_get_regs_len(struct net_device __always_unused *netdev) +{ +#define NGBE_REGS_LEN 4096 + return NGBE_REGS_LEN * sizeof(u32); +} + +#define NGBE_GET_STAT(_A_, _R_) (_A_->stats._R_) + + +static void ngbe_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, + void *p) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u32 i; + u32 id = 0; + + memset(p, 0, NGBE_REGS_LEN * sizeof(u32)); + regs_buff[NGBE_REGS_LEN - 1] = 0x55555555; + + regs->version = hw->revision_id << 16 | + hw->device_id; + + /* Global Registers */ + /* chip control */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_PWR);//0 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_CTL);//1 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_PF_SM);//2 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_RST);//3 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_ST);//4 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_SWSM);//5 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_RST_ST);//6 + /* pvt sensor */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_CTL);//7 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_EN);//8 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_ST);//9 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_ALARM_THRE);//10 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_DALARM_THRE);//11 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_INT_EN);//12 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_ALARM_ST);//13 + /* Fmgr Register */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_CMD);//14 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_DATA);//15 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_STATUS);//16 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_USR_CMD);//17 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_CMDCFG0);//18 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_CMDCFG1);//19 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_ILDR_STATUS);//20 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_ILDR_SWPTR);//21 + + /* Port Registers */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_PORT_CTL);//22 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_PORT_ST);//23 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_EX_VTYPE);//24 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_TCP_TIME);//25 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_LED_CTL);//26 + /* GPIO */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_DR);//27 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_DDR);//28 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_CTL);//29 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_INTEN);//30 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_INTMASK);//31 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_INTSTATUS);//32 + /* TX TPH */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_TPH_TDESC);//33 + /* RX TPH */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_TPH_RDESC);//34 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_TPH_RHDR);//35 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_TPH_RPL);//36 + + /* TDMA */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_CTL);//37 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_POOL_TE);//38 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_PB_THRE);//39 + + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_LLQ);//40 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_ETYPE_LB_L);//41 + + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_ETYPE_AS_L);//42 + regs_buff[id++] = 
NGBE_R32_Q(hw, NGBE_TDM_MAC_AS_L);//43 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_VLAN_AS_L);//44 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_TCP_FLG_L);//45 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_TCP_FLG_H);//46 + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_VLAN_INS(i));//47-54 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_ETAG_INS(i));//55-62 + } + /* Transmit QOS */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_PBWARB_CTL);//63 + + /* statistics */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_DRP_CNT);//64 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_SEC_DRP);//65 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_PKT_CNT);//66 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_BYTE_CNT_L);//67 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_BYTE_CNT_H);//68 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_OS2BMC_CNT);//69 + + /* RDMA */ + /* receive control */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_ARB_CTL);//70 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_POOL_RE);//71 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_PF_QDE);//72 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_PF_HIDE);//73 + /* static */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_DRP_PKT);//74 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_PKT_CNT);//75 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_BYTE_CNT_L);//76 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_BYTE_CNT_H);//77 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_BMC2OS_CNT);//78 + + /* RDB */ + /*flow control */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RFCV);//79 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RFCL);//80 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RFCH);//81 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RFCRT);//82 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RFCC);//83 + /* receive packet buffer */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PB_CTL);//84 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PB_SZ);//85 + + /* lli interrupt */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_LLI_THRE);//86 + /* ring assignment */ + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PL_CFG(i));//87-94 + } + for (i = 0; i < 32; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RSSTBL(i));//95-126 + } + for (i = 0; i < 10; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RSSRK(i));//127-136 + } + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RA_CTL);//137 + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_5T_SDP(i));//138-145 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_5T_CTL0(i));//146-153 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_5T_CTL1(i));//154-161 + } + + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_SYN_CLS);//162 + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_ETYPE_CLS(i));//163-170 + } + /* statistics */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_MPCNT);//171 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PKT_CNT);//172 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_REPLI_CNT);//173 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_DRP_CNT);//174 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_LXONTXC);//175 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_LXOFFTXC);//176 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PFCMACDAL);//177 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PFCMACDAH);//178 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_TXSWERR);//179 + + /* PSR */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_CTL);//180 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MAX_SZ);//181 + regs_buff[id++] = NGBE_R32_Q(hw, 
NGBE_PSR_VLAN_CTL);//182 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VM_CTL);//183 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_PKT_CNT);//184 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MNG_PKT_CNT);//185 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_DBG_DOP_CNT);//186 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MNG_DOP_CNT);//187 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VM_FLP_L);//188 + + /* vm l2 control */ + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VM_L2CTL(i));//189-196 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_ETYPE_SWC(i));//197-204 + } + for (i = 0; i < 128; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MC_TBL(i));//205-332 + } + for (i = 0; i < 128; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_UC_TBL(i));///333-460 + } + for (i = 0; i < 128; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VLAN_TBL(i));//461-588 + } + /* mac switcher */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MAC_SWC_AD_L);//589 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MAC_SWC_AD_H);//590 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MAC_SWC_VM);//591 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MAC_SWC_IDX);//592 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VLAN_SWC);//593 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VLAN_SWC_VM_L);//594 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VLAN_SWC_IDX);//595 + + /* mirror */ + for (i = 0; i < 4; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MR_CTL(i));//596-599 + } + for (i = 0; i < 4; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MR_VLAN_L(i));//600-603 + } + for (i = 0; i < 4; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MR_VM_L(i));//604-607 + } + /* 1588 */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_CTL);//608 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_STMPL);//609 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_STMPH);//610 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_ATTRL);//611 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_ATTRH);//612 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_MSGTYPE);//613 + /* wake up */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_WKUP_CTL);//614 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_WKUP_IPV);//615 + for (i = 0; i < 4; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_WKUP_IP4TBL(i));//616-619 + } + for (i = 0; i < 4; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_WKUP_IP6TBL(i));//620-623 + } + for (i = 0; i < 16; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_LAN_FLEX_DW_L(i));//624-639 + } + for (i = 0; i < 16; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_LAN_FLEX_DW_H(i));//640-655 + } + for (i = 0; i < 16; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_LAN_FLEX_MSK(i));//656-671 + } + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_LAN_FLEX_CTL);//672 + + /* TDB */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_TFCS);//673 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_PB_SZ);//674 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_PBRARB_CTL);//675 + /* statistic */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_OUT_PKT_CNT);//676 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_MNG_PKT_CNT);//677 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_LB_PKT_CNT);//678 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_MNG_LARGE_DOP_CNT);//679 + + /* TSEC */ + /* general tsec */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_CTL);//680 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_ST);//681 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_BUF_AF);//682 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_BUF_AE);//683 + regs_buff[id++] = 
NGBE_R32_Q(hw, NGBE_TSEC_MIN_IFG);//684 + /* 1588 */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_CTL);//685 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_STMPL);//686 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_STMPH);//687 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_SYSTIML);//688 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_SYSTIMH);//689 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_INC);//690 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_ADJL);//691 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_ADJH);//692 + + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_INT_ST);//693 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_INT_EN);//694 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_AUX_CTL);//695 + for (i = 0; i < 4; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_SDP(i));//696-699 + } + + /* RSEC */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RSEC_CTL);//700 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RSEC_ST);//701 + /* mac wrapper */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_TX_CFG);//702 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_RX_CFG);//703 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_PKT_FLT);//704 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_WDG_TIMEOUT);//705 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_TX_FLOW_CTRL);//706 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_RX_FLOW_CTRL);//707 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_INT_ST);//708 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_INT_EN);//709 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RX_FRAME_CNT_GOOD_BAD_LOW);//710 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TX_FRAME_CNT_GOOD_BAD_LOW);//711 + + /* BAR register */ + /* pf interrupt register */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_MISC_IC);//712 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_MISC_ICS);//713 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_MISC_IEN);//714 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_GPIE);//715 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_IC);//716 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_ICS);//717 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_IMS);//718 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_IMC);//719 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_ISB_ADDR_L);//720 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_ISB_ADDR_H);//721 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_ITRSEL);//722 + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_ITR(i));//723-730 + } + for (i = 0; i < 4; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_IVAR(i));//731-734 + } + + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_MISC_IVAR);//735 + /* pf receive ring register */ + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_RR_BAL(i));//736-743 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_RR_BAH(i));//744-751 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_RR_WP(i));//752-759 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_RR_RP(i));//760-767 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_RR_CFG(i));//768-775 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_TR_BAL(i));//776-783 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_TR_BAH(i));//784-791 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_TR_WP(i));//792-709 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_TR_RP(i));//800-807 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_TR_CFG(i));//808-815 + } +} + +static 
int ngbe_get_eeprom_len(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + return adapter->hw.eeprom.word_size * 2; +} + +static int ngbe_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word, eeprom_len; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_len = last_word - first_word + 1; + + eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len, + eeprom_buff); + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < eeprom_len; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int ngbe_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EINVAL; + + max_len = hw->eeprom.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = eeprom_buff; + + if (eeprom->offset & 1) { + /* + * need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = hw->eeprom.ops.read(hw, first_word, + &eeprom_buff[0]); + if (ret_val) + goto err; + + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* + * need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = hw->eeprom.ops.read(hw, last_word, + &eeprom_buff[last_word - first_word]); + if (ret_val) + goto err; + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = hw->eeprom.ops.write_buffer(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + + /* Update the checksum */ + if (ret_val == 0) + hw->eeprom.ops.update_checksum(hw); + +err: + kfree(eeprom_buff); + return ret_val; +} + +static void ngbe_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + strncpy(drvinfo->driver, ngbe_driver_name, + sizeof(drvinfo->driver) - 1); + strncpy(drvinfo->version, ngbe_driver_version, + sizeof(drvinfo->version) - 1); + strncpy(drvinfo->fw_version, adapter->eeprom_id, + sizeof(drvinfo->fw_version) - 1); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info) - 1); + if (adapter->num_tx_queues <= NGBE_NUM_RX_QUEUES) { + drvinfo->n_stats = NGBE_STATS_LEN - + (NGBE_NUM_RX_QUEUES - adapter->num_tx_queues) * + (sizeof(struct ngbe_queue_stats) / sizeof(u64)) * 2; + } else { + 
drvinfo->n_stats = NGBE_STATS_LEN; + } + drvinfo->testinfo_len = NGBE_TEST_LEN; + drvinfo->regdump_len = ngbe_get_regs_len(netdev); +} + +static void ngbe_get_ringparam(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *ringp, + struct netlink_ext_ack *extack) +#else + struct ethtool_ringparam *ring) +#endif +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = NGBE_MAX_RXD; + ring->tx_max_pending = NGBE_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int ngbe_set_ringparam(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *ringp, + struct netlink_ext_ack *extack) +#else + struct ethtool_ringparam *ring) +#endif +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_ring *temp_ring; + int i, err = 0; + u32 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_tx_count = clamp_t(u32, ring->tx_pending, + NGBE_MIN_TXD, NGBE_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, NGBE_REQ_TX_DESCRIPTOR_MULTIPLE); + + new_rx_count = clamp_t(u32, ring->rx_pending, + NGBE_MIN_RXD, NGBE_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, NGBE_REQ_RX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__NGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->xdp_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + /* allocate temporary buffer to store rings in */ + i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); + temp_ring = vmalloc(i * sizeof(struct ngbe_ring)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + ngbe_down(adapter); + + /* + * Setup new Tx resources and free the old Tx resources in that order. + * We can then assign the new resources to the rings via a memcpy. + * The advantage to this approach is that we are guaranteed to still + * have resources even in the case of an allocation failure. 
+ */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct ngbe_ring)); + + temp_ring[i].count = new_tx_count; + err = ngbe_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + ngbe_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + ngbe_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct ngbe_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + /* Repeat the process for the Rx rings if needed */ + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct ngbe_ring)); +#ifdef HAVE_XDP_BUFF_RXQ + xdp_rxq_info_unreg(&temp_ring[i].xdp_rxq); +#endif + temp_ring[i].count = new_rx_count; + err = ngbe_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + ngbe_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + ngbe_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct ngbe_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } + +err_setup: + ngbe_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__NGBE_RESETTING, &adapter->state); + return err; +} + +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT +static int ngbe_get_stats_count(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + if (adapter->num_tx_queues <= NGBE_NUM_RX_QUEUES) { + return NGBE_STATS_LEN - (NGBE_NUM_RX_QUEUES - adapter->num_tx_queues) * + (sizeof(struct ngbe_queue_stats) / sizeof(u64)) * 2; + } else { + return NGBE_STATS_LEN; + } +} + +#else /* HAVE_ETHTOOL_GET_SSET_COUNT */ +static int ngbe_get_sset_count(struct net_device *netdev, int sset) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + switch (sset) { + case ETH_SS_TEST: + return NGBE_TEST_LEN; + case ETH_SS_STATS: + if (adapter->num_tx_queues <= NGBE_NUM_RX_QUEUES) { + return NGBE_STATS_LEN - (NGBE_NUM_RX_QUEUES - adapter->num_tx_queues) * + (sizeof(struct ngbe_queue_stats) / sizeof(u64)) * 2; + } else { + return NGBE_STATS_LEN; + } + case ETH_SS_PRIV_FLAGS: + return NGBE_PRIV_FLAGS_STR_LEN; + default: + return -EOPNOTSUPP; + } +} + +/** + * ngbe_get_priv_flags - report device private flags + * @dev: network interface device structure + * + * The get string set count and the string set should be matched for each + * flag returned. Add new strings for each flag to the ngbe_gstrings_priv_flags + * array. + * + * Returns a u32 bitmap of flags.
+ **/ +static u32 ngbe_get_priv_flags(struct net_device *dev) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + u32 i , ret_flags = 0; + + for (i = 0; i < NGBE_PRIV_FLAGS_STR_LEN; i++) { + const struct ngbe_priv_flags *priv_flags; + + priv_flags = &ngbe_gstrings_priv_flags[i]; + + if (priv_flags->flag & adapter->eth_priv_flags) + ret_flags |= BIT(i); + } + return ret_flags; +} + +/** + * ngbe_set_priv_flags - set private flags + * @dev: network interface device structure + * @flags: bit flags to be set + **/ +static int ngbe_set_priv_flags(struct net_device *dev, u32 flags) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + u32 orig_flags, new_flags, changed_flags; + u32 i; + int status = 0; + + orig_flags = adapter->eth_priv_flags; + new_flags = orig_flags; + + if (!netif_running(dev)) + return -EINVAL; + + for (i = 0; i < NGBE_PRIV_FLAGS_STR_LEN; i++) { + const struct ngbe_priv_flags *priv_flags; + + priv_flags = &ngbe_gstrings_priv_flags[i]; + + if (flags & BIT(i)) + new_flags |= priv_flags->flag; + else + new_flags &= ~(priv_flags->flag); + + /* If this is a read-only flag, it can't be changed */ + if (priv_flags->read_only && + ((orig_flags ^ new_flags) & ~BIT(i))) + return -EOPNOTSUPP; + } + + changed_flags = orig_flags ^ new_flags; + + if(!changed_flags) return 0; + + if (changed_flags & NGBE_ETH_PRIV_FLAG_LLDP) { + status = ngbe_hic_write_lldp(&adapter->hw, (u32)(new_flags & NGBE_ETH_PRIV_FLAG_LLDP)); + if(!status) + adapter->eth_priv_flags = new_flags; + } + + return status; +} + + +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ +static void ngbe_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *net_stats = &netdev->stats; +#else + struct net_device_stats *net_stats = &adapter->net_stats; +#endif + u64 *queue_stat; + int stat_count, k; +#ifdef HAVE_NDO_GET_STATS64 + unsigned int start; +#endif + struct ngbe_ring *ring, *xdp_ring; + int i, j; + char *p; + + ngbe_update_stats(adapter); + + for (i = 0; i < NGBE_NETDEV_STATS_LEN; i++) { + p = (char *)net_stats + ngbe_gstrings_net_stats[i].stat_offset; + data[i] = (ngbe_gstrings_net_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < NGBE_GLOBAL_STATS_LEN; j++, i++) { + p = (char *)adapter + ngbe_gstrings_stats[j].stat_offset; + data[i] = (ngbe_gstrings_stats[j].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < adapter->num_tx_queues; j++) { + ring = adapter->tx_ring[j]; + xdp_ring = adapter->xdp_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; +#ifdef BP_EXTENDED_STATS + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; +#endif + continue; + } + +#ifdef HAVE_NDO_GET_STATS64 + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); +#endif + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; +#ifdef HAVE_NDO_GET_STATS64 + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +#endif + if (xdp_ring) { +#ifdef HAVE_NDO_GET_STATS64 + do { + start = u64_stats_fetch_begin_irq(&xdp_ring->syncp); +#endif + data[i] += xdp_ring->stats.packets; + data[i+1] += xdp_ring->stats.bytes; +#ifdef HAVE_NDO_GET_STATS64 + } while (u64_stats_fetch_retry_irq(&xdp_ring->syncp, start)); +#endif + } + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i+1] = ring->stats.misses; + data[i + 2] = ring->stats.cleaned; + if (xdp_ring) { + data[i] += xdp_ring->stats.yields; + data[i+1] += xdp_ring->stats.misses; + data[i + 2] += xdp_ring->stats.cleaned; + } + i += 3; +#endif + } + + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; +#ifdef BP_EXTENDED_STATS + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; +#endif + continue; + } + +#ifdef HAVE_NDO_GET_STATS64 + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); +#endif + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; +#ifdef HAVE_NDO_GET_STATS64 + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +#endif + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i+1] = ring->stats.misses; + data[i+2] = ring->stats.cleaned; + i += 3; +#endif + } + + for (j = 0; j < NGBE_MAX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxontxc[j]; + data[i++] = adapter->stats.pxofftxc[j]; + } + for (j = 0; j < NGBE_MAX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxonrxc[j]; + data[i++] = adapter->stats.pxoffrxc[j]; + } + + stat_count = sizeof(struct vf_stats) / sizeof(u64); + for (j = 0; j < adapter->num_vfs; j++) { + queue_stat = (u64 *)&adapter->vfinfo[j].vfstats; + for (k = 0; k < stat_count; k++) + data[i + k] = queue_stat[k]; + queue_stat = (u64 *)&adapter->vfinfo[j].saved_rst_vfstats; + for (k = 0; k < stat_count; k++) + data[i + k] += queue_stat[k]; + i += k; + } +} + +static void ngbe_get_priv_flag_strings(struct net_device *netdev, u8 *data) +{ + char *p = (char *)data; + unsigned int i; + + for (i = 0; i < NGBE_PRIV_FLAGS_STR_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + ngbe_gstrings_priv_flags[i].flag_string); + p += ETH_GSTRING_LEN; + } +} + +static void ngbe_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + char *p = (char *)data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *ngbe_gstrings_test, + NGBE_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < NGBE_NETDEV_STATS_LEN; i++) { + memcpy(p, ngbe_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < NGBE_GLOBAL_STATS_LEN; i++) { + memcpy(p, ngbe_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_tx_queues; i++) { /*temp setting2*/ + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; +#ifdef 
BP_EXTENDED_STATS + sprintf(p, "tx_queue_%u_bp_napi_yield", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bp_misses", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bp_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* BP_EXTENDED_STATS */ + } + for (i = 0; i < adapter->num_rx_queues; i++) { /*temp setting2*/ + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; +#ifdef BP_EXTENDED_STATS + sprintf(p, "rx_queue_%u_bp_poll_yield", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bp_misses", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bp_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* BP_EXTENDED_STATS */ + } + for (i = 0; i < NGBE_MAX_PACKET_BUFFERS; i++) { + sprintf(p, "tx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < NGBE_MAX_PACKET_BUFFERS; i++) { + sprintf(p, "rx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_vfs; i++) { + sprintf(p, "VF %d Rx Packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %d Rx Bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %d Tx Packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %d Tx Bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %d MC Packets", i); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != NGBE_STATS_LEN * ETH_GSTRING_LEN); */ + break; + case ETH_SS_PRIV_FLAGS: + ngbe_get_priv_flag_strings(netdev, data); + break; + } +} + +static int ngbe_link_test(struct ngbe_adapter *adapter, u64 *data) +{ + struct ngbe_hw *hw = &adapter->hw; + bool link_up = 0; + u32 link_speed = 0; + + if (NGBE_REMOVED(hw->hw_addr)) { + *data = 1; + return 1; + } + *data = 0; + hw->mac.ops.check_link(hw, &link_speed, &link_up, true); + if (link_up) + return *data; + else + *data = 1; + return *data; +} + +/* ethtool register test data */ +struct ngbe_reg_test { + u32 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. 
+ */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + + +/* default sapphire register test */ +static struct ngbe_reg_test reg_test_sapphire[] = { + { NGBE_RDB_RFCL, 1, PATTERN_TEST, 0x8007FFE0, 0x8007FFE0 }, + { NGBE_RDB_RFCH, 1, PATTERN_TEST, 0x8007FFE0, 0x8007FFE0 }, + { NGBE_PSR_VLAN_CTL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, + { NGBE_PX_RR_BAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { NGBE_PX_RR_BAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { NGBE_PX_RR_CFG(0), 4, WRITE_NO_TEST, 0, NGBE_PX_RR_CFG_RR_EN }, + { NGBE_RDB_RFCH, 1, PATTERN_TEST, 0x8007FFE0, 0x8007FFE0 }, + { NGBE_RDB_RFCV, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 }, + { NGBE_PX_TR_BAL(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { NGBE_PX_TR_BAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { NGBE_RDB_PB_CTL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, + { NGBE_PSR_MC_TBL(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { .reg = 0 } +}; + + +static bool reg_pattern_test(struct ngbe_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 pat, val, before; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + + if (NGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = rd32(&adapter->hw, reg); + wr32(&adapter->hw, reg, test_pattern[pat] & write); + val = rd32(&adapter->hw, reg); + if (val != (test_pattern[pat] & write & mask)) { + e_err(drv, + "pattern test reg %04X failed: got 0x%08X " + "expected 0x%08X\n", + reg, val, test_pattern[pat] & write & mask); + *data = reg; + wr32(&adapter->hw, reg, before); + return true; + } + wr32(&adapter->hw, reg, before); + } + return false; +} + +static bool reg_set_and_check(struct ngbe_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 val, before; + + if (NGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + before = rd32(&adapter->hw, reg); + wr32(&adapter->hw, reg, write & mask); + val = rd32(&adapter->hw, reg); + if ((write & mask) != (val & mask)) { + e_err(drv, + "set/check reg %04X test failed: got 0x%08X expected" + "0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; + wr32(&adapter->hw, reg, before); + return true; + } + wr32(&adapter->hw, reg, before); + return false; +} + + + + +static bool ngbe_reg_test(struct ngbe_adapter *adapter, u64 *data) +{ + struct ngbe_reg_test *test; + struct ngbe_hw *hw = &adapter->hw; + u32 i; + + if (NGBE_REMOVED(hw->hw_addr)) { + e_err(drv, "Adapter removed - register test blocked\n"); + *data = 1; + return true; + } + + test = reg_test_sapphire; + + /* + * Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. 
+ */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + bool b = false; + + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + wr32(hw, test->reg + (i * 0x40), + test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + (test->reg + 4) + (i * 8), + test->mask, + test->write); + break; + } + if (b) + return true; + } + test++; + } + + *data = 0; + return false; +} + +static bool ngbe_eeprom_test(struct ngbe_adapter *adapter, u64 *data) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 devcap; + + if (hw->eeprom.ops.eeprom_chksum_cap_st(hw, NGBE_CALSUM_COMMAND, &devcap)) { + *data = 1; + return true; + } else { + *data = 0; + return false; + } +} + +static irqreturn_t ngbe_test_intr(int __always_unused irq, void *data) +{ + struct net_device *netdev = (struct net_device *) data; + struct ngbe_adapter *adapter = netdev_priv(netdev); + u64 icr; + + /* get misc interrupt, as cannot get ring interrupt status */ + icr = ngbe_misc_isb(adapter, NGBE_ISB_VEC1); + icr <<= 32; + icr |= ngbe_misc_isb(adapter, NGBE_ISB_VEC0); + + adapter->test_icr = icr; + + return IRQ_HANDLED; +} + + + +static int ngbe_intr_test(struct ngbe_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u64 mask; + u32 i = 0, shared_int = true; + u32 irq = adapter->pdev->irq; + + if (NGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return -1; + } + *data = 0; + + /* Hook up test interrupt handler just for this test */ + if (adapter->msix_entries) { + /* NOTE: we don't test MSI-X interrupts here, yet */ + return 0; + } else if (adapter->flags & NGBE_FLAG_MSI_ENABLED) { + shared_int = false; + if (request_irq(irq, &ngbe_test_intr, 0, netdev->name, + netdev)) { + *data = 1; + return -1; + } + } else if (!request_irq(irq, &ngbe_test_intr, IRQF_PROBE_SHARED, + netdev->name, netdev)) { + shared_int = false; + } else if (request_irq(irq, &ngbe_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", + (shared_int ? "shared" : "unshared")); + + /* Disable all the interrupts */ + ngbe_irq_disable(adapter); + NGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Test each interrupt */ + for (; i < 1; i++) { + /* Interrupt to test */ + mask = 1ULL << i; + + if (!shared_int) { + /* + * Disable the interrupts to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ngbe_intr_disable(&adapter->hw, ~mask); + ngbe_intr_trigger(&adapter->hw, mask); + NGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* + * Enable the interrupt to be reported in the cause + * register and then force the same interrupt and see + * if one gets posted. If an interrupt was not posted + * to the bus, the test failed. 
+ */ + adapter->test_icr = 0; + ngbe_intr_disable(&adapter->hw, NGBE_INTR_ALL); + ngbe_intr_trigger(&adapter->hw, mask); + NGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (!(adapter->test_icr & mask)) { + *data = 0; + break; + } + } + + /* Disable all the interrupts */ + ngbe_intr_disable(&adapter->hw, NGBE_INTR_ALL); + NGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void ngbe_free_desc_rings(struct ngbe_adapter *adapter) +{ + struct ngbe_ring *tx_ring = &adapter->test_tx_ring; + struct ngbe_ring *rx_ring = &adapter->test_rx_ring; + struct ngbe_hw *hw = &adapter->hw; + + /* shut down the DMA engines now so they can be reinitialized later */ + + /* first Rx */ + hw->mac.ops.disable_rx(hw); + ngbe_disable_rx_queue(adapter, rx_ring); + + /* now Tx */ + wr32(hw, NGBE_PX_TR_CFG(tx_ring->reg_idx), 0); + + wr32m(hw, NGBE_TDM_CTL, NGBE_TDM_CTL_TE, 0); + + ngbe_reset(adapter); + + ngbe_free_tx_resources(&adapter->test_tx_ring); + ngbe_free_rx_resources(&adapter->test_rx_ring); +} + +static int ngbe_setup_desc_rings(struct ngbe_adapter *adapter) +{ + struct ngbe_ring *tx_ring = &adapter->test_tx_ring; + struct ngbe_ring *rx_ring = &adapter->test_rx_ring; + struct ngbe_hw *hw = &adapter->hw; + int ret_val; + int err; + + hw->mac.ops.setup_rxpba(hw, 0, 0, PBA_STRATEGY_EQUAL); + + /* Setup Tx descriptor ring and Tx buffers */ + tx_ring->count = NGBE_DEFAULT_TXD; + tx_ring->queue_index = 0; + tx_ring->dev = pci_dev_to_dev(adapter->pdev); + tx_ring->netdev = adapter->netdev; + tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; + + err = ngbe_setup_tx_resources(tx_ring); + if (err) + return 1; + + wr32m(&adapter->hw, NGBE_TDM_CTL, + NGBE_TDM_CTL_TE, NGBE_TDM_CTL_TE); + wr32m(hw, NGBE_TSEC_CTL, 0x2, 0); + wr32m(hw, NGBE_RSEC_CTL, 0x2, 0); + ngbe_configure_tx_ring(adapter, tx_ring); + + + /* enable mac transmitter */ + wr32m(hw, NGBE_MAC_TX_CFG, + NGBE_MAC_TX_CFG_TE | NGBE_MAC_TX_CFG_SPEED_MASK, + NGBE_MAC_TX_CFG_TE | NGBE_MAC_TX_CFG_SPEED_1G); + + /* Setup Rx Descriptor ring and Rx buffers */ + rx_ring->count = NGBE_DEFAULT_RXD; + rx_ring->queue_index = 0; + rx_ring->dev = pci_dev_to_dev(adapter->pdev); + rx_ring->netdev = adapter->netdev; + rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + rx_ring->rx_buf_len = NGBE_RXBUFFER_2K; +#endif + + err = ngbe_setup_rx_resources(rx_ring); + if (err) { + ret_val = 4; + goto err_nomem; + } + + hw->mac.ops.disable_rx(hw); + ngbe_configure_rx_ring(adapter, rx_ring); + hw->mac.ops.enable_rx(hw); + + return 0; + +err_nomem: + ngbe_free_desc_rings(adapter); + return ret_val; +} + +static int ngbe_setup_loopback_test(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 reg_data; + + /* Setup MAC loopback */ + wr32m(hw, NGBE_MAC_RX_CFG, + NGBE_MAC_RX_CFG_LM, NGBE_MAC_RX_CFG_LM); + + reg_data = rd32(hw, NGBE_PSR_CTL); + reg_data |= NGBE_PSR_CTL_BAM | NGBE_PSR_CTL_UPE | + NGBE_PSR_CTL_MPE | NGBE_PSR_CTL_TPE; + wr32(hw, NGBE_PSR_CTL, reg_data); + + wr32(hw, 0x17000, + (rd32(hw, 0x17000 )| + 0x00000040U) & ~0x1U); + + wr32(hw, 0x17204, 0x4); + wr32(hw, NGBE_PSR_VLAN_CTL, + rd32(hw, NGBE_PSR_VLAN_CTL) & + ~NGBE_PSR_VLAN_CTL_VFE); + + NGBE_WRITE_FLUSH(hw); + usleep_range(10000, 20000); + + return 0; +} + +static void ngbe_loopback_cleanup(struct ngbe_adapter *adapter) +{ + wr32m(&adapter->hw, NGBE_MAC_RX_CFG, + NGBE_MAC_RX_CFG_LM, ~NGBE_MAC_RX_CFG_LM); +} + + +static void 
ngbe_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size >>= 1; + memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size + 10], 0xBE, 1); + memset(&skb->data[frame_size + 12], 0xAF, 1); +} + +static bool ngbe_check_lbtest_frame(struct ngbe_rx_buffer *rx_buffer, + unsigned int frame_size) +{ + unsigned char *data; + bool match = true; + + frame_size >>= 1; + +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + data = rx_buffer->skb->data; +#else + data = kmap(rx_buffer->page) + rx_buffer->page_offset; +#endif + + if (data[3] != 0xFF || + data[frame_size + 10] != 0xBE || + data[frame_size + 12] != 0xAF) + match = false; + +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + kunmap(rx_buffer->page); + +#endif + return match; +} + +static u16 ngbe_clean_test_rings(struct ngbe_ring *rx_ring, + struct ngbe_ring *tx_ring, + unsigned int size) +{ + union ngbe_rx_desc *rx_desc; + struct ngbe_rx_buffer *rx_buffer; + struct ngbe_tx_buffer *tx_buffer; +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + const int bufsz = rx_ring->rx_buf_len; +#else + const int bufsz = ngbe_rx_bufsz(rx_ring); +#endif + u16 rx_ntc, tx_ntc, count = 0; + + /* initialize next to clean and descriptor values */ + rx_ntc = rx_ring->next_to_clean; + tx_ntc = tx_ring->next_to_clean; + rx_desc = NGBE_RX_DESC(rx_ring, rx_ntc); + + while (ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_DD)) { + /* unmap buffer on Tx side */ + tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; + ngbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); + + /* check Rx buffer */ + rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; + +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + /* sync Rx buffer for CPU read */ + dma_sync_single_for_cpu(rx_ring->dev, + rx_buffer->page_dma, + bufsz, + DMA_FROM_DEVICE); +#else + /* sync Rx buffer for CPU read */ + dma_sync_single_for_cpu(rx_ring->dev, + rx_buffer->dma, + bufsz, + DMA_FROM_DEVICE); +#endif + /* verify contents of skb */ + if (ngbe_check_lbtest_frame(rx_buffer, size)) + count++; + +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + /* sync Rx buffer for device write */ + dma_sync_single_for_device(rx_ring->dev, + rx_buffer->page_dma, + bufsz, + DMA_FROM_DEVICE); +#else + dma_sync_single_for_device(rx_ring->dev, + rx_buffer->dma, + bufsz, + DMA_FROM_DEVICE); +#endif + /* increment Rx/Tx next to clean counters */ + rx_ntc++; + if (rx_ntc == rx_ring->count) + rx_ntc = 0; + tx_ntc++; + if (tx_ntc == tx_ring->count) + tx_ntc = 0; + + /* fetch next descriptor */ + rx_desc = NGBE_RX_DESC(rx_ring, rx_ntc); + } + + /* re-map buffers to ring, store next to clean values */ + ngbe_alloc_rx_buffers(rx_ring, count); + rx_ring->next_to_clean = rx_ntc; + tx_ring->next_to_clean = tx_ntc; + + return count; +} + +static int ngbe_run_loopback_test(struct ngbe_adapter *adapter) +{ + struct ngbe_ring *tx_ring = &adapter->test_tx_ring; + struct ngbe_ring *rx_ring = &adapter->test_rx_ring; + int i, j, lc, good_cnt, ret_val = 0; + unsigned int size = 1024; + netdev_tx_t tx_ret_val; + struct sk_buff *skb; + u32 flags_orig = adapter->flags; + + //struct ngbe_hw *hw = &adapter->hw; + + + /* DCB can modify the frames on Tx */ + adapter->flags &= ~NGBE_FLAG_DCB_ENABLED; + + /* allocate test skb */ + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return 11; + + /* place data into test skb */ + ngbe_create_lbtest_frame(skb, size); + skb_put(skb, size); + + /* + * Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 
64
+ * send/receive pairs during each loop
+ */
+	if (rx_ring->count <= tx_ring->count)
+		lc = ((tx_ring->count / 64) * 2) + 1;
+	else
+		lc = ((rx_ring->count / 64) * 2) + 1;
+
+	for (j = 0; j <= lc; j++) {
+		/* reset count of good packets */
+		good_cnt = 0;
+
+		/* place 64 packets on the transmit queue */
+		for (i = 0; i < 64; i++) {
+			skb_get(skb);
+			tx_ret_val = ngbe_xmit_frame_ring(skb,
+							  adapter,
+							  tx_ring);
+			if (tx_ret_val == NETDEV_TX_OK)
+				good_cnt++;
+		}
+
+		msleep(10);
+
+		if (good_cnt != 64) {
+			ret_val = 12;
+			break;
+		}
+
+		/* allow 200 milliseconds for packets to go from Tx to Rx */
+		msleep(200);
+
+		good_cnt = ngbe_clean_test_rings(rx_ring, tx_ring, size);
+		if (good_cnt != 64) {
+			ret_val = 13;
+			break;
+		}
+	}
+
+	/* free the original skb */
+	kfree_skb(skb);
+	adapter->flags = flags_orig;
+
+	return ret_val;
+}
+
+static int ngbe_loopback_test(struct ngbe_adapter *adapter, u64 *data)
+{
+	*data = ngbe_setup_desc_rings(adapter);
+	if (*data)
+		goto out;
+	*data = ngbe_setup_loopback_test(adapter);
+	if (*data)
+		goto err_loopback;
+	*data = ngbe_run_loopback_test(adapter);
+	if (*data)
+		e_info(hw, "mac loopback testing failed\n");
+	ngbe_loopback_cleanup(adapter);
+
+err_loopback:
+	ngbe_free_desc_rings(adapter);
+out:
+	return *data;
+}
+
+#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
+static int ngbe_diag_test_count(struct net_device __always_unused *netdev)
+{
+	return NGBE_TEST_LEN;
+}
+
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
+static void ngbe_diag_test(struct net_device *netdev,
+			   struct ethtool_test *eth_test, u64 *data)
+{
+	struct ngbe_adapter *adapter = netdev_priv(netdev);
+	bool if_running = netif_running(netdev);
+	struct ngbe_hw *hw = &adapter->hw;
+
+	e_dev_info("ngbe_diag_test: start test\n");
+
+	if (NGBE_REMOVED(hw->hw_addr)) {
+		e_err(hw, "Adapter removed - test blocked\n");
+		data[0] = 1;
+		data[1] = 1;
+		data[2] = 1;
+		data[3] = 1;
+		data[4] = 1;
+		eth_test->flags |= ETH_TEST_FL_FAILED;
+		return;
+	}
+	set_bit(__NGBE_TESTING, &adapter->state);
+	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+		if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) {
+			int i;
+
+			for (i = 0; i < adapter->num_vfs; i++) {
+				if (adapter->vfinfo[i].clear_to_send) {
+					e_warn(drv, "Please take active VFs "
+					       "offline and restart the "
+					       "adapter before running NIC "
+					       "diagnostics\n");
+					data[0] = 1;
+					data[1] = 1;
+					data[2] = 1;
+					data[3] = 1;
+					data[4] = 1;
+					eth_test->flags |= ETH_TEST_FL_FAILED;
+					clear_bit(__NGBE_TESTING,
+						  &adapter->state);
+					goto skip_ol_tests;
+				}
+			}
+		}
+
+		/* Offline tests */
+		e_info(hw, "offline testing starting\n");
+
+		/* Link test performed before hardware reset so autoneg doesn't
+		 * interfere with test result */
+		if (ngbe_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		if (if_running)
+			/* indicate we're in test mode */
+			ngbe_close(netdev);
+		else {
+			msleep(20);
+			ngbe_reset(adapter);
+		}
+
+		e_info(hw, "register testing starting\n");
+		if (ngbe_reg_test(adapter, &data[0]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+		msleep(20);
+		ngbe_reset(adapter);
+		e_info(hw, "eeprom testing starting\n");
+		if (ngbe_eeprom_test(adapter, &data[1]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+		msleep(20);
+		ngbe_reset(adapter);
+		e_info(hw, "interrupt testing starting\n");
+		if (ngbe_intr_test(adapter, &data[2]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		if (!hw->ncsi_enabled) {
+			/* If SRIOV or VMDq is enabled then skip 
MAC + * loopback diagnostic. */ + if (adapter->flags & (NGBE_FLAG_SRIOV_ENABLED | + NGBE_FLAG_VMDQ_ENABLED)) { + e_info(hw, "skip MAC loopback diagnostic in VT mode\n"); + data[3] = 0; + goto skip_loopback; + } + + e_info(hw, "loopback testing starting\n"); + ngbe_loopback_test(adapter, &data[3]); + } + + + data[3] = 0; + +skip_loopback: + msleep(20); + ngbe_reset(adapter); + + /* clear testing bit and return adapter to previous state */ + clear_bit(__NGBE_TESTING, &adapter->state); + if (if_running) + ngbe_open(netdev); + } else { + e_info(hw, "online testing starting\n"); + + /* Online tests */ + if (ngbe_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Offline tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__NGBE_TESTING, &adapter->state); + } + +skip_ol_tests: + msleep_interruptible(4 * 1000); +} -#include "../libwx/wx_ethtool.h" -#include "../libwx/wx_type.h" -#include "ngbe_ethtool.h" +static int ngbe_wol_exclusion(struct ngbe_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + int retval = 0; + + /* WOL not supported for all devices */ + if (!ngbe_wol_supported(adapter)) { + retval = 1; + wol->supported = 0; + } + + return retval; +} static void ngbe_get_wol(struct net_device *netdev, - struct ethtool_wolinfo *wol) + struct ethtool_wolinfo *wol) { - struct wx *wx = netdev_priv(netdev); + struct ngbe_adapter *adapter = netdev_priv(netdev); + + struct ngbe_hw *hw = &adapter->hw; - if (!wx->wol_hw_supported) - return; - wol->supported = WAKE_MAGIC; + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC; wol->wolopts = 0; - if (wx->wol & WX_PSR_WKUP_CTL_MAG) + + if (ngbe_wol_exclusion(adapter, wol) || + !device_can_wakeup(pci_dev_to_dev(adapter->pdev))) + return; + + if (adapter->wol & NGBE_PSR_WKUP_CTL_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & NGBE_PSR_WKUP_CTL_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & NGBE_PSR_WKUP_CTL_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & NGBE_PSR_WKUP_CTL_MAG) wol->wolopts |= WAKE_MAGIC; + + if ( !((hw->subsystem_device_id & WOL_SUP_MASK) == WOL_SUP)) + wol->wolopts = 0; +} + +static int ngbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 slot = hw->bus.lan_id; + u16 value; + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (ngbe_wol_exclusion(adapter, wol)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + if ( !((hw->subsystem_device_id & WOL_SUP_MASK) == WOL_SUP)) + return -EOPNOTSUPP; + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= NGBE_PSR_WKUP_CTL_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= NGBE_PSR_WKUP_CTL_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= NGBE_PSR_WKUP_CTL_BC; + if (wol->wolopts & WAKE_MAGIC){ + adapter->wol |= NGBE_PSR_WKUP_CTL_MAG; + hw->wol_enabled = !!(adapter->wol); + wr32(hw, NGBE_PSR_WKUP_CTL, adapter->wol); + ngbe_read_ee_hostif(hw, 0x7FE, &value); + /*enable wol in shadow ram*/ + ngbe_write_ee_hostif(hw, 0x7FE, value | ( 1 << slot) ); + ngbe_write_ee_hostif(hw, 0x7FF, 0x5a5a); + device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol); + return 0; + } + + ngbe_read_ee_hostif(hw, 0x7FE, &value); + /*disable wol in shadow ram*/ + ngbe_write_ee_hostif(hw, 0x7FE, value & ~( 1 << slot )); + ngbe_write_ee_hostif(hw, 0x7FF, 0x5a5a); + return 0; +} + +static int ngbe_nway_reset(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + ngbe_reinit_locked(adapter); + + return 0; +} + +#ifdef HAVE_ETHTOOL_SET_PHYS_ID +static int ngbe_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + + if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi || + adapter->hw.phy.type == ngbe_phy_internal_yt8521s_sfi) { + ngbe_phy_read_reg_ext_yt8521s(hw, 0xA00B, 0, (u16*)&adapter->led_reg); + } else if (adapter->hw.phy.type == ngbe_phy_m88e1512 || + adapter->hw.phy.type == ngbe_phy_m88e1512_sfi) { + hw->phy.ops.write_reg_mdi(hw, 22, 0, 3); + hw->phy.ops.read_reg_mdi(hw, 16, 0, (u16*)&adapter->led_reg); + } else + adapter->led_reg = rd32(hw, NGBE_CFG_LED_CTL); + return 2; + + case ETHTOOL_ID_ON: + if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi || + adapter->hw.phy.type == ngbe_phy_internal_yt8521s_sfi) { + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA00B, 0, adapter->led_reg | 0x140); + } else if (adapter->hw.phy.type == ngbe_phy_m88e1512 || + adapter->hw.phy.type == ngbe_phy_m88e1512_sfi) { + hw->phy.ops.write_reg_mdi(hw, 22, 0, 3); + hw->phy.ops.write_reg_mdi(hw, 16, 0, (adapter->led_reg & ~0xF) | 0x9); + } else + hw->mac.ops.led_on(hw, NGBE_LED_LINK_1G); + break; + + case ETHTOOL_ID_OFF: + if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi || + adapter->hw.phy.type == ngbe_phy_internal_yt8521s_sfi) { + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA00B, 0, adapter->led_reg | 0x100); + } else if (adapter->hw.phy.type == ngbe_phy_m88e1512 || + adapter->hw.phy.type == ngbe_phy_m88e1512_sfi) { + hw->phy.ops.write_reg_mdi(hw, 22, 0, 3); + hw->phy.ops.write_reg_mdi(hw, 16, 0, (adapter->led_reg & ~0xF) | 0x8); + } else + hw->mac.ops.led_off(hw, NGBE_LED_LINK_100M | NGBE_LED_LINK_1G); + break; + + case ETHTOOL_ID_INACTIVE: + /* Restore LED settings */ + if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi || + adapter->hw.phy.type == ngbe_phy_internal_yt8521s_sfi) { + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA00B, 0, adapter->led_reg); + } else if (adapter->hw.phy.type == ngbe_phy_m88e1512 || + adapter->hw.phy.type == ngbe_phy_m88e1512_sfi) { + hw->phy.ops.write_reg_mdi(hw, 22, 0, 3); + hw->phy.ops.write_reg_mdi(hw, 16, 0, adapter->led_reg); + } else + wr32(&adapter->hw, NGBE_CFG_LED_CTL, + adapter->led_reg); + break; + } + + return 0; +} +#else +static int ngbe_phys_id(struct net_device *netdev, u32 data) +{ + struct ngbe_adapter 
*adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 led_reg; + u32 i; + + if (!data || data > 300) + data = 300; + + if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi || + adapter->hw.phy.type == ngbe_phy_internal_yt8521s_sfi) { + ngbe_phy_read_reg_ext_yt8521s(hw, 0xA00B, 0, (u16*)&led_reg); + for (i = 0; i < (data * 1000); i += 400) { + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA00B, 0, led_reg | 0x140); + msleep_interruptible(200); + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA00B, 0, led_reg | 0x100); + msleep_interruptible(200); + } + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA00B, 0, led_reg); + } else if (adapter->hw.phy.type == ngbe_phy_m88e1512 || + adapter->hw.phy.type == ngbe_phy_m88e1512_sfi) { + hw->phy.ops.write_reg_mdi(hw, 22, 0, 3); + hw->phy.ops.read_reg_mdi(hw, 16, 0, (u16*)&led_reg); + for (i = 0; i < (data * 1000); i += 400) { + hw->phy.ops.write_reg_mdi(hw, 16, 0, (adapter->led_reg & ~0xF) | 0x9); + msleep_interruptible(200); + hw->phy.ops.write_reg_mdi(hw, 16, 0, (adapter->led_reg & ~0xF) | 0x8); + msleep_interruptible(200); + } + hw->phy.ops.write_reg_mdi(hw, 16, 0, led_reg); + } else { + led_reg = rd32(hw, NGBE_CFG_LED_CTL); + for (i = 0; i < (data * 1000); i += 400) { + hw->mac.ops.led_on(hw, NGBE_LED_LINK_1G); + msleep_interruptible(200); + hw->mac.ops.led_off(hw, NGBE_LED_LINK_100M | NGBE_LED_LINK_1G); + msleep_interruptible(200); + } + /* Restore LED settings */ + wr32(hw, NGBE_CFG_LED_CTL, led_reg); + } + + + return 0; +} +#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ + +static int ngbe_get_coalesce(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else + struct ethtool_coalesce *ec) +#endif +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; + /* only valid if in constant ITR mode */ + if (adapter->rx_itr_setting <= 1) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + /* if in mixed tx/rx queues per vector mode, report only rx settings */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + return 0; + + /* only valid if in constant ITR mode */ + if (adapter->tx_itr_setting <= 1) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + + return 0; +} + +static int ngbe_set_coalesce(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else + struct ethtool_coalesce *ec) +#endif +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_q_vector *q_vector; + int i; + u16 tx_itr_param, rx_itr_param; + u16 tx_itr_prev; + bool need_reset = false; + + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) { + /* reject Tx specific changes in case of mixed RxTx vectors */ + if (ec->tx_coalesce_usecs) + return -EINVAL; + tx_itr_prev = adapter->rx_itr_setting; + } else { + tx_itr_prev = adapter->tx_itr_setting; + } + + if (ec->tx_max_coalesced_frames_irq) + adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; + + if ((ec->rx_coalesce_usecs > (NGBE_MAX_EITR >> 2)) || + (ec->tx_coalesce_usecs > (NGBE_MAX_EITR >> 2))) + return -EINVAL; + + if (ec->rx_coalesce_usecs > 1) + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + else + 
adapter->rx_itr_setting = ec->rx_coalesce_usecs; + + if (adapter->rx_itr_setting == 1) + rx_itr_param = NGBE_20K_ITR; + else + rx_itr_param = adapter->rx_itr_setting; + + if (ec->tx_coalesce_usecs > 1) + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + + if (adapter->tx_itr_setting == 1) + tx_itr_param = NGBE_20K_ITR; + else + tx_itr_param = adapter->tx_itr_setting; + + /* mixed Rx/Tx */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + adapter->tx_itr_setting = adapter->rx_itr_setting; + + /* detect ITR changes that require update of TXDCTL.WTHRESH */ + if ((adapter->tx_itr_setting != 1) && + (adapter->tx_itr_setting < NGBE_70K_ITR)) { + if ((tx_itr_prev == 1) || + (tx_itr_prev >= NGBE_70K_ITR)) + need_reset = true; + } else { + if ((tx_itr_prev != 1) && + (tx_itr_prev < NGBE_70K_ITR)) + need_reset = true; + } + + /* hw->mac.ops.dmac_config is null*/ + if (hw->mac.ops.dmac_config && + adapter->hw.mac.dmac_config.watchdog_timer && + (!adapter->rx_itr_setting && !adapter->tx_itr_setting)) { + e_info(probe, + "Disabling DMA coalescing because interrupt throttling " + "is disabled\n"); + adapter->hw.mac.dmac_config.watchdog_timer = 0; + hw->mac.ops.dmac_config(hw); + } + + for (i = 0; i < adapter->num_q_vectors; i++) { + q_vector = adapter->q_vector[i]; + q_vector->tx.work_limit = adapter->tx_work_limit; + q_vector->rx.work_limit = adapter->rx_work_limit; + if (q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->itr = tx_itr_param; + else + /* rx only or mixed */ + q_vector->itr = rx_itr_param; + ngbe_write_eitr(q_vector); + } + + /* + * do reset here at the end to make sure EITR==0 case is handled + * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings + * also locks in RSC enable/disable which requires reset + */ + if (need_reset) + ngbe_do_reset(netdev); + + return 0; +} + +#ifndef HAVE_NDO_SET_FEATURES +static u32 ngbe_get_rx_csum(struct net_device *netdev) +{ + return !!(netdev->features & NETIF_F_RXCSUM); +} + +static int ngbe_set_rx_csum(struct net_device *netdev, u32 data) +{ +#ifdef HAVE_VXLAN_RX_OFFLOAD + struct ngbe_adapter *adapter = netdev_priv(netdev); +#endif + bool need_reset = false; + + if (data) + netdev->features |= NETIF_F_RXCSUM; + else + netdev->features &= ~NETIF_F_RXCSUM; + + if (!data && (netdev->features & NETIF_F_LRO)) { + netdev->features &= ~NETIF_F_LRO; + } + +#ifdef HAVE_VXLAN_RX_OFFLOAD + if (adapter->flags & NGBE_FLAG_VXLAN_OFFLOAD_CAPABLE && data) { + netdev->hw_enc_features |= NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM; + if (!need_reset) + adapter->flags2 |= NGBE_FLAG2_VXLAN_REREG_NEEDED; + } else { + netdev->hw_enc_features &= ~(NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM); + ngbe_clear_vxlan_port(adapter); + } +#endif /* HAVE_VXLAN_RX_OFFLOAD */ + + if (need_reset) + ngbe_do_reset(netdev); + + return 0; +} + +static int ngbe_set_tx_csum(struct net_device *netdev, u32 data) +{ +#ifdef NETIF_F_IPV6_CSUM + u32 feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; +#else + u32 feature_list = NETIF_F_IP_CSUM; +#endif + + +#ifdef HAVE_ENCAP_TSO_OFFLOAD + if (data) + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; + else + netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL; + feature_list |= NETIF_F_GSO_UDP_TUNNEL; +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + feature_list |= NETIF_F_SCTP_CSUM; + + + if (data) + netdev->features |= feature_list; + else + netdev->features &= ~feature_list; + + return 0; +} + +#ifdef 
NETIF_F_TSO
+static int ngbe_set_tso(struct net_device *netdev, u32 data)
+{
+#ifdef NETIF_F_TSO6
+	u32 feature_list = NETIF_F_TSO | NETIF_F_TSO6;
+#else
+	u32 feature_list = NETIF_F_TSO;
+#endif
+
+	if (data)
+		netdev->features |= feature_list;
+	else
+		netdev->features &= ~feature_list;
+
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+	if (!data) {
+		struct ngbe_adapter *adapter = netdev_priv(netdev);
+		struct net_device *v_netdev;
+		int i;
+
+		/* disable TSO on all VLANs if they're present */
+		if (!adapter->vlgrp)
+			goto tso_out;
+
+		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+			v_netdev = vlan_group_get_device(adapter->vlgrp, i);
+			if (!v_netdev)
+				continue;
+
+			v_netdev->features &= ~feature_list;
+			vlan_group_set_device(adapter->vlgrp, i, v_netdev);
+		}
+	}
+
+tso_out:
+
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+	return 0;
+}
+
+#endif /* NETIF_F_TSO */
+#ifdef ETHTOOL_GFLAGS
+static int ngbe_set_flags(struct net_device *netdev, u32 data)
+{
+	struct ngbe_adapter *adapter = netdev_priv(netdev);
+	u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN;
+#ifndef HAVE_VLAN_RX_REGISTER
+	u32 changed = netdev->features ^ data;
+#endif
+	bool need_reset = false;
+	int rc;
+
+#ifndef HAVE_VLAN_RX_REGISTER
+	if ((adapter->flags & NGBE_FLAG_DCB_ENABLED) &&
+	    !(data & ETH_FLAG_RXVLAN))
+		return -EINVAL;
+
+#endif
+	supported_flags |= ETH_FLAG_LRO;
+
+#ifdef ETHTOOL_GRXRINGS
+	supported_flags |= ETH_FLAG_NTUPLE;
+#endif
+#ifdef NETIF_F_RXHASH
+	supported_flags |= ETH_FLAG_RXHASH;
+#endif
+	rc = ethtool_op_set_flags(netdev, data, supported_flags);
+	if (rc)
+		return rc;
+
+#ifndef HAVE_VLAN_RX_REGISTER
+	if (changed & ETH_FLAG_RXVLAN)
+		ngbe_vlan_mode(netdev, netdev->features);
+
+#endif
+
+#ifdef HAVE_VXLAN_CHECKS
+	if (adapter->flags & NGBE_FLAG_VXLAN_OFFLOAD_CAPABLE &&
+	    netdev->features & NETIF_F_RXCSUM)
+		vxlan_get_rx_port(netdev);
+	else
+		ngbe_clear_vxlan_port(adapter);
+#endif /* HAVE_VXLAN_CHECKS */
+
+#ifdef ETHTOOL_GRXRINGS
+	/*
+	 * Check if Flow Director n-tuple support was enabled or disabled. If
+	 * the state changed, we need to reset. 
+ */ + switch (netdev->features & NETIF_F_NTUPLE) { + case NETIF_F_NTUPLE: + /* turn off ATR, enable perfect filters and reset */ + if (!(adapter->flags & NGBE_FLAG_FDIR_PERFECT_CAPABLE)) + need_reset = true; + break; + default: + /* turn off perfect filters, enable ATR and reset */ + if (adapter->flags & NGBE_FLAG_FDIR_PERFECT_CAPABLE) + need_reset = true; + + /* We cannot enable ATR if VMDq is enabled */ + if (adapter->flags & NGBE_FLAG_VMDQ_ENABLED) + break; + + /* We cannot enable ATR if we have 2 or more traffic classes */ + if (netdev_get_num_tc(netdev) > 1) + break; + + /* We cannot enable ATR if RSS is disabled */ + if (adapter->ring_feature[RING_F_RSS].limit <= 1) + break; + + /* A sample rate of 0 indicates ATR disabled */ + if (!adapter->atr_sample_rate) + break; + + break; + } + +#endif /* ETHTOOL_GRXRINGS */ + if (need_reset) + ngbe_do_reset(netdev); + + return 0; +} + +#endif /* ETHTOOL_GFLAGS */ +#endif /* HAVE_NDO_SET_FEATURES */ +#ifdef ETHTOOL_GRXRINGS + +static int ngbe_get_rss_hash_opts(struct ngbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on ngbe */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V4_FLOW: + if (adapter->flags2 & NGBE_FLAG2_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V6_FLOW: + if (adapter->flags2 & NGBE_FLAG2_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ngbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, +#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS + void *rule_locs) +#else + u32 *rule_locs) +#endif +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + break; + case ETHTOOL_GRXCLSRLALL: + break; + case ETHTOOL_GRXFH: + ret = ngbe_get_rss_hash_opts(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +#ifdef ETHTOOL_SRXNTUPLE +/* + * We need to keep this around for kernels 2.6.33 - 2.6.39 in order to avoid + * a null pointer dereference as it was assumend if the NETIF_F_NTUPLE flag + * was defined that this function was present. 
+ */ +static int ngbe_set_rx_ntuple(struct net_device __always_unused *dev, + struct ethtool_rx_ntuple __always_unused *cmd) +{ + return -EOPNOTSUPP; +} + +#endif +#define UDP_RSS_FLAGS (NGBE_FLAG2_RSS_FIELD_IPV4_UDP | \ + NGBE_FLAG2_RSS_FIELD_IPV6_UDP) +static int ngbe_set_rss_hash_opt(struct ngbe_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags2 = adapter->flags2; + + /* + * RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags2 &= ~NGBE_FLAG2_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags2 |= NGBE_FLAG2_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags2 &= ~NGBE_FLAG2_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags2 |= NGBE_FLAG2_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags2 != adapter->flags2) { + struct ngbe_hw *hw = &adapter->hw; + u32 mrqc; + + mrqc = rd32(hw, NGBE_RDB_RA_CTL); + + if ((flags2 & UDP_RSS_FLAGS) && + !(adapter->flags2 & UDP_RSS_FLAGS)) + e_warn(drv, "enabling UDP RSS: fragmented packets" + " may arrive out of order to the stack above\n"); + + adapter->flags2 = flags2; + + /* Perform hash on these packet types */ + mrqc |= NGBE_RDB_RA_CTL_RSS_IPV4 + | NGBE_RDB_RA_CTL_RSS_IPV4_TCP + | NGBE_RDB_RA_CTL_RSS_IPV6 + | NGBE_RDB_RA_CTL_RSS_IPV6_TCP; + + mrqc &= ~(NGBE_RDB_RA_CTL_RSS_IPV4_UDP | + NGBE_RDB_RA_CTL_RSS_IPV6_UDP); + + if (flags2 & NGBE_FLAG2_RSS_FIELD_IPV4_UDP) + mrqc |= NGBE_RDB_RA_CTL_RSS_IPV4_UDP; + + if (flags2 & NGBE_FLAG2_RSS_FIELD_IPV6_UDP) + mrqc |= NGBE_RDB_RA_CTL_RSS_IPV6_UDP; + + wr32(hw, NGBE_RDB_RA_CTL, mrqc); + } + + return 0; +} + +static int ngbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + break; + case ETHTOOL_SRXCLSRLDEL: + break; + case ETHTOOL_SRXFH: + ret = ngbe_set_rss_hash_opt(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) +static int ngbe_rss_indir_tbl_max(struct ngbe_adapter *adapter) +{ + return 64; +} + +static u32 ngbe_get_rxfh_key_size(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + return sizeof(adapter->rss_key); +} + +static u32 ngbe_rss_indir_size(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + return 
ngbe_rss_indir_tbl_entries(adapter); +} + +static void ngbe_get_reta(struct ngbe_adapter *adapter, u32 *indir) +{ + int i, reta_size = ngbe_rss_indir_tbl_entries(adapter); + + for (i = 0; i < reta_size; i++) + indir[i] = adapter->rss_indir_tbl[i]; +} + +#ifdef HAVE_RXFH_HASHFUNC +static int ngbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +#else /* HAVE_RXFH_HASHFUNC */ +//#ifdef HAVE_RXFH_HASHKEY +static int ngbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +//#else +//static int ngbe_get_rxfh(struct net_device *netdev, u32 *indir) +//#endif +#endif /* HAVE_RXFH_HASHFUNC */ +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; +#endif + + if (indir) + ngbe_get_reta(adapter, indir); +//#ifdef HAVE_RXFH_HASHKEY + if (key) + memcpy(key, adapter->rss_key, ngbe_get_rxfh_key_size(netdev)); +//#endif + + return 0; +} + +#ifdef HAVE_RXFH_HASHFUNC +static int ngbe_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +#else +#ifdef HAVE_RXFH_NONCONST +//#ifdef HAVE_RXFH_HASHKEY +static int ngbe_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +//#else +//static int ngbe_set_rxfh(struct net_device *netdev, u32 *indir) +//#endif +#else /* HAVE_RXFH_NONCONST */ +//#ifdef HAVE_RXFH_HASHKEY +static int ngbe_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key) +//#else +//static int ngbe_set_rxfh(struct net_device *netdev, const u32 *indir) +//#endif +#endif /* HAVE_RXFH_NONCONST */ +#endif /* HAVE_RXFH_HASHFUNC */ +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + int i; + u32 reta_entries = ngbe_rss_indir_tbl_entries(adapter); + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc) + return -EINVAL; +#endif + + /* Fill out the redirection table */ + if (indir) { + int max_queues = min_t(int, adapter->num_rx_queues, + ngbe_rss_indir_tbl_max(adapter)); + + /*Allow at least 2 queues w/ SR-IOV.*/ + if ((adapter->flags & NGBE_FLAG_SRIOV_ENABLED) && + (max_queues < 2)) + max_queues = 2; + + /* Verify user input. 
*/ + for (i = 0; i < reta_entries; i++) + if (indir[i] >= max_queues) + return -EINVAL; + + for (i = 0; i < reta_entries; i++) + adapter->rss_indir_tbl[i] = indir[i]; + } + +//#ifdef HAVE_RXFH_HASHKEY + /* Fill out the rss hash key */ + if (key) + memcpy(adapter->rss_key, key, ngbe_get_rxfh_key_size(netdev)); +//#endif + + ngbe_store_reta(adapter); + + return 0; +} +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ + +#ifdef HAVE_ETHTOOL_GET_TS_INFO +static int ngbe_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + + /* we always support timestamping disabled */ + info->rx_filters = 1 << HWTSTAMP_FILTER_NONE; + +#ifdef HAVE_PTP_1588_CLOCK + + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters |= + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); + +#endif /* HAVE_PTP_1588_CLOCK */ + return 0; +} +#endif /* HAVE_ETHTOOL_GET_TS_INFO */ + +#endif /* ETHTOOL_GRXRINGS */ +#ifdef ETHTOOL_SCHANNELS +static unsigned int ngbe_max_channels(struct ngbe_adapter *adapter) +{ + unsigned int max_combined; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (!(adapter->flags & NGBE_FLAG_MSIX_ENABLED)) { + /* We only support one q_vector without MSI-X */ + max_combined = 1; + } else if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) { + /* SR-IOV currently only allows one queue on the PF */ + max_combined = 1; + } else if (tcs > 1) { + /* For DCB report channels per traffic class */ + if (tcs > 4) { + /* 8 TC w/ 8 queues per TC */ + max_combined = 8; + } else { + /* 4 TC w/ 16 queues per TC */ + max_combined = 16; + } + } else if (adapter->atr_sample_rate) { + /* support up to 64 queues with ATR */ + max_combined = NGBE_MAX_FDIR_INDICES; + } else { + /* support up to max allowed queues with RSS */ + max_combined = ngbe_max_rss_indices(adapter); + } + if (adapter->xdp_prog) + return max_combined = NGBE_MAX_RSS_INDICES / 2; + + return max_combined; +} + +static void ngbe_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + + /* report maximum channels */ + ch->max_combined = ngbe_max_channels(adapter); + + /* report info for other vector */ + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + /* record RSS queues */ + ch->combined_count = adapter->ring_feature[RING_F_RSS].indices; + if (adapter->xdp_prog) + ch->combined_count = min(ch->combined_count, (u32)(NGBE_MAX_RSS_INDICES / 2)); + + /* nothing else to report if RSS is disabled */ + if (ch->combined_count == 1) + return; + + /* we do not support ATR queueing if SR-IOV is enabled */ + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) + return; + + /* same thing goes 
for being DCB enabled */ + if (netdev_get_num_tc(dev) > 1) + return; + + /* if ATR is disabled we can exit */ + if (!adapter->atr_sample_rate) + return; + +} + +static int ngbe_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + unsigned int count = ch->combined_count; + u8 max_rss_indices = ngbe_max_rss_indices(adapter); + + /* verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* verify other_count has not changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* verify the number of channels does not exceed hardware limits */ + if (count > ngbe_max_channels(adapter)) + return -EINVAL; + + /* cap RSS limit */ + if (count > max_rss_indices) + count = max_rss_indices; + adapter->ring_feature[RING_F_RSS].limit = count; + + /* use setup TC to update any traffic class queue mapping */ + return ngbe_setup_tc(dev, netdev_get_num_tc(dev), 0); +} +#endif /* ETHTOOL_SCHANNELS */ + +#if 0 +#ifdef ETHTOOL_GMODULEINFO +static int ngbe_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) + + +static int ngbe_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, u8 *data) +#endif /* ETHTOOL_GMODULEINFO */ +#endif + +#ifdef ETHTOOL_GEEE +static int ngbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + return 0; } +#endif /* ETHTOOL_GEEE */ -static int ngbe_set_wol(struct net_device *netdev, - struct ethtool_wolinfo *wol) +#ifdef ETHTOOL_SEEE +static int ngbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) { - struct wx *wx = netdev_priv(netdev); - struct pci_dev *pdev = wx->pdev; + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + struct ethtool_eee eee_data; + int ret_val; - if (!wx->wol_hw_supported) + if (!(hw->mac.ops.setup_eee && + (adapter->flags2 & NGBE_FLAG2_EEE_CAPABLE))) return -EOPNOTSUPP; - wx->wol = 0; - if (wol->wolopts & WAKE_MAGIC) - wx->wol = WX_PSR_WKUP_CTL_MAG; - netdev->wol_enabled = !!(wx->wol); - wr32(wx, WX_PSR_WKUP_CTL, wx->wol); - device_set_wakeup_enable(&pdev->dev, netdev->wol_enabled); + memset(&eee_data, 0, sizeof(struct ethtool_eee)); + + ret_val = ngbe_get_eee(netdev, &eee_data); + if (ret_val) + return ret_val; + + if (eee_data.eee_enabled && !edata->eee_enabled) { + if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) { + e_dev_err("Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) { + e_dev_err("Setting EEE Tx LPI timer is not " + "supported\n"); + return -EINVAL; + } + + if (eee_data.advertised != edata->advertised) { + e_dev_err("Setting EEE advertised speeds is not " + "supported\n"); + return -EINVAL; + } + + } + + if (eee_data.eee_enabled != edata->eee_enabled) { + + if (edata->eee_enabled) + adapter->flags2 |= NGBE_FLAG2_EEE_ENABLED; + else + adapter->flags2 &= ~NGBE_FLAG2_EEE_ENABLED; + + /* reset link */ + if (netif_running(netdev)) + ngbe_reinit_locked(adapter); + else + ngbe_reset(adapter); + } return 0; } +#endif /* ETHTOOL_SEEE */ + +static int ngbe_set_flash(struct net_device *netdev, struct ethtool_flash *ef) +{ + int ret; + const struct firmware *fw; + struct ngbe_adapter *adapter = netdev_priv(netdev); + + ret = request_firmware(&fw, ef->data, &netdev->dev); + if (ret < 0) + return ret; + + if (ef->region == 0) { + ret = ngbe_upgrade_flash(&adapter->hw, ef->region, + fw->data, fw->size); + } else { + if 
(ngbe_mng_present(&adapter->hw)) { + ret = ngbe_upgrade_flash_hostif(&adapter->hw, ef->region, + fw->data, fw->size); + } else + ret = -EOPNOTSUPP; + } + + release_firmware(fw); + if (!ret) + dev_info(&netdev->dev, + "loaded firmware %s, reboot to make firmware work\n", ef->data); + return ret; +} -static const struct ethtool_ops ngbe_ethtool_ops = { - .get_drvinfo = wx_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, - .nway_reset = phy_ethtool_nway_reset, - .get_wol = ngbe_get_wol, - .set_wol = ngbe_set_wol, + +static struct ethtool_ops ngbe_ethtool_ops = { +#ifdef ETHTOOL_COALESCE_USECS + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, +#endif +#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE + .get_link_ksettings = ngbe_get_link_ksettings, + .set_link_ksettings = ngbe_set_link_ksettings, +#else + .get_settings = ngbe_get_settings, + .set_settings = ngbe_set_settings, +#endif + .get_drvinfo = ngbe_get_drvinfo, + .get_regs_len = ngbe_get_regs_len, + .get_regs = ngbe_get_regs, + .get_wol = ngbe_get_wol, + .set_wol = ngbe_set_wol, + .nway_reset = ngbe_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = ngbe_get_eeprom_len, + .get_eeprom = ngbe_get_eeprom, + .set_eeprom = ngbe_set_eeprom, + .get_ringparam = ngbe_get_ringparam, + .set_ringparam = ngbe_set_ringparam, + .get_pauseparam = ngbe_get_pauseparam, + .set_pauseparam = ngbe_set_pauseparam, + .get_msglevel = ngbe_get_msglevel, + .set_msglevel = ngbe_set_msglevel, +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT + .self_test_count = ngbe_diag_test_count, +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + .self_test = ngbe_diag_test, + .get_strings = ngbe_get_strings, +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#ifdef HAVE_ETHTOOL_SET_PHYS_ID + .set_phys_id = ngbe_set_phys_id, +#else + .phys_id = ngbe_phys_id, +#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT + .get_stats_count = ngbe_get_stats_count, +#else /* HAVE_ETHTOOL_GET_SSET_COUNT */ + .get_sset_count = ngbe_get_sset_count, + .get_priv_flags = ngbe_get_priv_flags, + .set_priv_flags = ngbe_set_priv_flags, +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + .get_ethtool_stats = ngbe_get_ethtool_stats, +#ifdef HAVE_ETHTOOL_GET_PERM_ADDR + .get_perm_addr = ethtool_op_get_perm_addr, +#endif + .get_coalesce = ngbe_get_coalesce, + .set_coalesce = ngbe_set_coalesce, +#ifndef HAVE_NDO_SET_FEATURES + .get_rx_csum = ngbe_get_rx_csum, + .set_rx_csum = ngbe_set_rx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = ngbe_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, +#ifdef NETIF_F_TSO + .get_tso = ethtool_op_get_tso, + .set_tso = ngbe_set_tso, +#endif +#ifdef ETHTOOL_GFLAGS + .get_flags = ethtool_op_get_flags, + .set_flags = ngbe_set_flags, +#endif +#endif /* HAVE_NDO_SET_FEATURES */ +#ifdef ETHTOOL_GRXRINGS + .get_rxnfc = ngbe_get_rxnfc, + .set_rxnfc = ngbe_set_rxnfc, +#ifdef ETHTOOL_SRXNTUPLE + .set_rx_ntuple = ngbe_set_rx_ntuple, +#endif +#endif /* ETHTOOL_GRXRINGS */ +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT + + +#if 0 +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) +#ifdef HAVE_RXFH_HASHKEY + .get_rxfh_indir_size = ngbe_rss_indir_size, + .get_rxfh_key_size = ngbe_get_rxfh_key_size, + .get_rxfh = ngbe_get_rxfh, + .set_rxfh = ngbe_set_rxfh, +#else/* HAVE_RXFH_HASHKEY */ + .get_rxfh_indir_size = ngbe_rss_indir_size, + .get_rxfh_indir = ngbe_get_rxfh, + .set_rxfh_indir = 
ngbe_set_rxfh, +#endif /* HAVE_RXFH_HASHKEY */ +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +#endif + +#ifdef ETHTOOL_GEEE + .get_eee = ngbe_get_eee, +#endif /* ETHTOOL_GEEE */ +#ifdef ETHTOOL_SEEE + .set_eee = ngbe_set_eee, +#endif /* ETHTOOL_SEEE */ +#ifdef ETHTOOL_SCHANNELS + .get_channels = ngbe_get_channels, + .set_channels = ngbe_set_channels, +#endif +#if 0 +#ifdef ETHTOOL_GMODULEINFO + .get_module_info = ngbe_get_module_info, + .get_module_eeprom = ngbe_get_module_eeprom, +#endif +#endif +#ifdef HAVE_ETHTOOL_GET_TS_INFO + .get_ts_info = ngbe_get_ts_info, +#endif +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_indir_size = ngbe_rss_indir_size, + .get_rxfh_key_size = ngbe_get_rxfh_key_size, + .get_rxfh = ngbe_get_rxfh, + .set_rxfh = ngbe_set_rxfh, +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ + .flash_device = ngbe_set_flash, }; +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +static const struct ethtool_ops_ext ngbe_ethtool_ops_ext = { + .size = sizeof(struct ethtool_ops_ext), + .get_ts_info = ngbe_get_ts_info, + .set_phys_id = ngbe_set_phys_id, + .get_channels = ngbe_get_channels, + .set_channels = ngbe_set_channels, +#if 0 +#ifdef ETHTOOL_GMODULEINFO + .get_module_info = ngbe_get_module_info, + .get_module_eeprom = ngbe_get_module_eeprom, +#endif +#endif +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_indir_size = ngbe_rss_indir_size, + .get_rxfh_key_size = ngbe_get_rxfh_key_size, + .get_rxfh = ngbe_get_rxfh, + .set_rxfh = ngbe_set_rxfh, +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +#ifdef ETHTOOL_GEEE + .get_eee = ngbe_get_eee, +#endif /* ETHTOOL_GEEE */ +#ifdef ETHTOOL_SEEE + .set_eee = ngbe_set_eee, +#endif /* ETHTOOL_SEEE */ +}; +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ + void ngbe_set_ethtool_ops(struct net_device *netdev) { +#ifndef ETHTOOL_OPS_COMPAT netdev->ethtool_ops = &ngbe_ethtool_ops; +#else + SET_ETHTOOL_OPS(netdev, &ngbe_ethtool_ops); +#endif + +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT + set_ethtool_ops_ext(netdev, &ngbe_ethtool_ops_ext); +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ } +#endif /* SIOCETHTOOL */ + diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.h deleted file mode 100644 index 487074e0eeecc59091b9f9eb2c3bab8ccd0d5ed8..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.h +++ /dev/null @@ -1,9 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */ - -#ifndef _NGBE_ETHTOOL_H_ -#define _NGBE_ETHTOOL_H_ - -void ngbe_set_ethtool_ops(struct net_device *netdev); - -#endif /* _NGBE_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c index 6562a2de95277c73758e9a8a6651ad2c0c5c9803..4ea12cc4fc1d49bd77313c364e0ecda00fb903e2 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c @@ -1,97 +1,4953 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include "ngbe_type.h"
+#include "ngbe_hw.h"
+#include "ngbe_phy.h"
+#include "ngbe.h"
+
+#define NGBE_SP_MAX_TX_QUEUES 8
+#define NGBE_SP_MAX_RX_QUEUES 8
+#define NGBE_SP_RAR_ENTRIES   32
+#define NGBE_SP_MC_TBL_SIZE   128
+#define NGBE_SP_VFT_TBL_SIZE  128
+#define NGBE_SP_RX_PB_SIZE    42
+
+u32 ngbe_rd32_epcs(struct ngbe_hw *hw, u32 addr)
+{
+	unsigned int portRegOffset;
+	u32 data;
+
+	/* Set the LAN port indicator to portRegOffset[1] */
+	/* 1st, write the regOffset to the IDA_ADDR register */
+	portRegOffset = NGBE_XPCS_IDA_ADDR;
+	wr32(hw, portRegOffset, addr);
+
+	/* 2nd, read the data from the IDA_DATA register */
+	portRegOffset = NGBE_XPCS_IDA_DATA;
+	data = rd32(hw, portRegOffset);
+
+	return data;
+}
+
+void ngbe_wr32_ephy(struct ngbe_hw *hw, u32 addr, u32 data)
+{
+	unsigned int portRegOffset;
+
+	/* Set the LAN port indicator to portRegOffset[1] */
+	/* 1st, write the regOffset to the IDA_ADDR register */
+	portRegOffset = NGBE_ETHPHY_IDA_ADDR;
+	wr32(hw, portRegOffset, addr);
+
+	/* 2nd, write the data to the IDA_DATA register */
+	portRegOffset = NGBE_ETHPHY_IDA_DATA;
+	wr32(hw, portRegOffset, data);
+}
+
+void ngbe_wr32_epcs(struct ngbe_hw *hw, u32 addr, u32 data)
+{
+	unsigned int portRegOffset;
+
+	/* Set the LAN port indicator to portRegOffset[1] */
+	/* 1st, write the regOffset to the IDA_ADDR register */
+	portRegOffset = NGBE_XPCS_IDA_ADDR;
+	wr32(hw, portRegOffset, addr);
+
+	/* 2nd, write the data to the IDA_DATA register */
+	portRegOffset = NGBE_XPCS_IDA_DATA;
+	wr32(hw, portRegOffset, data);
+}
+
+/**
+ * ngbe_get_pcie_msix_count - Gets MSI-X vector count
+ * @hw: pointer to hardware structure
+ *
+ * Read PCIe configuration space, and get the MSI-X vector count from
+ * the capabilities table.
+ **/
+u16 ngbe_get_pcie_msix_count(struct ngbe_hw *hw)
+{
+	u16 msix_count = 1;
+	u16 max_msix_count;
+	u32 pos;
+
+	/* ??? 
max_msix_count for emerald */ + max_msix_count = NGBE_MAX_MSIX_VECTORS_EMERALD; + pos = pci_find_capability(((struct ngbe_adapter *)hw->back)->pdev, + PCI_CAP_ID_MSIX); + if (!pos) + return msix_count; + pci_read_config_word(((struct ngbe_adapter *)hw->back)->pdev, + pos + PCI_MSIX_FLAGS, &msix_count); + + if (NGBE_REMOVED(hw->hw_addr)) + msix_count = 0; + msix_count &= NGBE_PCIE_MSIX_TBL_SZ_MASK; + + /* MSI-X count is zero-based in HW */ + msix_count++; + + if (msix_count > max_msix_count) + msix_count = max_msix_count; + + return msix_count; +} + +/** + * ngbe_init_hw - Generic hardware initialization + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting the hardware, filling the bus info + * structure and media type, clears all on chip counters, initializes receive + * address registers, multicast table, VLAN filter table, calls routine to set + * up link and flow control settings, and leaves transmit and receive units + * disabled and uninitialized + **/ +int ngbe_init_hw(struct ngbe_hw *hw) +{ + int status; + + /* Reset the hardware */ + status = hw->mac.ops.reset_hw(hw); + + if (status == 0) + /* Start the HW */ + status = hw->mac.ops.start_hw(hw); + + return status; +} + + +/** + * ngbe_clear_hw_cntrs - Generic clear hardware counters + * @hw: pointer to hardware structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. + **/ +int ngbe_clear_hw_cntrs(struct ngbe_hw *hw) +{ + u16 i = 0; + + rd32(hw, NGBE_RX_CRC_ERROR_FRAMES_LOW); + rd32(hw, NGBE_RX_LEN_ERROR_FRAMES_LOW); + rd32(hw, NGBE_RDB_LXONTXC); + rd32(hw, NGBE_RDB_LXOFFTXC); + /* ??? 1e0c not found */ + /* rd32(hw, NGBE_MAC_LXONRXC); */ + rd32(hw, NGBE_MAC_LXOFFRXC); + + for (i = 0; i < 8; i++) { + /* ??? move 16? */ + wr32m(hw, NGBE_MMC_CONTROL, NGBE_MMC_CONTROL_UP, i<<16); + rd32(hw, NGBE_MAC_PXOFFRXC); + } + + for (i = 0; i < 8; i++) { + wr32(hw, NGBE_PX_MPRC(i), 0); + } + /* BPRC */ + + rd32(hw, NGBE_PX_GPRC); + rd32(hw, NGBE_PX_GPTC); + rd32(hw, NGBE_PX_GORC_MSB); + rd32(hw, NGBE_PX_GOTC_MSB); + + rd32(hw, NGBE_RX_BC_FRAMES_GOOD_LOW); + rd32(hw, NGBE_RX_UNDERSIZE_FRAMES_GOOD); + rd32(hw, NGBE_RX_OVERSIZE_FRAMES_GOOD); + rd32(hw, NGBE_RX_FRAME_CNT_GOOD_BAD_LOW); + rd32(hw, NGBE_TX_FRAME_CNT_GOOD_BAD_LOW); + rd32(hw, NGBE_TX_MC_FRAMES_GOOD_LOW); + rd32(hw, NGBE_TX_BC_FRAMES_GOOD_LOW); + rd32(hw, NGBE_RDM_DRP_PKT); + return 0; +} + + + +/** + * ngbe_setup_fc - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + **/ +int ngbe_setup_fc(struct ngbe_hw *hw) +{ + int ret_val = 0; + u16 pcap_backplane = 0; + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ngbe_fc_rx_pause) { + ERROR_REPORT1(NGBE_ERROR_UNSUPPORTED, + "ngbe_fc_rx_pause not valid in strict IEEE mode\n"); + ret_val = NGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* + * gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. + */ + if (hw->fc.requested_mode == ngbe_fc_default) + hw->fc.requested_mode = ngbe_fc_full; + + /* + * The possible values of fc.requested_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. 
+ * other: Invalid. + */ + switch (hw->fc.requested_mode) { + case ngbe_fc_none: + /* Flow control completely disabled by software override. */ + break; + case ngbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + if (hw->phy.type != ngbe_phy_m88e1512_sfi && + hw->phy.type != ngbe_phy_yt8521s_sfi) + pcap_backplane |= NGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM; + else + pcap_backplane |= 0x100; + break; + case ngbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + case ngbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + if (hw->phy.type != ngbe_phy_m88e1512_sfi && + hw->phy.type != ngbe_phy_yt8521s_sfi) + pcap_backplane |= NGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM | + NGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM; + else + pcap_backplane |= 0x80; + break; + default: + ERROR_REPORT1(NGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = NGBE_ERR_CONFIG; + goto out; + } + + /* + * AUTOC restart handles negotiation of 1G on backplane + * and copper. + */ + if ((hw->phy.media_type == ngbe_media_type_copper) && + !((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA)) { + ret_val = hw->phy.ops.set_adv_pause(hw, pcap_backplane); + } + +out: + return ret_val; +} + + +/** + * ngbe_get_mac_addr - Generic get MAC address + * @hw: pointer to hardware structure + * @mac_addr: Adapter MAC address + * + * Reads the adapter's MAC address from first Receive Address Register (RAR0) + * A reset of the adapter must be performed prior to calling this function + * in order for the MAC address to have been loaded from the EEPROM into RAR0 + **/ +int ngbe_get_mac_addr(struct ngbe_hw *hw, u8 *mac_addr) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + wr32(hw, NGBE_PSR_MAC_SWC_IDX, 0); + rar_high = rd32(hw, NGBE_PSR_MAC_SWC_AD_H); + rar_low = rd32(hw, NGBE_PSR_MAC_SWC_AD_L); + + for (i = 0; i < 2; i++) + mac_addr[i] = (u8)(rar_high >> (1 - i) * 8); + + for (i = 0; i < 4; i++) + mac_addr[i + 2] = (u8)(rar_low >> (3 - i) * 8); + + return 0; +} + +/** + * ngbe_set_pci_config_data - Generic store PCI bus info + * @hw: pointer to hardware structure + * @link_status: the link status returned by the PCI config space + * + * Stores the PCI bus info (speed, width, type) within the ngbe_hw structure + **/ +void ngbe_set_pci_config_data(struct ngbe_hw *hw, u16 link_status) +{ + if (hw->bus.type == ngbe_bus_type_unknown) + hw->bus.type = ngbe_bus_type_pci_express; + + switch (link_status & NGBE_PCI_LINK_WIDTH) { + case NGBE_PCI_LINK_WIDTH_1: + hw->bus.width = PCIE_LNK_X1; + break; + case NGBE_PCI_LINK_WIDTH_2: + hw->bus.width = PCIE_LNK_X2; + break; + case NGBE_PCI_LINK_WIDTH_4: + hw->bus.width = PCIE_LNK_X4; + break; + case NGBE_PCI_LINK_WIDTH_8: + hw->bus.width = PCIE_LNK_X8; + break; + default: + hw->bus.width = PCIE_LNK_WIDTH_UNKNOWN; + break; + } + + switch (link_status & NGBE_PCI_LINK_SPEED) { + case NGBE_PCI_LINK_SPEED_2500: + hw->bus.speed = PCIE_SPEED_2_5GT; + break; + case NGBE_PCI_LINK_SPEED_5000: + hw->bus.speed = PCIE_SPEED_5_0GT; + break; + case NGBE_PCI_LINK_SPEED_8000: + hw->bus.speed = PCIE_SPEED_8_0GT; + break; + default: + hw->bus.speed = PCI_SPEED_UNKNOWN; + break; + } +} + 
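The RAR packing used by ngbe_get_mac_addr() above, and mirrored by ngbe_set_rar() later in this file, keeps MAC bytes 0-1 in the low 16 bits of NGBE_PSR_MAC_SWC_AD_H and bytes 2-5 in NGBE_PSR_MAC_SWC_AD_L, most-significant byte first. The stand-alone sketch below is illustrative only (it is not part of the patch, and the sample address is made up); it reproduces the round trip between the wire-format address and the two register words.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical sample address, for illustration only. */
		const uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
		uint8_t out[6];
		int i;

		/* Pack as ngbe_set_rar() does: bytes 0-1 -> AD_H, bytes 2-5 -> AD_L. */
		uint32_t rar_high = (uint32_t)addr[1] | ((uint32_t)addr[0] << 8);
		uint32_t rar_low  = (uint32_t)addr[5] | ((uint32_t)addr[4] << 8) |
				    ((uint32_t)addr[3] << 16) | ((uint32_t)addr[2] << 24);

		/* Unpack as ngbe_get_mac_addr() does. */
		for (i = 0; i < 2; i++)
			out[i] = (uint8_t)(rar_high >> (1 - i) * 8);
		for (i = 0; i < 4; i++)
			out[i + 2] = (uint8_t)(rar_low >> (3 - i) * 8);

		printf("AD_H=0x%04x AD_L=0x%08x -> %02x:%02x:%02x:%02x:%02x:%02x\n",
		       (unsigned)rar_high, (unsigned)rar_low,
		       out[0], out[1], out[2], out[3], out[4], out[5]);
		return 0;
	}

Run stand-alone, this prints AD_H=0x0011 AD_L=0x22334455 and recovers 00:11:22:33:44:55, matching the shift arithmetic in the two driver functions.
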
+/** + * ngbe_get_bus_info - Generic set PCI bus info + * @hw: pointer to hardware structure + * + * Gets the PCI bus info (speed, width, type) then calls helper function to + * store this data within the ngbe_hw structure. + **/ +int ngbe_get_bus_info(struct ngbe_hw *hw) +{ + u16 link_status; + + /* Get the negotiated link width and speed from PCI config space */ + link_status = NGBE_READ_PCIE_WORD(hw, NGBE_PCI_LINK_STATUS); + + ngbe_set_pci_config_data(hw, link_status); + + return 0; +} + +/** + * ngbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. + **/ +void ngbe_set_lan_id_multi_port_pcie(struct ngbe_hw *hw) +{ + struct ngbe_bus_info *bus = &hw->bus; + u32 reg = 0; + + reg = rd32(hw, NGBE_CFG_PORT_ST); + bus->lan_id = NGBE_CFG_PORT_ST_LAN_ID(reg); + bus->func = bus->lan_id; +} + +/** + * ngbe_stop_adapter - Generic stop Tx/Rx units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within ngbe_hw struct. Clears interrupts, + * disables transmit and receive units. The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. + **/ +int ngbe_stop_adapter(struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = hw->back; + u16 i; + + /* + * Set the adapter_stopped flag so other driver functions stop touching + * the hardware + */ + hw->adapter_stopped = true; + + /* Disable the receive unit */ + hw->mac.ops.disable_rx(hw); + + /* Set interrupt mask to stop interrupts from being generated */ + ngbe_intr_disable(hw, NGBE_INTR_ALL); + + /* Clear any pending interrupts, flush previous writes */ + wr32(hw, NGBE_PX_MISC_IC, 0xffffffff); + + /* ??? 0bit RW->RO */ + wr32(hw, NGBE_BME_CTL, 0x3); + + + /* Disable the transmit unit. Each queue must be disabled. */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + wr32m(hw, NGBE_PX_TR_CFG(i), + NGBE_PX_TR_CFG_SWFLSH | NGBE_PX_TR_CFG_ENABLE, + NGBE_PX_TR_CFG_SWFLSH); + } + + /* Disable the receive unit by stopping each queue */ + for (i = 0; i < hw->mac.max_rx_queues; i++) { + wr32m(hw, NGBE_PX_RR_CFG(i), + NGBE_PX_RR_CFG_RR_EN, 0); + } + + /* flush all queues disables */ + NGBE_WRITE_FLUSH(hw); + msec_delay(2); + + /* + * Prevent the PCI-E bus from hanging by disabling PCI-E master + * access and verify no pending requests + */ + if (!(adapter->flags2 & NGBE_FLAG2_ECC_ERR_RESET)) + return ngbe_disable_pcie_master(hw); + else + return 0; +} + +/** + * ngbe_led_on - Turns on the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn on + **/ +int ngbe_led_on(struct ngbe_hw *hw, u32 index) +{ + u32 led_reg = rd32(hw, NGBE_CFG_LED_CTL); + + /* ??? */ + /* To turn on the LED, set mode to ON. */ + led_reg |= index | (index << NGBE_CFG_LED_CTL_LINK_OD_SHIFT); + wr32(hw, NGBE_CFG_LED_CTL, led_reg); + NGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ngbe_led_off - Turns off the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn off + **/ +int ngbe_led_off(struct ngbe_hw *hw, u32 index) +{ + u32 led_reg = rd32(hw, NGBE_CFG_LED_CTL); + + /* To turn off the LED, set mode to OFF. 
*/ + led_reg &= ~(index << NGBE_CFG_LED_CTL_LINK_OD_SHIFT); + led_reg |= index; + wr32(hw, NGBE_CFG_LED_CTL, led_reg); + NGBE_WRITE_FLUSH(hw); + return 0; +} + +/** + * ngbe_release_eeprom_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function clears hardware semaphore bits. + **/ +static void ngbe_release_eeprom_semaphore(struct ngbe_hw *hw) +{ + if (ngbe_check_mng_access(hw)) { + wr32m(hw, NGBE_MIS_SWSM, + NGBE_MIS_SWSM_SMBI, 0); + NGBE_WRITE_FLUSH(hw); + } +} + +/** + * ngbe_get_eeprom_semaphore - Get hardware semaphore + * @hw: pointer to hardware structure + * + * Sets the hardware semaphores so EEPROM access can occur for bit-bang method + **/ +static int ngbe_get_eeprom_semaphore(struct ngbe_hw *hw) +{ + int status = NGBE_ERR_EEPROM; + u32 timeout = 2000; + u32 i; + u32 swsm; + + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = rd32(hw, NGBE_MIS_SWSM); + if (!(swsm & NGBE_MIS_SWSM_SMBI)) { + status = 0; + break; + } + usec_delay(50); + } + + if (i == timeout) { + DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore " + "not granted.\n"); + /* + * this release is particularly important because our attempts + * above to get the semaphore may have succeeded, and if there + * was a timeout, we should unconditionally clear the semaphore + * bits to free the driver to make progress + */ + ngbe_release_eeprom_semaphore(hw); + + usec_delay(50); + /* + * one last try + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = rd32(hw, NGBE_MIS_SWSM); + if (!(swsm & NGBE_MIS_SWSM_SMBI)) + status = 0; + } + + return status; +} + +/** + * ngbe_validate_mac_addr - Validate MAC address + * @mac_addr: pointer to MAC address. + * + * Tests a MAC address to ensure it is a valid Individual Address + **/ +int ngbe_validate_mac_addr(u8 *mac_addr) +{ + int status = 0; + + /* Make sure it is not a multicast address */ + if (NGBE_IS_MULTICAST(mac_addr)) { + DEBUGOUT("MAC address is multicast\n"); + status = NGBE_ERR_INVALID_MAC_ADDR; + /* Not a broadcast address */ + } else if (NGBE_IS_BROADCAST(mac_addr)) { + DEBUGOUT("MAC address is broadcast\n"); + status = NGBE_ERR_INVALID_MAC_ADDR; + /* Reject the zero address */ + } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && + mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { + DEBUGOUT("MAC address is all zeros\n"); + status = NGBE_ERR_INVALID_MAC_ADDR; + } + return status; +} + +/** + * ngbe_set_rar - Set Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @vmdq: VMDq "set" or "pool" index + * @enable_addr: set flag that address is active + * + * Puts an ethernet address into a receive address register. 
+ **/ +int ngbe_set_rar(struct ngbe_hw *hw, u32 index, u8 *addr, u64 pools, + u32 enable_addr) +{ + u32 rar_low, rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + ERROR_REPORT2(NGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", index); + return NGBE_ERR_INVALID_ARGUMENT; + } + + /* select the MAC address */ + wr32(hw, NGBE_PSR_MAC_SWC_IDX, index); + + /* setup VMDq pool mapping */ + wr32(hw, NGBE_PSR_MAC_SWC_VM, pools & 0xFFFFFFFF); + + /* + * HW expects these in little endian so we reverse the byte + * order from network order (big endian) to little endian + * + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + rar_low = ((u32)addr[5] | + ((u32)addr[4] << 8) | + ((u32)addr[3] << 16) | + ((u32)addr[2] << 24)); + rar_high = ((u32)addr[1] | + ((u32)addr[0] << 8)); + if (enable_addr != 0) + rar_high |= NGBE_PSR_MAC_SWC_AD_H_AV; + + wr32(hw, NGBE_PSR_MAC_SWC_AD_L, rar_low); + wr32m(hw, NGBE_PSR_MAC_SWC_AD_H, + (NGBE_PSR_MAC_SWC_AD_H_AD(~0) | + NGBE_PSR_MAC_SWC_AD_H_ADTYPE(~0) | + NGBE_PSR_MAC_SWC_AD_H_AV), + rar_high); + + return 0; +} + +/** + * ngbe_clear_rar - Remove Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * + * Clears an ethernet address from a receive address register. + **/ +int ngbe_clear_rar(struct ngbe_hw *hw, u32 index) +{ + u32 rar_entries = hw->mac.num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + ERROR_REPORT2(NGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", index); + return NGBE_ERR_INVALID_ARGUMENT; + } + + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + wr32(hw, NGBE_PSR_MAC_SWC_IDX, index); + + wr32(hw, NGBE_PSR_MAC_SWC_VM, 0); + wr32(hw, NGBE_PSR_MAC_SWC_AD_L, 0); + wr32m(hw, NGBE_PSR_MAC_SWC_AD_H, + (NGBE_PSR_MAC_SWC_AD_H_AD(~0) | + NGBE_PSR_MAC_SWC_AD_H_ADTYPE(~0) | + NGBE_PSR_MAC_SWC_AD_H_AV), + 0); + + return 0; +} + +/** + * ngbe_init_rx_addrs - Initializes receive address filters. + * @hw: pointer to hardware structure + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + **/ +int ngbe_init_rx_addrs(struct ngbe_hw *hw) +{ + u32 i; + u32 rar_entries = hw->mac.num_rar_entries; + u32 psrctl; + + /* + * If the current mac address is valid, assume it is a software override + * to the permanent address. + * Otherwise, use the permanent address from the eeprom. + */ + if (ngbe_validate_mac_addr(hw->mac.addr) == + NGBE_ERR_INVALID_MAC_ADDR) { + /* Get the MAC address from the RAR0 for later reference */ + hw->mac.ops.get_mac_addr(hw, hw->mac.addr); + + DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n", + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2], hw->mac.addr[3], + hw->mac.addr[4], hw->mac.addr[5]); + } else { + /* Setup the receive address. 
*/ + DEBUGOUT("Overriding MAC Address in RAR[0]\n"); + DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n", + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2], hw->mac.addr[3], + hw->mac.addr[4], hw->mac.addr[5]); + + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, + NGBE_PSR_MAC_SWC_AD_H_AV); + } + hw->addr_ctrl.overflow_promisc = 0; + + hw->addr_ctrl.rar_used_count = 1; + + /* Zero out the other receive addresses. */ + DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1); + for (i = 1; i < rar_entries; i++) { + wr32(hw, NGBE_PSR_MAC_SWC_IDX, i); + wr32(hw, NGBE_PSR_MAC_SWC_AD_L, 0); + wr32(hw, NGBE_PSR_MAC_SWC_AD_H, 0); + } + + /* Clear the MTA */ + hw->addr_ctrl.mta_in_use = 0; + psrctl = rd32(hw, NGBE_PSR_CTL); + psrctl &= ~(NGBE_PSR_CTL_MO | NGBE_PSR_CTL_MFE); + psrctl |= hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT; + wr32(hw, NGBE_PSR_CTL, psrctl); + DEBUGOUT(" Clearing MTA\n"); + for (i = 0; i < hw->mac.mcft_size; i++) + wr32(hw, NGBE_PSR_MC_TBL(i), 0); + + hw->mac.ops.init_uta_tables(hw); + + return 0; +} + +/** + * ngbe_add_uc_addr - Adds a secondary unicast address. + * @hw: pointer to hardware structure + * @addr: new address + * + * Adds it to unused receive address register or goes into promiscuous mode. + **/ +static void ngbe_add_uc_addr(struct ngbe_hw *hw, u8 *addr, u32 vmdq) +{ + u32 rar_entries = hw->mac.num_rar_entries; + u32 rar; + + DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + + /* + * Place this address in the RAR if there is room, + * else put the controller into promiscuous mode + */ + if (hw->addr_ctrl.rar_used_count < rar_entries) { + rar = hw->addr_ctrl.rar_used_count; + hw->mac.ops.set_rar(hw, rar, addr, vmdq, + NGBE_PSR_MAC_SWC_AD_H_AV); + DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar); + hw->addr_ctrl.rar_used_count++; + } else { + hw->addr_ctrl.overflow_promisc++; + } + + DEBUGOUT("ngbe_add_uc_addr Complete\n"); +} + +/** + * ngbe_update_uc_addr_list - Updates MAC list of secondary addresses + * @hw: pointer to hardware structure + * @addr_list: the list of new addresses + * @addr_count: number of addresses + * @next: iterator function to walk the address list + * + * The given list replaces any existing list. Clears the secondary addrs from + * receive address registers. Uses unused receive address registers for the + * first secondary addresses, and falls back to promiscuous mode as needed. + * + * Drivers using secondary unicast addresses must set user_set_promisc when + * manually putting the device into promiscuous mode. 
+ **/ +int ngbe_update_uc_addr_list(struct ngbe_hw *hw, u8 *addr_list, + u32 addr_count, ngbe_mc_addr_itr next) +{ + u8 *addr; + u32 i; + u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; + u32 uc_addr_in_use; + u32 vmdq; + + /* + * Clear accounting of old secondary address list, + * don't count RAR[0] + */ + uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1; + hw->addr_ctrl.rar_used_count -= uc_addr_in_use; + hw->addr_ctrl.overflow_promisc = 0; + + /* Zero out the other receive addresses */ + DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use + 1); + for (i = 0; i < uc_addr_in_use; i++) { + wr32(hw, NGBE_PSR_MAC_SWC_IDX, 1 + i); + wr32(hw, NGBE_PSR_MAC_SWC_AD_L, 0); + wr32(hw, NGBE_PSR_MAC_SWC_AD_H, 0); + } + + /* Add the new addresses */ + for (i = 0; i < addr_count; i++) { + DEBUGOUT(" Adding the secondary addresses:\n"); + addr = next(hw, &addr_list, &vmdq); + ngbe_add_uc_addr(hw, addr, vmdq); + } + + if (hw->addr_ctrl.overflow_promisc) { + /* enable promisc if not already in overflow or set by user */ + if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { + DEBUGOUT(" Entering address overflow promisc mode\n"); + wr32m(hw, NGBE_PSR_CTL, + NGBE_PSR_CTL_UPE, NGBE_PSR_CTL_UPE); + } + } else { + /* only disable if set by overflow, not by user */ + if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { + DEBUGOUT(" Leaving address overflow promisc mode\n"); + wr32m(hw, NGBE_PSR_CTL, + NGBE_PSR_CTL_UPE, 0); + } + } + + DEBUGOUT("ngbe_update_uc_addr_list Complete\n"); + return 0; +} + +/** + * ngbe_mta_vector - Determines bit-vector in multicast table to set + * @hw: pointer to hardware structure + * @mc_addr: the multicast address + * + * Extracts the 12 bits, from a multicast address, to determine which + * bit-vector to set in the multicast table. The hardware uses 12 bits, from + * incoming rx multicast addresses, to determine the bit-vector to check in + * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set + * by the MO field of the MCSTCTRL. The MO field is set during initialization + * to mc_filter_type. + **/ +static int ngbe_mta_vector(struct ngbe_hw *hw, u8 *mc_addr) +{ + u32 vector = 0; + + switch (hw->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: /* use bits [46:35] of the address */ + vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: /* use bits [45:34] of the address */ + vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: /* use bits [43:32] of the address */ + vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ + DEBUGOUT("MC filter type param set incorrectly\n"); + ASSERT(0); + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +/** + * ngbe_set_mta - Set bit-vector in multicast table + * @hw: pointer to hardware structure + * @hash_value: Multicast address hash value + * + * Sets the bit-vector in the multicast table. + **/ +static void ngbe_set_mta(struct ngbe_hw *hw, u8 *mc_addr) +{ + u32 vector; + u32 vector_bit; + u32 vector_reg; + + hw->addr_ctrl.mta_in_use++; + + vector = ngbe_mta_vector(hw, mc_addr); + DEBUGOUT1(" bit-vector = 0x%03X\n", vector); + + /* + * The MTA is a register array of 128 32-bit registers. It is treated + * like an array of 4096 bits. We want to set bit + * BitArray[vector_value]. 
So we figure out what register the bit is + * in, read it, OR in the new bit, then write back the new value. The + * register is determined by the upper 7 bits of the vector value and + * the bit within that register are determined by the lower 5 bits of + * the value. + */ + vector_reg = (vector >> 5) & 0x7F; + vector_bit = vector & 0x1F; + hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); +} + +/** + * ngbe_update_mc_addr_list - Updates MAC list of multicast addresses + * @hw: pointer to hardware structure + * @mc_addr_list: the list of new multicast addresses + * @mc_addr_count: number of addresses + * @next: iterator function to walk the multicast address list + * @clear: flag, when set clears the table beforehand + * + * When the clear flag is set, the given list replaces any existing list. + * Hashes the given addresses into the multicast table. + **/ +int ngbe_update_mc_addr_list(struct ngbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ngbe_mc_addr_itr next, + bool clear) +{ + u32 i; + u32 vmdq; + u32 psrctl; + + /* + * Set the new number of MC addresses that we are being requested to + * use. + */ + hw->addr_ctrl.num_mc_addrs = mc_addr_count; + hw->addr_ctrl.mta_in_use = 0; + + /* Clear mta_shadow */ + if (clear) { + DEBUGOUT(" Clearing MTA\n"); + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + } + + /* Update mta_shadow */ + for (i = 0; i < mc_addr_count; i++) { + DEBUGOUT(" Adding the multicast addresses:\n"); + ngbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); + } + + /* Enable mta */ + for (i = 0; i < hw->mac.mcft_size; i++) + wr32a(hw, NGBE_PSR_MC_TBL(0), i, + hw->mac.mta_shadow[i]); + + if (hw->addr_ctrl.mta_in_use > 0) { + psrctl = rd32(hw, NGBE_PSR_CTL); + psrctl &= ~(NGBE_PSR_CTL_MO | NGBE_PSR_CTL_MFE); + psrctl |= NGBE_PSR_CTL_MFE | + (hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT); + wr32(hw, NGBE_PSR_CTL, psrctl); + } + + DEBUGOUT("ngbe_update_mc_addr_list Complete\n"); + return 0; +} + +/** + * ngbe_enable_mc - Enable multicast address in RAR + * @hw: pointer to hardware structure + * + * Enables multicast address in RAR and the use of the multicast hash table. + **/ +int ngbe_enable_mc(struct ngbe_hw *hw) +{ + struct ngbe_addr_filter_info *a = &hw->addr_ctrl; + u32 psrctl; + + if (a->mta_in_use > 0) { + psrctl = rd32(hw, NGBE_PSR_CTL); + psrctl &= ~(NGBE_PSR_CTL_MO | NGBE_PSR_CTL_MFE); + psrctl |= NGBE_PSR_CTL_MFE | + (hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT); + wr32(hw, NGBE_PSR_CTL, psrctl); + } + + return 0; +} + +/** + * ngbe_disable_mc - Disable multicast address in RAR + * @hw: pointer to hardware structure + * + * Disables multicast address in RAR and the use of the multicast hash table. + **/ +int ngbe_disable_mc(struct ngbe_hw *hw) +{ + struct ngbe_addr_filter_info *a = &hw->addr_ctrl; + u32 psrctl; + + if (a->mta_in_use > 0) { + psrctl = rd32(hw, NGBE_PSR_CTL); + psrctl &= ~(NGBE_PSR_CTL_MO | NGBE_PSR_CTL_MFE); + psrctl |= hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT; + wr32(hw, NGBE_PSR_CTL, psrctl); + } + + return 0; +} + +/** + * ngbe_fc_enable - Enable flow control + * @hw: pointer to hardware structure + * + * Enable flow control according to the current settings. 
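The checks at the top of the function expect a sane caller-side configuration; a hypothetical setup that would pass them (the numbers are purely illustrative, not recommended values):

static void ex_fc_request_full(struct ngbe_hw *hw)
{
	hw->fc.requested_mode = ngbe_fc_full;
	hw->fc.disable_fc_autoneg = false;
	hw->fc.pause_time = 0xffff;	/* must be non-zero */
	hw->fc.high_water = 32;		/* needed whenever Tx pause is used */
	hw->fc.low_water = 16;		/* non-zero and strictly below high_water */
}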
+ **/ +int ngbe_fc_enable(struct ngbe_hw *hw) +{ + int ret_val = 0; + u32 mflcn_reg, fccfg_reg; + u32 reg; + u32 fcrtl, fcrth; + + /* Validate the water mark configuration */ + if (!hw->fc.pause_time) { + ret_val = NGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + + /* Low water mark of zero causes XOFF floods */ + if ((hw->fc.current_mode & ngbe_fc_tx_pause) && hw->fc.high_water) { + if (!hw->fc.low_water || hw->fc.low_water >= hw->fc.high_water) { + DEBUGOUT("Invalid water mark configuration\n"); + ret_val = NGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + } + + /* Negotiate the fc mode to use */ + ngbe_fc_autoneg(hw); + + /* Disable any previous flow control settings */ + mflcn_reg = rd32(hw, NGBE_MAC_RX_FLOW_CTRL); + mflcn_reg &= ~NGBE_MAC_RX_FLOW_CTRL_RFE; + + fccfg_reg = rd32(hw, NGBE_RDB_RFCC); + fccfg_reg &= ~NGBE_RDB_RFCC_RFCE_802_3X; + + /* + * The possible values of fc.current_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.current_mode) { + case ngbe_fc_none: + /* + * Flow control is disabled by software override or autoneg. + * The code below will actually disable it in the HW. + */ + break; + case ngbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + mflcn_reg |= NGBE_MAC_RX_FLOW_CTRL_RFE; + break; + case ngbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + fccfg_reg |= NGBE_RDB_RFCC_RFCE_802_3X; + break; + case ngbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + mflcn_reg |= NGBE_MAC_RX_FLOW_CTRL_RFE; + fccfg_reg |= NGBE_RDB_RFCC_RFCE_802_3X; + break; + default: + ERROR_REPORT1(NGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = NGBE_ERR_CONFIG; + goto out; + } + + /* Set 802.3x based flow control settings. */ + wr32(hw, NGBE_MAC_RX_FLOW_CTRL, mflcn_reg); + wr32(hw, NGBE_RDB_RFCC, fccfg_reg); + + /* Set up and enable Rx high/low water mark thresholds, enable XON. */ + if ((hw->fc.current_mode & ngbe_fc_tx_pause) && + hw->fc.high_water) { + /* 32Byte granularity */ + fcrtl = (hw->fc.low_water << 10) | + NGBE_RDB_RFCL_XONE; + wr32(hw, NGBE_RDB_RFCL, fcrtl); + fcrth = (hw->fc.high_water << 10) | + NGBE_RDB_RFCH_XOFFE; + } else { + wr32(hw, NGBE_RDB_RFCL, 0); + /* + * In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the Rx packet buffer size - 24KB. This allows + * the Tx switch to function even under heavy Rx + * workloads. 
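(The 24 KB of headroom is the 24576 subtracted just below.) In the Tx-pause-enabled branch above, the water marks are instead shifted left by 10 bits and OR'd with the XON/XOFF enable bits; a standalone sketch of that packing with placeholder bit names:

#include <stdint.h>

#define EX_XONE		0x80000000u	/* placeholder for NGBE_RDB_RFCL_XONE */
#define EX_XOFFE	0x80000000u	/* placeholder for NGBE_RDB_RFCH_XOFFE */

/* Pack the low/high water marks the way ngbe_fc_enable() writes them. */
static void ex_pack_watermarks(uint32_t low_water, uint32_t high_water,
			       uint32_t *fcrtl, uint32_t *fcrth)
{
	*fcrtl = (low_water << 10) | EX_XONE;
	*fcrth = (high_water << 10) | EX_XOFFE;
}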
+ */ + fcrth = rd32(hw, NGBE_RDB_PB_SZ) - 24576; + } + + wr32(hw, NGBE_RDB_RFCH, fcrth); + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time * 0x00010000; + wr32(hw, NGBE_RDB_RFCV, reg); + + /* Configure flow control refresh threshold value */ + wr32(hw, NGBE_RDB_RFCRT, hw->fc.pause_time / 2); + +out: + return ret_val; +} + +/** + * ngbe_negotiate_fc - Negotiate flow control + * @hw: pointer to hardware structure + * @adv_reg: flow control advertised settings + * @lp_reg: link partner's flow control settings + * @adv_sym: symmetric pause bit in advertisement + * @adv_asm: asymmetric pause bit in advertisement + * @lp_sym: symmetric pause bit in link partner advertisement + * @lp_asm: asymmetric pause bit in link partner advertisement + * + * Find the intersection between advertised settings and link partner's + * advertised settings + **/ +static int ngbe_negotiate_fc(struct ngbe_hw *hw, u32 adv_reg, u32 lp_reg, + u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) +{ + if ((!(adv_reg)) || (!(lp_reg))) { + ERROR_REPORT3(NGBE_ERROR_UNSUPPORTED, + "Local or link partner's advertised flow control " + "settings are NULL. Local: %x, link partner: %x\n", + adv_reg, lp_reg); + return NGBE_ERR_FC_NOT_NEGOTIATED; + } + + if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { + /* + * Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == ngbe_fc_full) { + hw->fc.current_mode = ngbe_fc_full; + DEBUGOUT("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = ngbe_fc_rx_pause; + DEBUGOUT("Flow Control=RX PAUSE frames only\n"); + } + } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && + (lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = ngbe_fc_tx_pause; + DEBUGOUT("Flow Control = TX PAUSE frames only.\n"); + } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && + !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = ngbe_fc_rx_pause; + DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); + } else { + hw->fc.current_mode = ngbe_fc_none; + DEBUGOUT("Flow Control = NONE.\n"); + } + return 0; +} + +/** + * ngbe_fc_autoneg_copper - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + **/ +static int ngbe_fc_autoneg_copper(struct ngbe_hw *hw) +{ + u8 technology_ability_reg = 0; + u8 lp_technology_ability_reg = 0; + + if (!((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA)) { + hw->phy.ops.get_adv_pause(hw, &technology_ability_reg); + hw->phy.ops.get_lp_adv_pause(hw, &lp_technology_ability_reg); + } + return ngbe_negotiate_fc(hw, (u32)technology_ability_reg, + (u32)lp_technology_ability_reg, + NGBE_TAF_SYM_PAUSE, NGBE_TAF_ASM_PAUSE, + NGBE_TAF_SYM_PAUSE, NGBE_TAF_ASM_PAUSE); +} + +/** + * ngbe_fc_autoneg - Configure flow control + * @hw: pointer to hardware structure + * + * Compares our advertised flow control capabilities to those advertised by + * our link partner, and determines the proper flow control mode to use. + **/ +void ngbe_fc_autoneg(struct ngbe_hw *hw) +{ + int ret_val = NGBE_ERR_FC_NOT_NEGOTIATED; + u32 speed; + bool link_up = 0; + + + /* + * AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. 
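The resolution performed by ngbe_negotiate_fc() above follows the familiar symmetric/asymmetric PAUSE truth table; a standalone sketch of the same decision on plain booleans (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

enum ex_fc_mode { EX_FC_NONE, EX_FC_RX_PAUSE, EX_FC_TX_PAUSE, EX_FC_FULL };

/* Same decision as ngbe_negotiate_fc(): local and link partner symmetric/
 * asymmetric PAUSE bits, plus whether full flow control was requested. */
static enum ex_fc_mode ex_resolve_fc(bool adv_sym, bool adv_asm,
				     bool lp_sym, bool lp_asm,
				     bool requested_full)
{
	if (adv_sym && lp_sym)
		return requested_full ? EX_FC_FULL : EX_FC_RX_PAUSE;
	if (!adv_sym && adv_asm && lp_sym && lp_asm)
		return EX_FC_TX_PAUSE;
	if (adv_sym && adv_asm && !lp_sym && lp_asm)
		return EX_FC_RX_PAUSE;
	return EX_FC_NONE;
}

int main(void)
{
	/* we advertise asymmetric only, partner advertises both -> Tx pause (2) */
	printf("%d\n", ex_resolve_fc(false, true, true, true, false));
	return 0;
}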
+ */ + if (hw->fc.disable_fc_autoneg) { + ERROR_REPORT1(NGBE_ERROR_UNSUPPORTED, + "Flow control autoneg is disabled"); + goto out; + } + + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (!link_up) { + ERROR_REPORT1(NGBE_ERROR_SOFTWARE, "The link is down"); + goto out; + } + + switch (hw->phy.media_type) { + /* Autoneg flow control on fiber adapters */ + case ngbe_media_type_fiber: + break; + + /* Autoneg flow control on copper adapters */ + case ngbe_media_type_copper: + ret_val = ngbe_fc_autoneg_copper(hw); + break; + + default: + break; + } + +out: + if (ret_val == NGBE_OK) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } +} + + +/** + * ngbe_disable_pcie_master - Disable PCI-express master access + * @hw: pointer to hardware structure + * + * Disables PCI-Express master access and verifies there are no pending + * requests. NGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable + * bit hasn't caused the master requests to be disabled, else 0 + * is returned signifying master requests disabled. + **/ +int ngbe_disable_pcie_master(struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = hw->back; + int status = 0; + u32 i; + u16 vid = 0; + u16 cmd = 0; + u32 reg32 = 0; + + /* Always set this bit to ensure any future transactions are blocked */ + pci_clear_master(((struct ngbe_adapter *)hw->back)->pdev); + + /* Exit if master requests are blocked */ + if (!(rd32(hw, NGBE_PX_TRANSACTION_PENDING)) || + NGBE_REMOVED(hw->hw_addr)) + goto out; + + + /* Poll for master request bit to clear */ + for (i = 0; i < NGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { + usec_delay(100); + if (!(rd32(hw, NGBE_PX_TRANSACTION_PENDING))) + goto out; + } + + ERROR_REPORT1(NGBE_ERROR_POLLING, + "PCIe transaction pending bit did not clear.\n"); + status = NGBE_ERR_MASTER_REQUESTS_PENDING; + + /* print out PCI configuration space value */ + ngbe_print_tx_hang_status(adapter); + pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &vid); + ERROR_REPORT1(NGBE_ERROR_POLLING, "PCI VID is 0x%x\n", vid); + + pci_read_config_word(adapter->pdev, PCI_COMMAND, &cmd); + ERROR_REPORT1(NGBE_ERROR_POLLING, "PCI COMMAND value is 0x%x.\n", cmd); + + reg32 = rd32(hw, 0x10000); + ERROR_REPORT1(NGBE_ERROR_POLLING, "read 0x10000 value is 0x%08x\n", reg32); + +out: + return status; +} + +/** + * ngbe_acquire_swfw_sync - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +int ngbe_acquire_swfw_sync(struct ngbe_hw *hw, u32 mask) +{ + u32 gssr = 0; + u32 swmask = mask; + u32 fwmask = mask << 16; + u32 timeout = 200; + u32 i; + + for (i = 0; i < timeout; i++) { + /* + * SW NVM semaphore bit is used for access to all + * SW_FW_SYNC bits (not just NVM) + */ + if (ngbe_get_eeprom_semaphore(hw)) + return NGBE_ERR_SWFW_SYNC; + + if (ngbe_check_mng_access(hw)) { + gssr = rd32(hw, NGBE_MNG_SWFW_SYNC); + if (!(gssr & (fwmask | swmask))) { + gssr |= swmask; + wr32(hw, NGBE_MNG_SWFW_SYNC, gssr); + ngbe_release_eeprom_semaphore(hw); + return 0; + } else { + /* Resource is currently in use by FW or SW */ + ngbe_release_eeprom_semaphore(hw); + msec_delay(5); + } + } + } + + ERROR_REPORT1(NGBE_ERROR_POLLING, + "ngbe_acquire_swfw_sync: i = %u, gssr = %u\n", i, gssr); + + /* If time expired clear the bits holding the lock and retry */ + if (gssr & (fwmask | swmask)) + 
ngbe_release_swfw_sync(hw, gssr & (fwmask | swmask)); + + msec_delay(5); + return NGBE_ERR_SWFW_SYNC; +} + +/** + * ngbe_release_swfw_sync - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +void ngbe_release_swfw_sync(struct ngbe_hw *hw, u32 mask) +{ + ngbe_get_eeprom_semaphore(hw); + if (ngbe_check_mng_access(hw)) + wr32m(hw, NGBE_MNG_SWFW_SYNC, mask, 0); + + ngbe_release_eeprom_semaphore(hw); +} + +/** + * ngbe_disable_sec_rx_path - Stops the receive data path + * @hw: pointer to hardware structure + * + * Stops the receive data path and waits for the HW to internally empty + * the Rx security block + **/ +int ngbe_disable_sec_rx_path(struct ngbe_hw *hw) +{ +#define NGBE_MAX_SECRX_POLL 40 + + int i; + int secrxreg; + + wr32m(hw, NGBE_RSEC_CTL, + NGBE_RSEC_CTL_RX_DIS, NGBE_RSEC_CTL_RX_DIS); + for (i = 0; i < NGBE_MAX_SECRX_POLL; i++) { + secrxreg = rd32(hw, NGBE_RSEC_ST); + if (secrxreg & NGBE_RSEC_ST_RSEC_RDY) + break; + else + /* Use interrupt-safe sleep just in case */ + usec_delay(1000); + } + + /* For informational purposes only */ + if (i >= NGBE_MAX_SECRX_POLL) + DEBUGOUT("Rx unit being enabled before security " + "path fully disabled. Continuing with init.\n"); + + return 0; +} + +/** + * ngbe_enable_sec_rx_path - Enables the receive data path + * @hw: pointer to hardware structure + * + * Enables the receive data path. + **/ +int ngbe_enable_sec_rx_path(struct ngbe_hw *hw) +{ + + wr32m(hw, NGBE_RSEC_CTL, + NGBE_RSEC_CTL_RX_DIS, 0); + NGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ngbe_insert_mac_addr - Find a RAR for this mac address + * @hw: pointer to hardware structure + * @addr: Address to put into receive address register + * @vmdq: VMDq pool to assign + * + * Puts an ethernet address into a receive address register, or + * finds the rar that it is aleady in; adds to the pool list + **/ +int ngbe_insert_mac_addr(struct ngbe_hw *hw, u8 *addr, u32 vmdq) +{ + static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF; + u32 first_empty_rar = NO_EMPTY_RAR_FOUND; + u32 rar; + u32 rar_low, rar_high; + u32 addr_low, addr_high; + + /* swap bytes for HW little endian */ + addr_low = addr[5] | (addr[4] << 8) + | (addr[3] << 16) + | (addr[2] << 24); + addr_high = addr[1] | (addr[0] << 8); + + /* + * Either find the mac_id in rar or find the first empty space. + * rar_highwater points to just after the highest currently used + * rar in order to shorten the search. It grows when we add a new + * rar to the top. 
+ */ + for (rar = 0; rar < hw->mac.rar_highwater; rar++) { + wr32(hw, NGBE_PSR_MAC_SWC_IDX, rar); + rar_high = rd32(hw, NGBE_PSR_MAC_SWC_AD_H); + + if (((NGBE_PSR_MAC_SWC_AD_H_AV & rar_high) == 0) + && first_empty_rar == NO_EMPTY_RAR_FOUND) { + first_empty_rar = rar; + } else if ((rar_high & 0xFFFF) == addr_high) { + rar_low = rd32(hw, NGBE_PSR_MAC_SWC_AD_L); + if (rar_low == addr_low) + break; /* found it already in the rars */ + } + } + + if (rar < hw->mac.rar_highwater) { + + } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) { + /* stick it into first empty RAR slot we found */ + rar = first_empty_rar; + hw->mac.ops.set_rar(hw, rar, addr, vmdq, + NGBE_PSR_MAC_SWC_AD_H_AV); + } else if (rar == hw->mac.rar_highwater) { + /* add it to the top of the list and inc the highwater mark */ + hw->mac.ops.set_rar(hw, rar, addr, vmdq, + NGBE_PSR_MAC_SWC_AD_H_AV); + hw->mac.rar_highwater++; + } else if (rar >= hw->mac.num_rar_entries) { + return NGBE_ERR_INVALID_MAC_ADDR; + } + + return rar; +} + +/** + * ngbe_clear_vmdq - Disassociate a VMDq pool index from a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to disassociate + * @vmdq: VMDq pool index to remove from the rar + **/ +int ngbe_clear_vmdq(struct ngbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 mpsar_lo; + u32 rar_entries = hw->mac.num_rar_entries; + + UNREFERENCED_PARAMETER(vmdq); + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + ERROR_REPORT2(NGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", rar); + return NGBE_ERR_INVALID_ARGUMENT; + } + + wr32(hw, NGBE_PSR_MAC_SWC_IDX, rar); + mpsar_lo = rd32(hw, NGBE_PSR_MAC_SWC_VM); + + if (NGBE_REMOVED(hw->hw_addr)) + goto done; + + if (!mpsar_lo) + goto done; + + /* was that the last pool using this rar? */ + if (mpsar_lo == 0 && rar != 0) + hw->mac.ops.clear_rar(hw, rar); +done: + return 0; +} + +/** + * ngbe_set_vmdq - Associate a VMDq pool index with a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq pool index + **/ +int ngbe_set_vmdq(struct ngbe_hw *hw, u32 rar, u32 pool) +{ + u32 rar_entries = hw->mac.num_rar_entries; + + UNREFERENCED_PARAMETER(pool); + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + ERROR_REPORT2(NGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", rar); + return NGBE_ERR_INVALID_ARGUMENT; + } + + return 0; +} + +/** + * This function should only be involved in the IOV mode. + * In IOV mode, Default pool is next pool after the number of + * VFs advertized and not 0. + * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index] + * + * ngbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address + * @hw: pointer to hardware struct + * @vmdq: VMDq pool index + **/ +int ngbe_set_vmdq_san_mac(struct ngbe_hw *hw, u32 vmdq) +{ + u32 rar = hw->mac.san_mac_rar_index; + + /* ??? 
*/ + if (vmdq > 32) + return -1; + + wr32(hw, NGBE_PSR_MAC_SWC_IDX, rar); + wr32(hw, NGBE_PSR_MAC_SWC_VM, 1 << vmdq); + + + return 0; +} + +/** + * ngbe_init_uta_tables - Initialize the Unicast Table Array + * @hw: pointer to hardware structure + **/ +int ngbe_init_uta_tables(struct ngbe_hw *hw) +{ + int i; + + DEBUGOUT(" Clearing UTA\n"); + + for (i = 0; i < 128; i++) + wr32(hw, NGBE_PSR_UC_TBL(i), 0); + + return 0; +} + +/** + * ngbe_find_vlvf_slot - find the vlanid or the first empty slot + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * + * return the VLVF index where this VLAN id should be placed + * + **/ +int ngbe_find_vlvf_slot(struct ngbe_hw *hw, u32 vlan) +{ + u32 bits = 0; + u32 first_empty_slot = 0; + int regindex; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* + * Search for the vlan id in the VLVF entries. Save off the first empty + * slot found along the way + */ + for (regindex = 1; regindex < NGBE_PSR_VLAN_SWC_ENTRIES; regindex++) { + wr32(hw, NGBE_PSR_VLAN_SWC_IDX, regindex); + bits = rd32(hw, NGBE_PSR_VLAN_SWC); + if (!bits && !(first_empty_slot)) + first_empty_slot = regindex; + else if ((bits & 0x0FFF) == vlan) + break; + } + + /* + * If regindex is less than NGBE_VLVF_ENTRIES, then we found the vlan + * in the VLVF. Else use the first empty VLVF register for this + * vlan id. + */ + if (regindex >= NGBE_PSR_VLAN_SWC_ENTRIES) { + if (first_empty_slot) + regindex = first_empty_slot; + else { + ERROR_REPORT1(NGBE_ERROR_SOFTWARE, + "No space in VLVF.\n"); + regindex = NGBE_ERR_NO_SPACE; + } + } + + return regindex; +} + +/** + * ngbe_set_vfta - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFVFB + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +int ngbe_set_vfta(struct ngbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on) +{ + int regindex; + u32 bitindex; + u32 vfta; + u32 targetbit; + int ret_val = 0; + bool vfta_changed = false; + + if (vlan > 4095) + return NGBE_ERR_PARAM; + + /* + * this is a 2 part operation - first the VFTA, then the + * VLVF and VLVFB if VT Mode is set + * We don't write the VFTA until we know the VLVF part succeeded. 
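The VFTA indexing used in Part 1 below splits the VLAN id into a register index and a bit index, just like the MTA hash handling earlier; a standalone worked example:

#include <stdint.h>
#include <stdio.h>

/* Split a VLAN id into VFTA register index and bit index, as Part 1 does. */
static void ex_vfta_slot(uint32_t vlan, uint32_t *regindex, uint32_t *bitindex)
{
	*regindex = (vlan >> 5) & 0x7F;
	*bitindex = vlan & 0x1F;
}

int main(void)
{
	uint32_t reg, bit;

	ex_vfta_slot(100, &reg, &bit);
	/* VLAN 100 -> register 3, bit 4 */
	printf("reg=%u bit=%u\n", reg, bit);
	return 0;
}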
+ */ + + /* Part 1 + * The VFTA is a bitstring made up of 128 32-bit registers + * that enable the particular VLAN id, much like the MTA: + * bits[11-5]: which register + * bits[4-0]: which bit in the register + */ + regindex = (vlan >> 5) & 0x7F; + bitindex = vlan & 0x1F; + targetbit = (1 << bitindex); + /* errata 5 */ + vfta = hw->mac.vft_shadow[regindex]; + if (vlan_on) { + if (!(vfta & targetbit)) { + vfta |= targetbit; + vfta_changed = true; + } + } else { + if ((vfta & targetbit)) { + vfta &= ~targetbit; + vfta_changed = true; + } + } + + /* Part 2 + * Call ngbe_set_vlvf to set VLVFB and VLVF + */ + ret_val = ngbe_set_vlvf(hw, vlan, vind, vlan_on, + &vfta_changed); + if (ret_val != 0) + return ret_val; + + if (vfta_changed) + wr32(hw, NGBE_PSR_VLAN_TBL(regindex), vfta); + /* errata 5 */ + hw->mac.vft_shadow[regindex] = vfta; + return 0; +} + +/** + * ngbe_set_vlvf - Set VLAN Pool Filter + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFVFB + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * @vfta_changed: pointer to boolean flag which indicates whether VFTA + * should be changed + * + * Turn on/off specified bit in VLVF table. + **/ +int ngbe_set_vlvf(struct ngbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool *vfta_changed) +{ + u32 vt; + + if (vlan > 4095) + return NGBE_ERR_PARAM; + + /* If VT Mode is set + * Either vlan_on + * make sure the vlan is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + vt = rd32(hw, NGBE_CFG_PORT_CTL); + if (vt & NGBE_CFG_PORT_CTL_NUM_VT_MASK) { + int vlvf_index; + u32 bits = 0; + + vlvf_index = ngbe_find_vlvf_slot(hw, vlan); + if (vlvf_index < 0) + return vlvf_index; + + wr32(hw, NGBE_PSR_VLAN_SWC_IDX, vlvf_index); + if (vlan_on) { + /* set the pool bit */ + if (vind < 32) { + bits = rd32(hw, + NGBE_PSR_VLAN_SWC_VM_L); + bits |= (1 << vind); + wr32(hw, + NGBE_PSR_VLAN_SWC_VM_L, + bits); + } + } else { + /* clear the pool bit */ + if (vind < 32) { + bits = rd32(hw, + NGBE_PSR_VLAN_SWC_VM_L); + bits &= ~(1 << vind); + wr32(hw, + NGBE_PSR_VLAN_SWC_VM_L, + bits); + } else { + bits |= rd32(hw, + NGBE_PSR_VLAN_SWC_VM_L); + } + } + + /* + * If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. + */ + if (bits) { + wr32(hw, NGBE_PSR_VLAN_SWC, + (NGBE_PSR_VLAN_SWC_VIEN | vlan)); + if ((!vlan_on) && (vfta_changed != NULL)) { + /* someone wants to clear the vfta entry + * but some pools/VFs are still using it. + * Ignore it. 
*/ + *vfta_changed = false; + } + } else + wr32(hw, NGBE_PSR_VLAN_SWC, 0); + } + + return 0; +} + +/** + * ngbe_clear_vfta - Clear VLAN filter table + * @hw: pointer to hardware structure + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +int ngbe_clear_vfta(struct ngbe_hw *hw) +{ + u32 offset; + + for (offset = 0; offset < hw->mac.vft_size; offset++) { + wr32(hw, NGBE_PSR_VLAN_TBL(offset), 0); + /* errata 5 */ + hw->mac.vft_shadow[offset] = 0; + } + + for (offset = 0; offset < NGBE_PSR_VLAN_SWC_ENTRIES; offset++) { + wr32(hw, NGBE_PSR_VLAN_SWC_IDX, offset); + wr32(hw, NGBE_PSR_VLAN_SWC, 0); + wr32(hw, NGBE_PSR_VLAN_SWC_VM_L, 0); + } + + return 0; +} + + +/** + * ngbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for anti-spoofing + * @pf: Physical Function pool - do not enable anti-spoofing for the PF + * + **/ +void ngbe_set_mac_anti_spoofing(struct ngbe_hw *hw, bool enable, int pf) +{ + u64 pfvfspoof = 0; + + if (enable) { + /* + * The PF should be allowed to spoof so that it can support + * emulation mode NICs. Do not set the bits assigned to the PF + * Remaining pools belong to the PF so they do not need to have + * anti-spoofing enabled. + */ + pfvfspoof = (1 << pf) - 1; + wr32(hw, NGBE_TDM_MAC_AS_L, + pfvfspoof & 0xff); + } else { + wr32(hw, NGBE_TDM_MAC_AS_L, 0); + } +} + +/** + * ngbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for VLAN anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing + * + **/ +void ngbe_set_vlan_anti_spoofing(struct ngbe_hw *hw, bool enable, int vf) +{ + u32 pfvfspoof; + + if (vf > 8) + return; + + pfvfspoof = rd32(hw, NGBE_TDM_VLAN_AS_L); + if (enable) + pfvfspoof |= (1 << vf); + else + pfvfspoof &= ~(1 << vf); + wr32(hw, NGBE_TDM_VLAN_AS_L, pfvfspoof); + +} + +/** + * ngbe_set_ethertype_anti_spoofing - Enable/Disable Ethertype anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for Ethertype anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing + * + **/ +void ngbe_set_ethertype_anti_spoofing(struct ngbe_hw *hw, + bool enable, int vf) +{ + u32 pfvfspoof; + + if (vf <= 8) { + pfvfspoof = rd32(hw, NGBE_TDM_ETYPE_AS_L); + if (enable) + pfvfspoof |= (1 << vf); + else + pfvfspoof &= ~(1 << vf); + wr32(hw, NGBE_TDM_ETYPE_AS_L, pfvfspoof); + } +} + +/** + * ngbe_get_device_caps - Get additional device capabilities + * @hw: pointer to hardware structure + * @device_caps: the EEPROM word with the extra device capabilities + * + * This function will read the EEPROM location for the device capabilities, + * and return the word through device_caps. + **/ +int ngbe_get_device_caps(struct ngbe_hw *hw, u16 *device_caps) +{ + + hw->eeprom.ops.read(hw, + hw->eeprom.sw_region_offset + NGBE_DEVICE_CAPS, device_caps); + + return 0; +} + +/** + * ngbe_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. 
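The returned byte is the value that makes the whole buffer sum to zero modulo 256; a standalone check of that property with arbitrary sample bytes:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as ngbe_calculate_checksum(): two's complement of the
 * 8-bit sum over the buffer. */
static uint8_t ex_checksum(const uint8_t *buf, uint32_t len)
{
	uint8_t sum = 0;
	uint32_t i;

	for (i = 0; i < len; i++)
		sum += buf[i];
	return (uint8_t)(0 - sum);
}

int main(void)
{
	const uint8_t buf[5] = { 0x01, 0x02, 0x03, 0x04, 0x05 };
	uint8_t csum = ex_checksum(buf, sizeof(buf));
	uint8_t total = 0;
	uint32_t i;

	for (i = 0; i < sizeof(buf); i++)
		total += buf[i];
	total += csum;
	/* prints csum=0xf1 total=0x00 */
	printf("csum=0x%02x total=0x%02x\n", csum, total);
	return 0;
}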
+ **/ +u8 ngbe_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8) (0 - sum); +} + +/** + * ngbe_host_interface_command - Issue command to manageability block + * @hw: pointer to the HW structure + * @buffer: contains the command to write and where the return status will + * be placed + * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * @return_data: read and return data from the buffer (true) or not (false) + * Needed because FW structures are big endian and decoding of + * these fields can be 8 bit or 16 bit based on command. Decoding + * is not easily understood without making a table of commands. + * So we will leave this up to the caller to read back the data + * in these cases. + * + * Communicates with the manageability block. On success return 0 + * else return NGBE_ERR_HOST_INTERFACE_COMMAND. + **/ +int ngbe_host_interface_command(struct ngbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data) +{ + u32 hicr, i, bi; + u32 hdr_size = sizeof(struct ngbe_hic_hdr); + u16 buf_len; + u32 dword_len; + int status = 0; + u32 buf[64] = {}; + + if (length == 0 || length > NGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + DEBUGOUT1("Buffer length failure buffersize=%d.\n", length); + return NGBE_ERR_HOST_INTERFACE_COMMAND; + } + + if (hw->mac.ops.acquire_swfw_sync(hw, NGBE_MNG_SWFW_SYNC_SW_MB) + != 0) { + return NGBE_ERR_SWFW_SYNC; + } + + + /* Calculate length in DWORDs. We must be DWORD aligned */ + if ((length % (sizeof(u32))) != 0) { + DEBUGOUT("Buffer length failure, not aligned to dword"); + status = NGBE_ERR_INVALID_ARGUMENT; + goto rel_out; + } + + /*read to clean all status*/ + if (ngbe_check_mng_access(hw)) { + hicr = rd32(hw, NGBE_MNG_MBOX_CTL); + if ((hicr & NGBE_MNG_MBOX_CTL_FWRDY)) + ERROR_REPORT1(NGBE_ERROR_CAUTION, + "fwrdy is set before command.\n"); + } + + dword_len = length >> 2; + + /* The device driver writes the relevant command block + * into the ram area. + */ + for (i = 0; i < dword_len; i++) { + if (ngbe_check_mng_access(hw)) + wr32a(hw, NGBE_MNG_MBOX, + i, NGBE_CPU_TO_LE32(buffer[i])); + else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } + /* Setting this bit tells the ARC that a new command is pending. 
*/ + if (ngbe_check_mng_access(hw)) + wr32m(hw, NGBE_MNG_MBOX_CTL, + NGBE_MNG_MBOX_CTL_SWRDY, NGBE_MNG_MBOX_CTL_SWRDY); + else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + + for (i = 0; i < timeout; i++) { + if (ngbe_check_mng_access(hw)) { + hicr = rd32(hw, NGBE_MNG_MBOX_CTL); + if ((hicr & NGBE_MNG_MBOX_CTL_FWRDY)) + break; + } + msec_delay(1); + } + + buf[0] = rd32(hw, NGBE_MNG_MBOX); + /* Check command completion */ + if (timeout != 0 && i == timeout) { + ERROR_REPORT1(NGBE_ERROR_CAUTION, + "Command has failed with no status valid.\n"); + printk("===%x= %x=\n", buffer[0] & 0xff, (~buf[0] >> 24)); + printk("===%08x\n", rd32(hw, 0x1e100)); + printk("===%08x\n", rd32(hw, 0x1e104)); + printk("===%08x\n", rd32(hw, 0x1e108)); + printk("===%08x\n", rd32(hw, 0x1e10c)); + printk("===%08x\n", rd32(hw, 0x1e044)); + printk("===%08x\n", rd32(hw, 0x10000)); + if( (buffer[0] & 0xff) != (~buf[0] >> 24)) { + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; + } + } + + if (!return_data) + goto rel_out; + + /* Calculate length in DWORDs */ + dword_len = hdr_size >> 2; + + /* first pull in the header so we know the buffer length */ + for (bi = 0; bi < dword_len; bi++) { + if (ngbe_check_mng_access(hw)) { + buffer[bi] = rd32a(hw, NGBE_MNG_MBOX, + bi); + NGBE_LE32_TO_CPUS(&buffer[bi]); + } else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } + + /* If there is any thing in data position pull it in */ + buf_len = ((struct ngbe_hic_hdr *)buffer)->buf_len; + if (buf_len == 0) + goto rel_out; + + if (length < buf_len + hdr_size) { + DEBUGOUT("Buffer not large enough for reply message.\n"); + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; + } + + /* Calculate length in DWORDs, add 3 for odd lengths */ + dword_len = (buf_len + 3) >> 2; + + /* Pull in the rest of the buffer (bi is where we left off) */ + for (; bi <= dword_len; bi++) { + if (ngbe_check_mng_access(hw)) { + buffer[bi] = rd32a(hw, NGBE_MNG_MBOX, + bi); + NGBE_LE32_TO_CPUS(&buffer[bi]); + } else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } + +rel_out: + hw->mac.ops.release_swfw_sync(hw, NGBE_MNG_SWFW_SYNC_SW_MB); + return status; +} + +/** + * ngbe_set_fw_drv_ver - Sends driver version to firmware + * @hw: pointer to the HW structure + * @maj: driver version major number + * @min: driver version minor number + * @build: driver version build number + * @sub: driver version sub build number + * + * Sends driver version number to firmware through the manageability + * block. On success return 0 + * else returns NGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or NGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
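A hypothetical call site, reporting an illustrative driver version to the firmware (the wrapper name and version numbers are not part of this driver):

static void ex_report_driver_version(struct ngbe_hw *hw)
{
	/* hypothetical values: major 1, minor 2, build 3, sub-build 0 */
	if (ngbe_set_fw_drv_ver(hw, 1, 2, 3, 0))
		DEBUGOUT("failed to report driver version to firmware\n");
}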
+ **/ +int ngbe_set_fw_drv_ver(struct ngbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub) +{ + struct ngbe_hic_drv_info fw_cmd; + int i; + int ret_val = 0; + + fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; + fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; + fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + fw_cmd.port_num = (u8)hw->bus.func; + fw_cmd.ver_maj = maj; + fw_cmd.ver_min = min; + fw_cmd.ver_build = build; + fw_cmd.ver_sub = sub; + fw_cmd.hdr.checksum = 0; + fw_cmd.hdr.checksum = ngbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); + fw_cmd.pad = 0; + fw_cmd.pad2 = 0; + + usec_delay(5000); + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + ret_val = ngbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(fw_cmd), + NGBE_HI_COMMAND_TIMEOUT, + true); + if (ret_val != 0) + continue; + + if (fw_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + ret_val = 0; + else + ret_val = NGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + return ret_val; +} + +/** + * ngbe_reset_hostif - send reset cmd to fw + * @hw: pointer to hardware structure + * + * Sends reset cmd to firmware through the manageability + * block. On success return 0 + * else returns NGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or NGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + **/ +int ngbe_reset_hostif(struct ngbe_hw *hw) +{ + struct ngbe_hic_reset reset_cmd; + int i; + int status = 0; + + reset_cmd.hdr.cmd = FW_RESET_CMD; + reset_cmd.hdr.buf_len = FW_RESET_LEN; + reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + reset_cmd.lan_id = hw->bus.lan_id; + reset_cmd.reset_type = (u16)hw->reset_type; + reset_cmd.hdr.checksum = 0; + reset_cmd.hdr.checksum = ngbe_calculate_checksum((u8 *)&reset_cmd, + (FW_CEM_HDR_LEN + reset_cmd.hdr.buf_len)); + + /* send reset request to FW and wait for response */ + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + status = ngbe_host_interface_command(hw, (u32 *)&reset_cmd, + sizeof(reset_cmd), + NGBE_HI_COMMAND_TIMEOUT, + true); + msleep(1); + if (status != 0) + continue; + + if (reset_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + return status; +} + +static u16 ngbe_crc16_ccitt(const u8 *buf, int size) +{ + u16 crc = 0; + int i; + while (--size >= 0) { + crc ^= (u16)*buf++ << 8; + for (i = 0; i < 8; i++) { + if (crc & 0x8000) + crc = crc << 1 ^ 0x1021; + else + crc <<= 1; + } + } + return crc; +} + +int ngbe_upgrade_flash_hostif(struct ngbe_hw *hw, u32 region, + const u8 *data, u32 size) +{ + struct ngbe_hic_upg_start start_cmd; + struct ngbe_hic_upg_write write_cmd; + struct ngbe_hic_upg_verify verify_cmd; + u32 offset; + int status = 0; + + start_cmd.hdr.cmd = FW_FLASH_UPGRADE_START_CMD; + start_cmd.hdr.buf_len = FW_FLASH_UPGRADE_START_LEN; + start_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + start_cmd.module_id = (u8)region; + start_cmd.hdr.checksum = 0; + start_cmd.hdr.checksum = ngbe_calculate_checksum((u8 *)&start_cmd, + (FW_CEM_HDR_LEN + start_cmd.hdr.buf_len)); + start_cmd.pad2 = 0; + start_cmd.pad3 = 0; + + status = ngbe_host_interface_command(hw, (u32 *)&start_cmd, + sizeof(start_cmd), + NGBE_HI_FLASH_ERASE_TIMEOUT, + true); + + if (start_cmd.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else { + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + return status; + } + + for (offset = 0; offset < size;) { + write_cmd.hdr.cmd = FW_FLASH_UPGRADE_WRITE_CMD; + if (size - offset > 248) 
{ + write_cmd.data_len = 248 / 4; + write_cmd.eof_flag = 0; + } else { + write_cmd.data_len = (u8)((size - offset) / 4); + write_cmd.eof_flag = 1; + } + memcpy((u8 *)write_cmd.data, &data[offset], write_cmd.data_len * 4); + write_cmd.hdr.buf_len = (write_cmd.data_len + 1) * 4; + write_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + write_cmd.check_sum = ngbe_crc16_ccitt((u8 *)write_cmd.data, + write_cmd.data_len * 4); + + status = ngbe_host_interface_command(hw, (u32 *)&write_cmd, + sizeof(write_cmd), + NGBE_HI_FLASH_UPDATE_TIMEOUT, + true); + if (start_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else { + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + return status; + } + offset += write_cmd.data_len * 4; + } + + verify_cmd.hdr.cmd = FW_FLASH_UPGRADE_VERIFY_CMD; + verify_cmd.hdr.buf_len = FW_FLASH_UPGRADE_VERIFY_LEN; + verify_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + switch (region) { + case NGBE_MODULE_EEPROM: + verify_cmd.action_flag = NGBE_RELOAD_EEPROM; + break; + case NGBE_MODULE_FIRMWARE: + verify_cmd.action_flag = NGBE_RESET_FIRMWARE; + break; + case NGBE_MODULE_HARDWARE: + verify_cmd.action_flag = NGBE_RESET_LAN; + break; + default: + ERROR_REPORT1(NGBE_ERROR_ARGUMENT, + "ngbe_upgrade_flash_hostif: region err %x\n", region); + return status; + } + + verify_cmd.hdr.checksum = ngbe_calculate_checksum((u8 *)&verify_cmd, + (FW_CEM_HDR_LEN + verify_cmd.hdr.buf_len)); + + status = ngbe_host_interface_command(hw, (u32 *)&verify_cmd, + sizeof(verify_cmd), + NGBE_HI_FLASH_VERIFY_TIMEOUT, + true); + + if (verify_cmd.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else { + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + } + return status; +} + +/* cmd_addr is used for some special command: + * 1. to be sector address, when implemented erase sector command + * 2. 
to be flash address when implemented read, write flash address + */ +static int ngbe_fmgr_cmd_op(struct ngbe_hw *hw, u32 cmd, u32 cmd_addr) +{ + u32 cmd_val = 0, timeout = 0; + + cmd_val = NGBE_SPI_CMD_CMD(cmd) | + NGBE_SPI_CMD_CLK(SPI_CLK_DIV) | + cmd_addr; + wr32(hw, NGBE_SPI_CMD, cmd_val); + + while (1) { + if (rd32(hw, SPI_H_STA_REG_ADDR) & 0x1) + break; + + if (timeout == SPI_TIME_OUT_VALUE) + return -ETIMEDOUT; + + timeout = timeout + 1; + udelay(5); + } + + return 0; +} + +static int fmgr_usr_cmd_op(struct ngbe_hw *hw, u32 usr_cmd) +{ + int status = 0; + + wr32(hw, SPI_H_USR_CMD_REG_ADDR, usr_cmd); + status = ngbe_fmgr_cmd_op(hw, SPI_CMD_USER_CMD, 0); + + return status; +} + +static int flash_erase_chip(struct ngbe_hw *hw) +{ + return ngbe_fmgr_cmd_op(hw, SPI_CMD_ERASE_CHIP, 0); +} + +static int flash_erase_sector(struct ngbe_hw *hw, u32 sec_addr) +{ + return ngbe_fmgr_cmd_op(hw, SPI_CMD_ERASE_SECTOR, sec_addr); +} + +static int ngbe_flash_write_dword(struct ngbe_hw *hw, u32 addr, u32 dword) +{ + int status = 0; + u32 data; + + wr32(hw, SPI_H_DAT_REG_ADDR, dword); + status = ngbe_fmgr_cmd_op(hw, SPI_CMD_WRITE_DWORD, addr); + if (status) + return status; + + ngbe_flash_read_dword(hw, addr, &data); + if (dword != data) + return -EIO; + + return 0; +} + +int ngbe_flash_read_dword(struct ngbe_hw *hw, u32 addr, u32 *data) +{ + int ret = 0; + + ret = ngbe_fmgr_cmd_op(hw, SPI_CMD_READ_DWORD, addr); + if (ret < 0) + return ret; + + *data = rd32(hw, SPI_H_DAT_REG_ADDR); + + return ret; +} + +static int ngbe_flash_write_unlock(struct ngbe_hw *hw) +{ + int status; + struct ngbe_hic_read_shadow_ram buffer; + + buffer.hdr.req.cmd = 0x40; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = 0; + buffer.hdr.req.checksum = 0xFF; + + /* convert offset from words to bytes */ + buffer.address = 0; + /* one word */ + buffer.length = 0; + + status = ngbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), 5000,false); + + return status; +} + +static int check_image_version(struct ngbe_hw *hw, const u8 *data) +{ + u32 image_v = 0x0; + u32 f_chip_v = 0x0; + u8 rdata_2; + u8 rdata_3, rdata_4; + u32 f_sub_id; + u8 wol = 0, ncsi = 0; + printk("===========check_image_version============\n"); + + //read image version + image_v = data[0x13a] | data[0x13b] << 8 | + data[0x13c] << 16 | data[0x13d] << 24; + printk("check_image_version=image_v: %x\n", image_v); + + //read subsytem id to check ncsi and wol + f_sub_id = data[0xfffdc] << 8 | data[0xfffdd]; + printk("The image's sub_id : %04x\n", f_sub_id); + if ((f_sub_id & 0x8000) == 0x8000) + ncsi = 1; + if ((f_sub_id & 0x4000) == 0x4000) + wol = 1; + printk("=2=ncsi : %x - wol : %x\n", ncsi, wol); + + rdata_2 = data[0xfffd8]; + printk("check_image_version=rdata_2-fffdc: %x\n", rdata_2); + rdata_3 = data[0xbc]; + printk("check_image_version=rdata_3-bc: %x\n", rdata_3); + rdata_4 = data[0x3c]; + printk("check_image_version=rdata_4-3c: %x\n", rdata_4); + + //check card's chip version + if ((image_v < 0x10015) && (image_v != 0x10012) && (image_v != 0x10013)) { + f_chip_v = 0x41;//'A' + } else if (image_v > 0x10015) { + f_chip_v = rdata_2 & 0xff; + } else if ((image_v == 0x10012) || (image_v == 0x10013) || (image_v == 0x10015)) { + if (wol == 1 || ncsi == 1) { + if (rdata_3 == 0x02) + f_chip_v = 0x41; + else + f_chip_v = 0x42; + } else { + if (rdata_4 == 0x80) + f_chip_v = 0x42; + else + f_chip_v = 0x41; + } + } + + printk("===========check_image_version============\n"); + return f_chip_v; +} + +int ngbe_upgrade_flash(struct ngbe_hw *hw, u32 region, + 
const u8 *data, u32 size) +{ + u32 sector_num = 0; + u32 read_data = 0; + u8 status = 0; + u8 skip = 0; + u32 i = 0,k = 0, n = 0; + u8 flash_vendor = 0; + u32 num[128] = {0}; + u32 chip_v = 0, image_v = 0; + u32 mac_addr0_dword0_t, mac_addr0_dword1_t; + u32 mac_addr1_dword0_t, mac_addr1_dword1_t; + u32 mac_addr2_dword0_t, mac_addr2_dword1_t; + u32 mac_addr3_dword0_t, mac_addr3_dword1_t; + u32 serial_num_dword0_t, serial_num_dword1_t, serial_num_dword2_t; + u32 sn[24]; + u8 sn_str[40]; + u8 sn_is_str = true; + u8 vpd_tend[256]; + u32 curadr = 0; + u32 vpdadr = 0; + u8 id_str_len, pn_str_len, sn_str_len, rv_str_len; + u16 vpd_ro_len; + u32 chksum = 0; + u16 vpd_offset, vpd_end; + + read_data = rd32(hw, 0x10200); + if (read_data & 0x80000000) { + printk("The flash has been successfully upgraded once, please reboot to make it work.\n"); + return -EOPNOTSUPP; + } + + + chip_v = (rd32(hw, 0x10010) & BIT(16)) ? 0x41 : 0x42; + image_v = check_image_version(hw, data); + + printk("Checking chip/image version .......\n"); + printk("The image chip_v is %c\n", image_v); + printk("The nic chip_v is %c\n", chip_v); + if (chip_v != image_v) + { + printk("====The Gigabit image is not match the Gigabit card (chip version)====\n"); + printk("====Please check your image====\n"); + return -EOPNOTSUPP; + } + + /*check sub_id*/; + printk("Checking sub_id .......\n"); + printk("The card's sub_id : %04x\n", hw->subsystem_device_id); + printk("The image's sub_id : %04x\n", data[0xfffdc] << 8 | data[0xfffdd]); + if ((hw->subsystem_device_id & 0xffff) == + ((data[0xfffdc] << 8 | data[0xfffdd]) & 0xffff)){ + printk("It is a right image\n"); + } else if (hw->subsystem_device_id == 0xffff){ + printk("update anyway\n"); + } else { + printk("====The Gigabit image is not match the Gigabit card====\n"); + printk("====Please check your image====\n"); + return -EOPNOTSUPP; + } + + /*check dev_id*/ + printk("Checking dev_id .......\n"); + printk("The image's dev_id : %04x\n", data[0xfffde] << 8 | data[0xfffdf]); + printk("The card's dev_id : %04x\n", hw->device_id); + if (!((hw->device_id & 0xffff) == ((data[0xfffde] << 8 | data[0xfffdf]) & 0xffff)) + && !(hw->device_id == 0xffff)) + { + printk("====The Gigabit image is not match the Gigabit card====\n"); + printk("====Please check your image====\n"); + return -EOPNOTSUPP; + } + + // unlock flash write protect + ngbe_release_eeprom_semaphore(hw); + ngbe_flash_write_unlock(hw); + + wr32(hw,0x10114,0x9f050206); + wr32(hw,0x10194,0x9f050206); + + msleep(1000); + + ngbe_flash_read_dword(hw, MAC_ADDR0_WORD0_OFFSET_1G, &mac_addr0_dword0_t); + ngbe_flash_read_dword(hw, MAC_ADDR0_WORD1_OFFSET_1G, &mac_addr0_dword1_t); + mac_addr0_dword1_t = mac_addr0_dword1_t & U16_MAX; + ngbe_flash_read_dword(hw, MAC_ADDR1_WORD0_OFFSET_1G, &mac_addr1_dword0_t); + ngbe_flash_read_dword(hw, MAC_ADDR1_WORD1_OFFSET_1G, &mac_addr1_dword1_t); + mac_addr1_dword1_t = mac_addr1_dword1_t & U16_MAX; + ngbe_flash_read_dword(hw, MAC_ADDR2_WORD0_OFFSET_1G, &mac_addr2_dword0_t); + ngbe_flash_read_dword(hw, MAC_ADDR2_WORD1_OFFSET_1G, &mac_addr2_dword1_t); + mac_addr2_dword1_t = mac_addr2_dword1_t & U16_MAX; + ngbe_flash_read_dword(hw, MAC_ADDR3_WORD0_OFFSET_1G, &mac_addr3_dword0_t); + ngbe_flash_read_dword(hw, MAC_ADDR3_WORD1_OFFSET_1G, &mac_addr3_dword1_t); + + mac_addr3_dword1_t = mac_addr3_dword1_t & U16_MAX; + for (i = 0; i < 24; i++) { + ngbe_flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 4 * i, &sn[i]); + } + if (sn[23] == U32_MAX) + sn_is_str = false; + + ngbe_flash_read_dword(hw, 
PRODUCT_SERIAL_NUM_OFFSET_1G, &serial_num_dword0_t); + ngbe_flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G+4, &serial_num_dword1_t); + ngbe_flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G+8, &serial_num_dword2_t); + printk("Old: MAC Address0 is: 0x%04x%08x\n", mac_addr0_dword1_t, mac_addr0_dword0_t); + printk(" MAC Address1 is: 0x%04x%08x\n", mac_addr1_dword1_t, mac_addr1_dword0_t); + printk(" MAC Address2 is: 0x%04x%08x\n", mac_addr2_dword1_t, mac_addr2_dword0_t); + printk(" MAC Address3 is: 0x%04x%08x\n", mac_addr3_dword1_t, mac_addr3_dword0_t); + + for (k=0; k<(512/4); k++) + ngbe_flash_read_dword(hw, 0xfe000 + k*4, &num[k]); + + status = fmgr_usr_cmd_op(hw, 0x6); // write enable + status = fmgr_usr_cmd_op(hw, 0x98); // global protection un-lock + msleep(1000); // 1 s + + //rebuild vpd + vpd_offset = (data[0x35] << 8) | data[0x34]; + if (vpd_offset == 0x60) + vpd_end = 0xc0; + else if (vpd_offset == 0x170) + vpd_end = 0x200; + else + return 1; + + memset(vpd_tend, 0xff, sizeof(vpd_tend)); + curadr = vpd_offset + 1; + id_str_len = data[curadr] | data[curadr + 1] << 8; + curadr += (7 + id_str_len); + pn_str_len = data[curadr]; + curadr += 1 + pn_str_len; + + for (i = 0; i < curadr - vpd_offset; i++) { + vpd_tend[i] = data[vpd_offset + i]; + } + + memset(sn_str, 0x0, sizeof(sn_str)); + if (sn_is_str) { + for(i = 0; i < 24; i++) { + sn_str[i] = sn[23-i]; + } + sn_str_len = strlen(sn_str); + } else { + sn_str_len = 0x12; + sprintf(sn_str ,"%02x%08x%08x",(serial_num_dword2_t & 0xff), serial_num_dword1_t, serial_num_dword0_t); + } + + vpdadr = curadr - vpd_offset; + + if (data[curadr] == 'S' && data[curadr + 1] == 'N') { + if (data[curadr + 2]) { + for (i = sn_str_len; i < data[curadr + 2]; i++) + sn_str[i] = 0x20; + sn_str_len = data[curadr + 2]; + } + curadr += 3 + data[curadr + 2]; + rv_str_len = data[2 + curadr]; + } else { + rv_str_len = data[2 + curadr]; + } + + vpd_tend[vpdadr] = 'S'; + vpd_tend[vpdadr + 1] = 'N'; + vpd_tend[vpdadr + 2] = sn_str_len; + + for (i = 0; i < sn_str_len; i++) + vpd_tend[vpdadr + 3 + i] = sn_str[i]; + + vpdadr = vpdadr+ 3 + sn_str_len; + + for (i = 0; i < 3; i++) + vpd_tend[vpdadr + i] = data [curadr + i]; + + vpdadr += 3; + for (i = 0; i < rv_str_len; i++) + vpd_tend[vpdadr + i] = 0x0; + + vpdadr += rv_str_len; + vpd_ro_len = pn_str_len + sn_str_len + rv_str_len + 9; + vpd_tend[4 + id_str_len] = vpd_ro_len & 0xff; + vpd_tend[5 + id_str_len] = (vpd_ro_len >> 8) & 0xff; + + for (i = 0; i < vpdadr; i++) + chksum += vpd_tend[i]; + chksum = ~(chksum & 0xff) + 1; + vpd_tend[vpdadr - rv_str_len] = chksum; + vpd_tend[vpdadr] = 0x78; + // Note: for Spanish FLASH, first 8 sectors (4KB) in sector0 (64KB) need to use a special erase command (4K sector erase) + if (flash_vendor == 1) { + wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c720); + for (i = 0; i < 8; i++) { + flash_erase_sector(hw, i*128); + msleep(20); // 20 ms + } + wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c7d8); + } + + /* Winbond Flash, erase chip command is okay, but erase sector doestn't work*/ + sector_num = size / SPI_SECTOR_SIZE; + if (flash_vendor == 2) { + status = flash_erase_chip(hw); + printk("Erase chip command, return status = %0d\n", status); + msleep(1000); // 1 s + } else { + wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c720); + for (i=0; i= PRODUCT_SERIAL_NUM_OFFSET_1G && i*4 <= PRODUCT_SERIAL_NUM_OFFSET_1G + 92) || + (i * 4 >= vpd_offset && i * 4 < vpd_end) || + (i * 4 == 0x15c)); + if (read_data != 0xffffffff && !skip) { + status = ngbe_flash_write_dword(hw, i*4, read_data); + if (status) { + printk("ERROR: Program 
0x%08x @addr: 0x%08x is failed !!\n", read_data, i*4); + ngbe_flash_read_dword(hw, i*4, &read_data); + printk(" Read data from Flash is: 0x%08x\n", read_data); + return 1; + } + } + if (i%1024 == 0) { + printk("\b\b\b\b%3d%%", (int)(i*4 * 100 / size)); + } + } + + for (i = 0; i < (vpd_end - vpd_offset) / 4; i++) { + read_data = vpd_tend[4 * i + 3] << 24 | vpd_tend[4 * i + 2] << 16 | vpd_tend[4 * i + 1] << 8 | vpd_tend[4 * i]; + read_data = __le32_to_cpu(read_data); + if (read_data != U32_MAX) { + status = ngbe_flash_write_dword(hw, vpd_offset + i * 4, read_data); + if (status) { + printk("ERROR: Program 0x%08x @addr: 0x%08x is failed !!\n", read_data, i * 4); + ngbe_flash_read_dword(hw, i * 4, &read_data); + printk(" Read data from Flash is: 0x%08x\n", read_data); + return 1; + } + } + } + + chksum = 0; + for (i = 0; i < 0x400; i += 2) { + if (i >= vpd_offset && i < vpd_end) { + chksum += (vpd_tend[i - vpd_offset + 1] << 8 | vpd_tend[i - vpd_offset]); + } else if (i == 0x15e) { + continue; + } else { + chksum += (data[i + 1] << 8 | data[i]); + } + } + chksum = 0xbaba - chksum; + chksum &= 0xffff; + status = ngbe_flash_write_dword(hw, 0x15e, 0xffff0000 | chksum); + + ngbe_flash_write_dword(hw, MAC_ADDR0_WORD0_OFFSET_1G, mac_addr0_dword0_t); + ngbe_flash_write_dword(hw, MAC_ADDR0_WORD1_OFFSET_1G, (mac_addr0_dword1_t | 0x80000000));//lan0 + ngbe_flash_write_dword(hw, MAC_ADDR1_WORD0_OFFSET_1G, mac_addr1_dword0_t); + ngbe_flash_write_dword(hw, MAC_ADDR1_WORD1_OFFSET_1G, (mac_addr1_dword1_t | 0x80000000));//lan1 + ngbe_flash_write_dword(hw, MAC_ADDR2_WORD0_OFFSET_1G, mac_addr2_dword0_t); + ngbe_flash_write_dword(hw, MAC_ADDR2_WORD1_OFFSET_1G, (mac_addr2_dword1_t | 0x80000000));//lan2 + ngbe_flash_write_dword(hw, MAC_ADDR3_WORD0_OFFSET_1G, mac_addr3_dword0_t); + ngbe_flash_write_dword(hw, MAC_ADDR3_WORD1_OFFSET_1G, (mac_addr3_dword1_t | 0x80000000));//lan3 + if (sn_is_str) { + for (i = 0; i < 24; i++) { + ngbe_flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 4 * i, sn[i]); + } + } else { + ngbe_flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G, serial_num_dword0_t); + ngbe_flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G+4, serial_num_dword1_t); + ngbe_flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G+8, serial_num_dword2_t); + } + for (n=0; n < 512/4; n++) { + if(!(num[n] == 0xffffffff)) + ngbe_flash_write_dword(hw, 0xfe000 + n*4, num[n]); + } + wr32(hw, 0x10200, rd32(hw, 0x10200) | 0x80000000); + + return 0; +} + +/** + * ngbe_set_rxpba - Initialize Rx packet buffer + * @hw: pointer to hardware structure + * @num_pb: number of packet buffers to allocate + * @headroom: reserve n KB of headroom + * @strategy: packet buffer allocation strategy + **/ +void ngbe_set_rxpba(struct ngbe_hw *hw, int num_pb, u32 headroom, + int strategy) +{ + u32 pbsize = hw->mac.rx_pb_size; + u32 rxpktsize, txpktsize, txpbthresh; + + /* Reserve headroom */ + pbsize -= headroom; + + if (!num_pb) + num_pb = 1; + + /* Divide remaining packet buffer space amongst the number of packet + * buffers requested using supplied strategy. + */ + switch (strategy) { + case PBA_STRATEGY_EQUAL: + rxpktsize = (pbsize / num_pb) << NGBE_RDB_PB_SZ_SHIFT; + wr32(hw, NGBE_RDB_PB_SZ, rxpktsize); + break; + default: + break; + } + + /* Only support an equally distributed Tx packet buffer strategy. 
*/ + txpktsize = NGBE_TDB_PB_SZ_MAX / num_pb; + txpbthresh = (txpktsize / NGBE_KB_TO_B) - NGBE_TXPKT_SIZE_MAX; + + wr32(hw, NGBE_TDB_PB_SZ, txpktsize); + wr32(hw, NGBE_TDM_PB_THRE, txpbthresh); +} + +/** + * ngbe_get_thermal_sensor_data - Gathers thermal sensor data + * @hw: pointer to hardware structure + * @data: pointer to the thermal sensor data structure + * + * algorithm: + * T = (-4.8380E+01)N^0 + (3.1020E-01)N^1 + (-1.8201E-04)N^2 + + (8.1542E-08)N^3 + (-1.6743E-11)N^4 + * algorithm with 5% more deviation, easy for implementation + * T = (-50)N^0 + (0.31)N^1 + (-0.0002)N^2 + (0.0000001)N^3 + * + * Returns the thermal sensor data structure + **/ +int ngbe_get_thermal_sensor_data(struct ngbe_hw *hw) +{ + s64 tsv; + struct ngbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + /* Only support thermal sensors attached to physical port 0 */ + if (hw->bus.lan_id) + return NGBE_NOT_IMPLEMENTED; + + tsv = (s64)(rd32(hw, NGBE_TS_ST) & + NGBE_TS_ST_DATA_OUT_MASK); + /* 216 < tsv < 876 */ + + tsv = tsv < 876 ? tsv : 876 ; + + tsv = tsv - 216; + + tsv = tsv/4; + + tsv = tsv - 40; + + + data->sensor.temp = (s16)tsv; + + return 0; +} + +/** + * ngbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Inits the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +int ngbe_init_thermal_sensor_thresh(struct ngbe_hw *hw) +{ + int status = 0; + + struct ngbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + memset(data, 0, sizeof(struct ngbe_thermal_sensor_data)); + + /* Only support thermal sensors attached to SP physical port 0 */ + if (hw->bus.lan_id) + return NGBE_NOT_IMPLEMENTED; + + + wr32(hw, NGBE_TS_INT_EN, NGBE_TS_INT_EN_DALARM_INT_EN | NGBE_TS_INT_EN_ALARM_INT_EN); + + wr32(hw, NGBE_TS_EN, NGBE_TS_EN_ENA); + + + data->sensor.alarm_thresh = 115; + wr32(hw, NGBE_TS_ALARM_THRE, 0x344);/* magic num */ + data->sensor.dalarm_thresh = 110; + wr32(hw, NGBE_TS_DALARM_THRE, 0x330);/* magic num */ + + return status; +} + +void ngbe_disable_rx(struct ngbe_hw *hw) +{ + u32 pfdtxgswc; + u32 rxctrl; + struct ngbe_adapter *adapter = hw->back; + + rxctrl = rd32(hw, NGBE_RDB_PB_CTL); + if (rxctrl & NGBE_RDB_PB_CTL_PBEN) { + pfdtxgswc = rd32(hw, NGBE_PSR_CTL); + if (pfdtxgswc & NGBE_PSR_CTL_SW_EN) { + pfdtxgswc &= ~NGBE_PSR_CTL_SW_EN; + wr32(hw, NGBE_PSR_CTL, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } + rxctrl &= ~NGBE_RDB_PB_CTL_PBEN; + wr32(hw, NGBE_RDB_PB_CTL, rxctrl); + + /*OCP NCSI BMC need it*/ + if (!(hw->ncsi_enabled || + (hw->subsystem_device_id & WOL_SUP_MASK) == WOL_SUP || + adapter->eth_priv_flags & NGBE_ETH_PRIV_FLAG_LLDP)) + /* disable mac receiver */ + wr32m(hw, NGBE_MAC_RX_CFG, NGBE_MAC_RX_CFG_RE, 0); + + } + +} + + +void ngbe_enable_rx(struct ngbe_hw *hw) +{ + u32 pfdtxgswc; + + /* enable mac receiver */ + wr32m(hw, NGBE_MAC_RX_CFG, + NGBE_MAC_RX_CFG_RE, NGBE_MAC_RX_CFG_RE); + + wr32m(hw, NGBE_RSEC_CTL, + 0x2, 0); + + wr32m(hw, NGBE_RDB_PB_CTL, + NGBE_RDB_PB_CTL_PBEN, NGBE_RDB_PB_CTL_PBEN); + + if (hw->mac.set_lben) { + pfdtxgswc = rd32(hw, NGBE_PSR_CTL); + pfdtxgswc |= NGBE_PSR_CTL_SW_EN; + wr32(hw, NGBE_PSR_CTL, pfdtxgswc); + hw->mac.set_lben = false; + } +} + +/** + * ngbe_mng_present - returns true when manangbeent capability is present + * @hw: pointer to hardware structure + */ +bool ngbe_mng_present(struct ngbe_hw *hw) +{ + u32 fwsm; + + fwsm = rd32(hw, NGBE_MIS_ST); + return fwsm & 
NGBE_MIS_ST_MNG_INIT_DN; +} + +bool ngbe_check_mng_access(struct ngbe_hw *hw) +{ + + if (!ngbe_mng_present(hw)) + return false; + return true; +} + +int ngbe_check_flash_load(struct ngbe_hw *hw, u32 check_bit) +{ + u32 i = 0; + u32 reg = 0; + int err = 0; + /* if there's flash existing */ + if (!(rd32(hw, NGBE_SPI_STATUS) & + NGBE_SPI_STATUS_FLASH_BYPASS)) { + /* wait hw load flash done */ + for (i = 0; i < NGBE_MAX_FLASH_LOAD_POLL_TIME; i++) { + reg = rd32(hw, NGBE_SPI_ILDR_STATUS); + if (!(reg & check_bit)) { + /* done */ + break; + } + msleep(200); + } + if (i == NGBE_MAX_FLASH_LOAD_POLL_TIME) { + err = NGBE_ERR_FLASH_LOADING_FAILED; + ERROR_REPORT1(NGBE_ERROR_POLLING, + "HW Loading Flash failed: %d\n", err); + } + } + return err; +} + +/* The ngbe_ptype_lookup is used to convert from the 8-bit ptype in the + * hardware to a bit-field that can be used by SW to more easily determine the + * packet type. + * + * Macros are used to shorten the table lines and make this table human + * readable. + * + * We store the PTYPE in the top byte of the bit field - this is just so that + * we can check that the table doesn't have a row missing, as the index into + * the table should be the PTYPE. + * + * Typical work flow: + * + * IF NOT ngbe_ptype_lookup[ptype].known + * THEN + * Packet is unknown + * ELSE IF ngbe_ptype_lookup[ptype].mac == NGBE_DEC_PTYPE_MAC_IP + * Use the rest of the fields to look at the tunnels, inner protocols, etc + * ELSE + * Use the enum ngbe_l2_ptypes to decode the packet type + * ENDIF + */ + +/* macro to make the table lines short */ +#define NGBE_PTT(ptype, mac, ip, etype, eip, proto, layer)\ + { ptype, \ + 1, \ + /* mac */ NGBE_DEC_PTYPE_MAC_##mac, \ + /* ip */ NGBE_DEC_PTYPE_IP_##ip, \ + /* etype */ NGBE_DEC_PTYPE_ETYPE_##etype, \ + /* eip */ NGBE_DEC_PTYPE_IP_##eip, \ + /* proto */ NGBE_DEC_PTYPE_PROT_##proto, \ + /* layer */ NGBE_DEC_PTYPE_LAYER_##layer } + +#define NGBE_UKN(ptype) \ + { ptype, 0, 0, 0, 0, 0, 0, 0 } + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +/* for ((pt=0;pt<256;pt++)); do printf "macro(0x%02X),\n" $pt; done */ +ngbe_dptype ngbe_ptype_lookup[256] = { + NGBE_UKN(0x00), + NGBE_UKN(0x01), + NGBE_UKN(0x02), + NGBE_UKN(0x03), + NGBE_UKN(0x04), + NGBE_UKN(0x05), + NGBE_UKN(0x06), + NGBE_UKN(0x07), + NGBE_UKN(0x08), + NGBE_UKN(0x09), + NGBE_UKN(0x0A), + NGBE_UKN(0x0B), + NGBE_UKN(0x0C), + NGBE_UKN(0x0D), + NGBE_UKN(0x0E), + NGBE_UKN(0x0F), + + /* L2: mac */ + NGBE_UKN(0x10), + NGBE_PTT(0x11, L2, NONE, NONE, NONE, NONE, PAY2), + NGBE_PTT(0x12, L2, NONE, NONE, NONE, TS, PAY2), + NGBE_PTT(0x13, L2, NONE, NONE, NONE, NONE, PAY2), + NGBE_PTT(0x14, L2, NONE, NONE, NONE, NONE, PAY2), + NGBE_PTT(0x15, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x16, L2, NONE, NONE, NONE, NONE, PAY2), + NGBE_PTT(0x17, L2, NONE, NONE, NONE, NONE, NONE), + + /* L2: ethertype filter */ + NGBE_PTT(0x18, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x19, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x1A, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x1B, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x1C, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x1D, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x1E, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x1F, L2, NONE, NONE, NONE, NONE, NONE), + + /* L3: ip non-tunnel */ + NGBE_UKN(0x20), + NGBE_PTT(0x21, IP, FGV4, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x22, IP, IPV4, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x23, IP, IPV4, NONE, NONE, UDP, PAY4), + NGBE_PTT(0x24, IP, IPV4, NONE, NONE, TCP, PAY4), + 
NGBE_PTT(0x25, IP, IPV4, NONE, NONE, SCTP, PAY4), + NGBE_UKN(0x26), + NGBE_UKN(0x27), + NGBE_UKN(0x28), + NGBE_PTT(0x29, IP, FGV6, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x2A, IP, IPV6, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x2B, IP, IPV6, NONE, NONE, UDP, PAY3), + NGBE_PTT(0x2C, IP, IPV6, NONE, NONE, TCP, PAY4), + NGBE_PTT(0x2D, IP, IPV6, NONE, NONE, SCTP, PAY4), + NGBE_UKN(0x2E), + NGBE_UKN(0x2F), + + /* L2: fcoe */ + NGBE_PTT(0x30, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x31, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x32, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x33, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x34, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_UKN(0x35), + NGBE_UKN(0x36), + NGBE_UKN(0x37), + NGBE_PTT(0x38, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x39, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x3A, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x3B, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x3C, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_UKN(0x3D), + NGBE_UKN(0x3E), + NGBE_UKN(0x3F), + + NGBE_UKN(0x40), + NGBE_UKN(0x41), + NGBE_UKN(0x42), + NGBE_UKN(0x43), + NGBE_UKN(0x44), + NGBE_UKN(0x45), + NGBE_UKN(0x46), + NGBE_UKN(0x47), + NGBE_UKN(0x48), + NGBE_UKN(0x49), + NGBE_UKN(0x4A), + NGBE_UKN(0x4B), + NGBE_UKN(0x4C), + NGBE_UKN(0x4D), + NGBE_UKN(0x4E), + NGBE_UKN(0x4F), + NGBE_UKN(0x50), + NGBE_UKN(0x51), + NGBE_UKN(0x52), + NGBE_UKN(0x53), + NGBE_UKN(0x54), + NGBE_UKN(0x55), + NGBE_UKN(0x56), + NGBE_UKN(0x57), + NGBE_UKN(0x58), + NGBE_UKN(0x59), + NGBE_UKN(0x5A), + NGBE_UKN(0x5B), + NGBE_UKN(0x5C), + NGBE_UKN(0x5D), + NGBE_UKN(0x5E), + NGBE_UKN(0x5F), + NGBE_UKN(0x60), + NGBE_UKN(0x61), + NGBE_UKN(0x62), + NGBE_UKN(0x63), + NGBE_UKN(0x64), + NGBE_UKN(0x65), + NGBE_UKN(0x66), + NGBE_UKN(0x67), + NGBE_UKN(0x68), + NGBE_UKN(0x69), + NGBE_UKN(0x6A), + NGBE_UKN(0x6B), + NGBE_UKN(0x6C), + NGBE_UKN(0x6D), + NGBE_UKN(0x6E), + NGBE_UKN(0x6F), + NGBE_UKN(0x70), + NGBE_UKN(0x71), + NGBE_UKN(0x72), + NGBE_UKN(0x73), + NGBE_UKN(0x74), + NGBE_UKN(0x75), + NGBE_UKN(0x76), + NGBE_UKN(0x77), + NGBE_UKN(0x78), + NGBE_UKN(0x79), + NGBE_UKN(0x7A), + NGBE_UKN(0x7B), + NGBE_UKN(0x7C), + NGBE_UKN(0x7D), + NGBE_UKN(0x7E), + NGBE_UKN(0x7F), + + /* IPv4 --> IPv4/IPv6 */ + NGBE_UKN(0x80), + NGBE_PTT(0x81, IP, IPV4, IPIP, FGV4, NONE, PAY3), + NGBE_PTT(0x82, IP, IPV4, IPIP, IPV4, NONE, PAY3), + NGBE_PTT(0x83, IP, IPV4, IPIP, IPV4, UDP, PAY4), + NGBE_PTT(0x84, IP, IPV4, IPIP, IPV4, TCP, PAY4), + NGBE_PTT(0x85, IP, IPV4, IPIP, IPV4, SCTP, PAY4), + NGBE_UKN(0x86), + NGBE_UKN(0x87), + NGBE_UKN(0x88), + NGBE_PTT(0x89, IP, IPV4, IPIP, FGV6, NONE, PAY3), + NGBE_PTT(0x8A, IP, IPV4, IPIP, IPV6, NONE, PAY3), + NGBE_PTT(0x8B, IP, IPV4, IPIP, IPV6, UDP, PAY4), + NGBE_PTT(0x8C, IP, IPV4, IPIP, IPV6, TCP, PAY4), + NGBE_PTT(0x8D, IP, IPV4, IPIP, IPV6, SCTP, PAY4), + NGBE_UKN(0x8E), + NGBE_UKN(0x8F), + + /* IPv4 --> GRE/NAT --> NONE/IPv4/IPv6 */ + NGBE_PTT(0x90, IP, IPV4, IG, NONE, NONE, PAY3), + NGBE_PTT(0x91, IP, IPV4, IG, FGV4, NONE, PAY3), + NGBE_PTT(0x92, IP, IPV4, IG, IPV4, NONE, PAY3), + NGBE_PTT(0x93, IP, IPV4, IG, IPV4, UDP, PAY4), + NGBE_PTT(0x94, IP, IPV4, IG, IPV4, TCP, PAY4), + NGBE_PTT(0x95, IP, IPV4, IG, IPV4, SCTP, PAY4), + NGBE_UKN(0x96), + NGBE_UKN(0x97), + NGBE_UKN(0x98), + NGBE_PTT(0x99, IP, IPV4, IG, FGV6, NONE, PAY3), + NGBE_PTT(0x9A, IP, IPV4, IG, IPV6, NONE, PAY3), + NGBE_PTT(0x9B, IP, IPV4, IG, IPV6, UDP, PAY4), + NGBE_PTT(0x9C, IP, IPV4, IG, IPV6, TCP, PAY4), + NGBE_PTT(0x9D, IP, IPV4, IG, IPV6, SCTP, PAY4), + NGBE_UKN(0x9E), + NGBE_UKN(0x9F), 
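+ /* Illustrative decode sketch (not part of the original table; member names are assumed from the NGBE_PTT() comments and the ngbe_dptype declaration, and "ptype" is the 8-bit value taken from the Rx descriptor): + * ngbe_dptype dptype = ngbe_ptype_lookup[ptype]; + * if (!dptype.known) + * the packet type is unknown, so stop decoding; + * else if (dptype.mac == NGBE_DEC_PTYPE_MAC_IP) + * use the ip/proto/layer members to drive checksum and RSS handling of the inner headers. + */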
+ + /* IPv4 --> GRE/NAT --> MAC --> NONE/IPv4/IPv6 */ + NGBE_PTT(0xA0, IP, IPV4, IGM, NONE, NONE, PAY3), + NGBE_PTT(0xA1, IP, IPV4, IGM, FGV4, NONE, PAY3), + NGBE_PTT(0xA2, IP, IPV4, IGM, IPV4, NONE, PAY3), + NGBE_PTT(0xA3, IP, IPV4, IGM, IPV4, UDP, PAY4), + NGBE_PTT(0xA4, IP, IPV4, IGM, IPV4, TCP, PAY4), + NGBE_PTT(0xA5, IP, IPV4, IGM, IPV4, SCTP, PAY4), + NGBE_UKN(0xA6), + NGBE_UKN(0xA7), + NGBE_UKN(0xA8), + NGBE_PTT(0xA9, IP, IPV4, IGM, FGV6, NONE, PAY3), + NGBE_PTT(0xAA, IP, IPV4, IGM, IPV6, NONE, PAY3), + NGBE_PTT(0xAB, IP, IPV4, IGM, IPV6, UDP, PAY4), + NGBE_PTT(0xAC, IP, IPV4, IGM, IPV6, TCP, PAY4), + NGBE_PTT(0xAD, IP, IPV4, IGM, IPV6, SCTP, PAY4), + NGBE_UKN(0xAE), + NGBE_UKN(0xAF), + + /* IPv4 --> GRE/NAT --> MAC+VLAN --> NONE/IPv4/IPv6 */ + NGBE_PTT(0xB0, IP, IPV4, IGMV, NONE, NONE, PAY3), + NGBE_PTT(0xB1, IP, IPV4, IGMV, FGV4, NONE, PAY3), + NGBE_PTT(0xB2, IP, IPV4, IGMV, IPV4, NONE, PAY3), + NGBE_PTT(0xB3, IP, IPV4, IGMV, IPV4, UDP, PAY4), + NGBE_PTT(0xB4, IP, IPV4, IGMV, IPV4, TCP, PAY4), + NGBE_PTT(0xB5, IP, IPV4, IGMV, IPV4, SCTP, PAY4), + NGBE_UKN(0xB6), + NGBE_UKN(0xB7), + NGBE_UKN(0xB8), + NGBE_PTT(0xB9, IP, IPV4, IGMV, FGV6, NONE, PAY3), + NGBE_PTT(0xBA, IP, IPV4, IGMV, IPV6, NONE, PAY3), + NGBE_PTT(0xBB, IP, IPV4, IGMV, IPV6, UDP, PAY4), + NGBE_PTT(0xBC, IP, IPV4, IGMV, IPV6, TCP, PAY4), + NGBE_PTT(0xBD, IP, IPV4, IGMV, IPV6, SCTP, PAY4), + NGBE_UKN(0xBE), + NGBE_UKN(0xBF), + + /* IPv6 --> IPv4/IPv6 */ + NGBE_UKN(0xC0), + NGBE_PTT(0xC1, IP, IPV6, IPIP, FGV4, NONE, PAY3), + NGBE_PTT(0xC2, IP, IPV6, IPIP, IPV4, NONE, PAY3), + NGBE_PTT(0xC3, IP, IPV6, IPIP, IPV4, UDP, PAY4), + NGBE_PTT(0xC4, IP, IPV6, IPIP, IPV4, TCP, PAY4), + NGBE_PTT(0xC5, IP, IPV6, IPIP, IPV4, SCTP, PAY4), + NGBE_UKN(0xC6), + NGBE_UKN(0xC7), + NGBE_UKN(0xC8), + NGBE_PTT(0xC9, IP, IPV6, IPIP, FGV6, NONE, PAY3), + NGBE_PTT(0xCA, IP, IPV6, IPIP, IPV6, NONE, PAY3), + NGBE_PTT(0xCB, IP, IPV6, IPIP, IPV6, UDP, PAY4), + NGBE_PTT(0xCC, IP, IPV6, IPIP, IPV6, TCP, PAY4), + NGBE_PTT(0xCD, IP, IPV6, IPIP, IPV6, SCTP, PAY4), + NGBE_UKN(0xCE), + NGBE_UKN(0xCF), + + /* IPv6 --> GRE/NAT -> NONE/IPv4/IPv6 */ + NGBE_PTT(0xD0, IP, IPV6, IG, NONE, NONE, PAY3), + NGBE_PTT(0xD1, IP, IPV6, IG, FGV4, NONE, PAY3), + NGBE_PTT(0xD2, IP, IPV6, IG, IPV4, NONE, PAY3), + NGBE_PTT(0xD3, IP, IPV6, IG, IPV4, UDP, PAY4), + NGBE_PTT(0xD4, IP, IPV6, IG, IPV4, TCP, PAY4), + NGBE_PTT(0xD5, IP, IPV6, IG, IPV4, SCTP, PAY4), + NGBE_UKN(0xD6), + NGBE_UKN(0xD7), + NGBE_UKN(0xD8), + NGBE_PTT(0xD9, IP, IPV6, IG, FGV6, NONE, PAY3), + NGBE_PTT(0xDA, IP, IPV6, IG, IPV6, NONE, PAY3), + NGBE_PTT(0xDB, IP, IPV6, IG, IPV6, UDP, PAY4), + NGBE_PTT(0xDC, IP, IPV6, IG, IPV6, TCP, PAY4), + NGBE_PTT(0xDD, IP, IPV6, IG, IPV6, SCTP, PAY4), + NGBE_UKN(0xDE), + NGBE_UKN(0xDF), + + /* IPv6 --> GRE/NAT -> MAC -> NONE/IPv4/IPv6 */ + NGBE_PTT(0xE0, IP, IPV6, IGM, NONE, NONE, PAY3), + NGBE_PTT(0xE1, IP, IPV6, IGM, FGV4, NONE, PAY3), + NGBE_PTT(0xE2, IP, IPV6, IGM, IPV4, NONE, PAY3), + NGBE_PTT(0xE3, IP, IPV6, IGM, IPV4, UDP, PAY4), + NGBE_PTT(0xE4, IP, IPV6, IGM, IPV4, TCP, PAY4), + NGBE_PTT(0xE5, IP, IPV6, IGM, IPV4, SCTP, PAY4), + NGBE_UKN(0xE6), + NGBE_UKN(0xE7), + NGBE_UKN(0xE8), + NGBE_PTT(0xE9, IP, IPV6, IGM, FGV6, NONE, PAY3), + NGBE_PTT(0xEA, IP, IPV6, IGM, IPV6, NONE, PAY3), + NGBE_PTT(0xEB, IP, IPV6, IGM, IPV6, UDP, PAY4), + NGBE_PTT(0xEC, IP, IPV6, IGM, IPV6, TCP, PAY4), + NGBE_PTT(0xED, IP, IPV6, IGM, IPV6, SCTP, PAY4), + NGBE_UKN(0xEE), + NGBE_UKN(0xEF), + + /* IPv6 --> GRE/NAT -> MAC--> NONE/IPv */ + NGBE_PTT(0xF0, IP, IPV6, IGMV, NONE, NONE, PAY3), + 
NGBE_PTT(0xF1, IP, IPV6, IGMV, FGV4, NONE, PAY3), + NGBE_PTT(0xF2, IP, IPV6, IGMV, IPV4, NONE, PAY3), + NGBE_PTT(0xF3, IP, IPV6, IGMV, IPV4, UDP, PAY4), + NGBE_PTT(0xF4, IP, IPV6, IGMV, IPV4, TCP, PAY4), + NGBE_PTT(0xF5, IP, IPV6, IGMV, IPV4, SCTP, PAY4), + NGBE_UKN(0xF6), + NGBE_UKN(0xF7), + NGBE_UKN(0xF8), + NGBE_PTT(0xF9, IP, IPV6, IGMV, FGV6, NONE, PAY3), + NGBE_PTT(0xFA, IP, IPV6, IGMV, IPV6, NONE, PAY3), + NGBE_PTT(0xFB, IP, IPV6, IGMV, IPV6, UDP, PAY4), + NGBE_PTT(0xFC, IP, IPV6, IGMV, IPV6, TCP, PAY4), + NGBE_PTT(0xFD, IP, IPV6, IGMV, IPV6, SCTP, PAY4), + NGBE_UKN(0xFE), + NGBE_UKN(0xFF), +}; + +/** + * ngbe_get_link_capabilities - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + * + * Determines the link capabilities by reading the AUTOC register. + **/ +int ngbe_get_link_capabilities(struct ngbe_hw *hw, + u32 *speed, + bool *autoneg) +{ + int status = 0; + + if (hw->device_id == NGBE_DEV_ID_EM_TEST || + hw->device_id == NGBE_DEV_ID_EM_WX1860A2 || + hw->device_id == NGBE_DEV_ID_EM_WX1860A2S || + hw->device_id == NGBE_DEV_ID_EM_WX1860A4 || + hw->device_id == NGBE_DEV_ID_EM_WX1860A4S || + hw->device_id == NGBE_DEV_ID_EM_WX1860AL2 || + hw->device_id == NGBE_DEV_ID_EM_WX1860AL2S || + hw->device_id == NGBE_DEV_ID_EM_WX1860AL4 || + hw->device_id == NGBE_DEV_ID_EM_WX1860AL4S || + hw->device_id == NGBE_DEV_ID_EM_WX1860AL_W || + hw->device_id == NGBE_DEV_ID_EM_WX1860A1 || + hw->device_id == NGBE_DEV_ID_EM_WX1860A1L || + hw->device_id == 0x10c || + hw->device_id == NGBE_DEV_ID_EM_WX1860NCSI ) { + *speed = NGBE_LINK_SPEED_1GB_FULL | + NGBE_LINK_SPEED_100_FULL | + NGBE_LINK_SPEED_10_FULL; + *autoneg = false; + hw->phy.link_mode = NGBE_PHYSICAL_LAYER_1000BASE_T | + NGBE_PHYSICAL_LAYER_100BASE_TX; + } + + if ((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA) { + *speed = NGBE_LINK_SPEED_1GB_FULL; + hw->phy.link_mode = NGBE_PHYSICAL_LAYER_1000BASE_T; + *autoneg = false; + } + + return status; +} + +/** + * ngbe_get_copper_link_capabilities - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + * + * Determines the supported link capabilities by reading the PHY auto + * negotiation register. 
+**/ +static int ngbe_get_copper_link_capabilities(struct ngbe_hw *hw, + u32 *speed, + bool *autoneg) +{ + int status = 0; + u16 value = 0; + unsigned long flags; + + *speed = 0; + + if (hw->mac.autoneg) + *autoneg = true; + else + *autoneg = false; + + if (status == 0) { + *speed = NGBE_LINK_SPEED_10_FULL | + NGBE_LINK_SPEED_100_FULL | + NGBE_LINK_SPEED_1GB_FULL; + } + + if ((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA) { + *speed = NGBE_LINK_SPEED_1GB_FULL; + hw->phy.link_mode = NGBE_PHYSICAL_LAYER_1000BASE_T; + *autoneg = false; + } + + if (hw->phy.type == ngbe_phy_m88e1512_sfi) { + *speed = NGBE_LINK_SPEED_1GB_FULL; + hw->phy.link_mode = NGBE_PHYSICAL_LAYER_1000BASE_T; + } + + if (hw->phy.type == ngbe_phy_yt8521s_sfi) { + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_ext_yt8521s(hw, 0xA001, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + if ((value & 7) == 1) { + *speed = NGBE_LINK_SPEED_1GB_FULL | + NGBE_LINK_SPEED_100_FULL; + hw->phy.link_mode = NGBE_PHYSICAL_LAYER_1000BASE_T; + } + } + + return status; +} + +/** + * ngbe_get_media_type - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +enum ngbe_media_type ngbe_get_media_type(struct ngbe_hw *hw) +{ + enum ngbe_media_type media_type; + + ERROR_REPORT1(NGBE_ERROR_ARGUMENT, + "ngbe_get_media_type: hw->device_id = %u/n", hw->device_id); + + media_type = ngbe_media_type_copper; + + return media_type; +} + +/** + * ngbe_stop_mac_link_on_d3 - Disables link on D3 + * @hw: pointer to hardware structure + * + * Disables link during D3 power down sequence. + * + **/ +void ngbe_stop_mac_link_on_d3(struct ngbe_hw *hw) +{ + + UNREFERENCED_PARAMETER(hw); + return; +} + +/** + * ngbe_setup_mac_link - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +int ngbe_setup_mac_link(struct ngbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + bool autoneg = false; + int status = 0; + u32 link_capabilities = NGBE_LINK_SPEED_UNKNOWN; + u32 link_speed = NGBE_LINK_SPEED_UNKNOWN; + u32 lan_speed = 0; + bool link_up = false; + + if (!((hw->subsystem_device_id & WOL_SUP_MASK) == WOL_SUP || + hw->ncsi_enabled)) { + /* Check to see if speed passed in is supported. */ + status = hw->mac.ops.get_link_capabilities(hw, + &link_capabilities, &autoneg); + if (status) + goto out; + + speed &= link_capabilities; + + if (speed == NGBE_LINK_SPEED_UNKNOWN) { + status = NGBE_ERR_LINK_SETUP; + goto out; + } + } + + status = hw->mac.ops.check_link(hw, + &link_speed, &link_up, false); + if (status != 0) + goto out; + if ((link_speed == speed) && link_up) { + switch (link_speed) { + case NGBE_LINK_SPEED_100_FULL: + lan_speed = 1; + break; + case NGBE_LINK_SPEED_1GB_FULL: + lan_speed = 2; + break; + case NGBE_LINK_SPEED_10_FULL: + lan_speed = 0; + break; + default: + break; + } + wr32m(hw, NGBE_CFG_LAN_SPEED, + 0x3, lan_speed); + } + +out: + return status; +} + + +/** + * ngbe_setup_copper_link - Set the PHY autoneg advertised field + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true if waiting is needed to complete + * + * Restarts link on PHY and MAC based on settings passed in. 
+ **/ +static int ngbe_setup_copper_link(struct ngbe_hw *hw, + u32 speed, + bool need_restart_AN) +{ + int status = 0; + struct ngbe_adapter *adapter = hw->back; + + /* Setup the PHY according to input speed */ + if (!((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA)) { + status = hw->phy.ops.setup_link(hw, speed, + need_restart_AN); + } + adapter->flags |= NGBE_FLAG_NEED_ANC_CHECK; + + return status; +} + +static int ngbe_reset_misc(struct ngbe_hw *hw) +{ + int i; + + /* receive packets that size > 2048 */ + wr32m(hw, NGBE_MAC_RX_CFG, + NGBE_MAC_RX_CFG_JE, NGBE_MAC_RX_CFG_JE); + + /* clear counters on read */ + wr32m(hw, NGBE_MMC_CONTROL, + NGBE_MMC_CONTROL_RSTONRD, NGBE_MMC_CONTROL_RSTONRD); + + wr32m(hw, NGBE_MAC_RX_FLOW_CTRL, + NGBE_MAC_RX_FLOW_CTRL_RFE, NGBE_MAC_RX_FLOW_CTRL_RFE); + + wr32(hw, NGBE_MAC_PKT_FLT, + NGBE_MAC_PKT_FLT_PR); + + wr32m(hw, NGBE_MIS_RST_ST, + NGBE_MIS_RST_ST_RST_INIT, 0xA00); + + /* errata 4: initialize mng flex tbl and wakeup flex tbl*/ + wr32(hw, NGBE_PSR_MNG_FLEX_SEL, 0); + for (i = 0; i < 16; i++) { + wr32(hw, NGBE_PSR_MNG_FLEX_DW_L(i), 0); + wr32(hw, NGBE_PSR_MNG_FLEX_DW_H(i), 0); + wr32(hw, NGBE_PSR_MNG_FLEX_MSK(i), 0); + } + wr32(hw, NGBE_PSR_LAN_FLEX_SEL, 0); + for (i = 0; i < 16; i++) { + wr32(hw, NGBE_PSR_LAN_FLEX_DW_L(i), 0); + wr32(hw, NGBE_PSR_LAN_FLEX_DW_H(i), 0); + wr32(hw, NGBE_PSR_LAN_FLEX_MSK(i), 0); + } + + /* set pause frame dst mac addr */ + wr32(hw, NGBE_RDB_PFCMACDAL, 0xC2000001); + wr32(hw, NGBE_RDB_PFCMACDAH, 0x0180); + + wr32(hw, NGBE_MDIO_CLAUSE_SELECT, 0xF); + + if (hw->gpio_ctl == 1) { + /* gpio0 is used to power on/off control*/ + wr32(hw, NGBE_GPIO_DDR, 0x1); + wr32(hw, NGBE_GPIO_DR, NGBE_GPIO_DR_0); + } + + ngbe_init_thermal_sensor_thresh(hw); + + return 0; +} + +void ngbe_do_lanrst(struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = hw->back; + + wr32(hw, NGBE_MIS_RST, + BIT(hw->bus.lan_id + 1) | + rd32(hw, NGBE_MIS_RST)); + msleep(20); + /*when veto set, lan reset not reset phy, so won't setup phy, + we use a flag record it*/ + if (ngbe_check_reset_blocked(hw)) + set_bit(__NGBE_NO_PHY_SET, &adapter->state); + else + clear_bit(__NGBE_NO_PHY_SET, &adapter->state); +} + +/** + * ngbe_reset_hw - Perform hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, perform a PHY reset, and perform a link (MAC) + * reset. + **/ +int ngbe_reset_hw(struct ngbe_hw *hw) +{ + struct ngbe_mac_info *mac = &hw->mac; + struct ngbe_adapter *adapter = hw->back; + u32 rst_delay, reset_status; + int status, i; + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status != 0) + return status; + + if (ngbe_get_media_type(hw) == ngbe_media_type_copper) { + mac->ops.setup_link = ngbe_setup_copper_link; + mac->ops.get_link_capabilities = + ngbe_get_copper_link_capabilities; + } + + /* + * Issue global reset to the MAC. Needs to be SW reset if link is up. + * If link reset is used when link is up, it might reset the PHY when + * mng is using it. If link is down or the flag to force full link + * reset is set, then perform link reset. 
+ */ + if (hw->force_full_reset) { + rst_delay = (rd32(hw, NGBE_MIS_RST_ST) & + NGBE_MIS_RST_ST_RST_INIT) >> + NGBE_MIS_RST_ST_RST_INI_SHIFT; + if (hw->reset_type == NGBE_SW_RESET) { + for (i = 0; i < rst_delay + 20; i++) { + reset_status = + rd32(hw, NGBE_MIS_RST_ST); + if (!(reset_status & + NGBE_MIS_RST_ST_DEV_RST_ST_MASK)) + break; + msleep(100); + } + + if (reset_status & NGBE_MIS_RST_ST_DEV_RST_ST_MASK) { + status = NGBE_ERR_RESET_FAILED; + DEBUGOUT("software reset polling failed to " + "complete.\n"); + return status; + } + status = ngbe_check_flash_load(hw, + NGBE_SPI_ILDR_STATUS_SW_RESET); + if (status != 0) + return status; + } else if (hw->reset_type == NGBE_GLOBAL_RESET) { +#ifndef _WIN32 + adapter = (struct ngbe_adapter *)hw->back; + msleep(100 * rst_delay + 2000); + pci_restore_state(adapter->pdev); + pci_save_state(adapter->pdev); + pci_wake_from_d3(adapter->pdev, false); + +#endif /*_WIN32*/ + } + } else { + ngbe_do_lanrst(hw); + if (!((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA)) { + wr32(hw, NGBE_MDIO_CLAUSE_SELECT, 0xF); + hw->phy.ops.phy_suspend(hw); + } + } + + status = ngbe_reset_misc(hw); + if (status != 0) + return status; + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = NGBE_SP_RAR_ENTRIES; + hw->mac.ops.init_rx_addrs(hw); + + pci_set_master(((struct ngbe_adapter *)hw->back)->pdev); + + return status; +} + +/* + * These defines allow us to quickly generate all of the necessary instructions + * in the function below by simply calling out NGBE_COMPUTE_SIG_HASH_ITERATION + * for values 0 through 15 + */ +#define NGBE_ATR_COMMON_HASH_KEY \ + (NGBE_ATR_BUCKET_HASH_KEY & NGBE_ATR_SIGNATURE_HASH_KEY) +#define NGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (NGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ + common_hash ^= lo_hash_dword >> n; \ + else if (NGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + else if (NGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ + sig_hash ^= lo_hash_dword << (16 - n); \ + if (NGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ + common_hash ^= hi_hash_dword >> n; \ + else if (NGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ + else if (NGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ + sig_hash ^= hi_hash_dword << (16 - n); \ +} while (0) + + +#define NGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (NGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + if (NGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ +} while (0) + +/* + * These two macros are meant to address the fact that we have registers + * that are either all or in part big-endian. As a result on big-endian + * systems we will end up byte swapping the value to little-endian before + * it is byte swapped again and written to the hardware in the original + * big-endian format. 
+ */ +#define NGBE_STORE_AS_BE32(_value) \ + (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \ + (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24)) + +#define NGBE_WRITE_REG_BE32(a, reg, value) \ + wr32((a), (reg), NGBE_STORE_AS_BE32(NGBE_NTOHL(value))) + +#define NGBE_STORE_AS_BE16(_value) \ + NGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8)) + +/** + * ngbe_start_hw - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function + * and the generation start_hw function. + * Then performs revision-specific operations, if any. + **/ +int ngbe_start_hw(struct ngbe_hw *hw) +{ + int ret_val = 0; + + /* Set the media type */ + hw->phy.media_type = hw->mac.ops.get_media_type(hw); + + /* PHY ops initialization must be done in reset_hw() */ + + /* Clear the VLAN filter table */ + hw->mac.ops.clear_vfta(hw); + + /* Clear statistics registers */ + hw->mac.ops.clear_hw_cntrs(hw); + + NGBE_WRITE_FLUSH(hw); + + /* Setup flow control */ + ret_val = hw->mac.ops.setup_fc(hw); + + /* Clear adapter stopped flag */ + hw->adapter_stopped = false; + + /* We need to run link autotry after the driver loads */ + hw->mac.autotry_restart = true; + + return ret_val; +} + +/** + * ngbe_enable_rx_dma - Enable the Rx DMA unit on emerald + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL + * + * Enables the Rx DMA unit for emerald + **/ +int ngbe_enable_rx_dma(struct ngbe_hw *hw, u32 regval) +{ + + /* + * Workaround for emerald silicon errata when enabling the Rx datapath. + * If traffic is incoming before we enable the Rx unit, it could hang + * the Rx DMA unit. Therefore, make sure the security engine is + * completely disabled prior to enabling the Rx unit. + */ + + hw->mac.ops.disable_sec_rx_path(hw); + + if (regval & NGBE_RDB_PB_CTL_PBEN) + hw->mac.ops.enable_rx(hw); + else + hw->mac.ops.disable_rx(hw); + + hw->mac.ops.enable_sec_rx_path(hw); + + return 0; +} + +/** + * ngbe_init_flash_params - Initialize flash params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ngbe_eeprom_info within the + * ngbe_hw struct in order to set up EEPROM access. 
+ **/ +int ngbe_init_flash_params(struct ngbe_hw *hw) +{ + struct ngbe_flash_info *flash = &hw->flash; + u32 eec; + + eec = 0x1000000; + flash->semaphore_delay = 10; + flash->dword_size = (eec >> 2); + flash->address_bits = 24; + DEBUGOUT3("FLASH params: size = %d, address bits: %d\n", + flash->dword_size, + flash->address_bits); + + return 0; +} + +/** + * ngbe_read_flash_buffer - Read FLASH dword(s) using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of dword in EEPROM to read + * @dwords: number of dwords + * @data: dword(s) read from the EEPROM + * + * Retrieves 32 bit dword(s) read from EEPROM + **/ +int ngbe_read_flash_buffer(struct ngbe_hw *hw, u32 offset, + u32 dwords, u32 *data) +{ + int status = 0; + u32 i; + + hw->eeprom.ops.init_params(hw); + + if (!dwords || offset + dwords >= hw->flash.dword_size) { + status = NGBE_ERR_INVALID_ARGUMENT; + ERROR_REPORT1(NGBE_ERROR_ARGUMENT, "Invalid FLASH arguments"); + return status; + } + + for (i = 0; i < dwords; i++) { + wr32(hw, NGBE_SPI_DATA, data[i]); + wr32(hw, NGBE_SPI_CMD, + NGBE_SPI_CMD_ADDR(offset + i) | + NGBE_SPI_CMD_CMD(0x0)); + + status = po32m(hw, NGBE_SPI_STATUS, + NGBE_SPI_STATUS_OPDONE, NGBE_SPI_STATUS_OPDONE, + NGBE_SPI_TIMEOUT, 0); + if (status) { + DEBUGOUT("FLASH read timed out\n"); + break; + } + } + + return status; +} + +/** + * ngbe_write_flash_buffer - Write FLASH dword(s) using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of dword in EEPROM to write + * @dwords: number of dwords + * @data: dword(s) write from to EEPROM + * + **/ +int ngbe_write_flash_buffer(struct ngbe_hw *hw, u32 offset, + u32 dwords, u32 *data) +{ + int status = 0; + u32 i; + + hw->eeprom.ops.init_params(hw); + + if (!dwords || offset + dwords >= hw->flash.dword_size) { + status = NGBE_ERR_INVALID_ARGUMENT; + ERROR_REPORT1(NGBE_ERROR_ARGUMENT, "Invalid FLASH arguments"); + return status; + } + + for (i = 0; i < dwords; i++) { + wr32(hw, NGBE_SPI_CMD, + NGBE_SPI_CMD_ADDR(offset + i) | + NGBE_SPI_CMD_CMD(0x1)); + + status = po32m(hw, NGBE_SPI_STATUS, + NGBE_SPI_STATUS_OPDONE, NGBE_SPI_STATUS_OPDONE, + NGBE_SPI_TIMEOUT, 0); + if (status != 0) { + DEBUGOUT("FLASH write timed out\n"); + break; + } + data[i] = rd32(hw, NGBE_SPI_DATA); + } + + return status; +} + +/** + * ngbe_init_eeprom_params - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ngbe_eeprom_info within the + * ngbe_hw struct in order to set up EEPROM access. + **/ +int ngbe_init_eeprom_params(struct ngbe_hw *hw) +{ + struct ngbe_eeprom_info *eeprom = &hw->eeprom; + u16 eeprom_size; + int status = 0; + + if (eeprom->type == ngbe_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = ngbe_eeprom_none; + + if (!(rd32(hw, NGBE_SPI_STATUS) & + NGBE_SPI_STATUS_FLASH_BYPASS)) { + eeprom->type = ngbe_flash; + eeprom_size = 4096; + eeprom->word_size = eeprom_size >> 1; + + DEBUGOUT2("Eeprom params: type = %d, size = %d\n", + eeprom->type, eeprom->word_size); + } + } + + eeprom->sw_region_offset = 0x80; + + return status; +} + +/** + * ngbe_read_ee_hostif - Read EEPROM word using a host interface cmd + * assuming that the semaphore is already obtained. + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. 
+ **/ +static int ngbe_read_ee_hostif_data(struct ngbe_hw *hw, u16 offset, + u16 *data) +{ + int status; + struct ngbe_hic_read_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = NGBE_CPU_TO_BE32(offset * 2); + /* one word */ + buffer.length = NGBE_CPU_TO_BE16(sizeof(u16)); + + status = ngbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + NGBE_HI_COMMAND_TIMEOUT, false); + + if (status) + return status; + if (ngbe_check_mng_access(hw)) + *data = (u16)rd32a(hw, NGBE_MNG_MBOX, + FW_NVM_DATA_OFFSET); + else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + return status; + } + + + return 0; +} + +int ngbe_eepromcheck_cap(struct ngbe_hw *hw, u16 offset, + u32 *data) +{ + int tmp; + int status; + struct ngbe_hic_read_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_EEPROM_CHECK_STATUS; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = 0; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = 0; + /* one word */ + buffer.length = 0; + + + status = ngbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + NGBE_HI_COMMAND_TIMEOUT, false); + + if (status) + return status; + if (ngbe_check_mng_access(hw)){ + tmp = (u32)rd32a(hw, NGBE_MNG_MBOX, 1); + if(tmp == NGBE_CHECKSUM_CAP_ST_PASS ) + { + status = 0; + }else + status = NGBE_ERR_EEPROM_CHECKSUM; + }else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + return status; + + } + + return status; +} + +static int ngbe_phy_led_oem_chk(struct ngbe_hw *hw, u32 *data) +{ + int tmp; + int status; + struct ngbe_hic_read_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_PHY_LED_CONF; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = 0; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = 0; + /* one word */ + buffer.length = 0; + + status = ngbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + NGBE_HI_COMMAND_TIMEOUT, false); -#include -#include -#include + if (status) + return status; -#include "../libwx/wx_type.h" -#include "../libwx/wx_hw.h" -#include "ngbe_type.h" -#include "ngbe_hw.h" + if (ngbe_check_mng_access(hw)){ + tmp = (u32)rd32a(hw, NGBE_MNG_MBOX, 1); + if (tmp == NGBE_CHECKSUM_CAP_ST_PASS) + { + tmp = (u32)rd32a(hw, NGBE_MNG_MBOX, 2); + *data = tmp; + status = 0; + } else if (tmp == NGBE_CHECKSUM_CAP_ST_FAIL) { + *data = tmp; + status = -1; + } else { + status = NGBE_ERR_EEPROM_CHECKSUM; + } + }else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + return status; + } + + return status; +} -int ngbe_eeprom_chksum_hostif(struct wx *wx) +int ngbe_phy_signal_set(struct ngbe_hw *hw) { - struct wx_hic_read_shadow_ram buffer; int status; - int tmp; + struct ngbe_hic_read_shadow_ram buffer; - buffer.hdr.req.cmd = NGBE_FW_EEPROM_CHECKSUM_CMD; + buffer.hdr.req.cmd = FW_PHY_SIGNAL; buffer.hdr.req.buf_lenh = 0; buffer.hdr.req.buf_lenl = 0; - buffer.hdr.req.checksum = NGBE_FW_CMD_DEFAULT_CHECKSUM; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + /* convert offset from words to bytes */ buffer.address = 0; /* one word */ buffer.length = 0; - status = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer), - WX_HI_COMMAND_TIMEOUT, false); + status = ngbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + NGBE_HI_COMMAND_TIMEOUT, false); - if (status < 0) + return status; +} + + +/** + 
* ngbe_read_ee_hostif - Read EEPROM word using a host interface cmd + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. + **/ +int ngbe_read_ee_hostif(struct ngbe_hw *hw, u16 offset, + u16 *data) +{ + int status = 0; + + if (hw->mac.ops.acquire_swfw_sync(hw, + NGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) { + status = ngbe_read_ee_hostif_data(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, + NGBE_MNG_SWFW_SYNC_SW_FLASH); + } else { + status = NGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ngbe_read_ee_hostif_buffer- Read EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the hostif. + **/ +int ngbe_read_ee_hostif_buffer(struct ngbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + struct ngbe_hic_read_shadow_ram buffer; + u32 current_word = 0; + u16 words_to_read; + int status; + u32 i; + u32 value = 0; + + /* Take semaphore for the entire operation. */ + status = hw->mac.ops.acquire_swfw_sync(hw, + NGBE_MNG_SWFW_SYNC_SW_FLASH); + if (status) { + DEBUGOUT("EEPROM read buffer - semaphore failed\n"); return status; - tmp = rd32a(wx, WX_MNG_MBOX, 1); - if (tmp == NGBE_FW_CMD_ST_PASS) - return 0; - return -EIO; + } + while (words) { + if (words > FW_MAX_READ_BUFFER_SIZE / 2) + words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; + else + words_to_read = words; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = NGBE_CPU_TO_BE32((offset + current_word) * 2); + buffer.length = NGBE_CPU_TO_BE16(words_to_read * 2); + + status = ngbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + NGBE_HI_COMMAND_TIMEOUT, + false); + + if (status) { + DEBUGOUT("Host interface command failed\n"); + goto out; + } + + for (i = 0; i < words_to_read; i++) { + u32 reg = NGBE_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + + 2 * i; + if (ngbe_check_mng_access(hw)) + value = rd32(hw, reg); + else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + goto out; + } + data[current_word] = (u16)(value & 0xffff); + current_word++; + i++; + if (i < words_to_read) { + value >>= 16; + data[current_word] = (u16)(value & 0xffff); + current_word++; + } + } + words -= words_to_read; + } + +out: + hw->mac.ops.release_swfw_sync(hw, + NGBE_MNG_SWFW_SYNC_SW_FLASH); + return status; } -static int ngbe_reset_misc(struct wx *wx) + +/** + * ngbe_read_ee_hostif - Read EEPROM word using a host interface cmd + * assuming that the semaphore is already obtained. + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 32 bit word from the EEPROM using the hostif. 
+ **/ +static int ngbe_read_ee_hostif_data32(struct ngbe_hw *hw, u16 offset, + u32 *data) { - wx_reset_misc(wx); - if (wx->gpio_ctrl) { - /* gpio0 is used to power on/off control*/ - wr32(wx, NGBE_GPIO_DDR, 0x1); - ngbe_sfp_modules_txrx_powerctl(wx, false); + int status; + struct ngbe_hic_read_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = NGBE_CPU_TO_BE32(offset * 2); + /* one word */ + buffer.length = NGBE_CPU_TO_BE16(sizeof(u32)); + + status = ngbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + NGBE_HI_COMMAND_TIMEOUT, false); + + if (status) + return status; + if (ngbe_check_mng_access(hw)) + *data = (u32)rd32a(hw, NGBE_MNG_MBOX, FW_NVM_DATA_OFFSET); + else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + return status; } return 0; } -void ngbe_sfp_modules_txrx_powerctl(struct wx *wx, bool swi) + +/** + * ngbe_read_ee_hostif - Read EEPROM word using a host interface cmd + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 32 bit word from the EEPROM using the hostif. + **/ +int ngbe_read_ee_hostif32(struct ngbe_hw *hw, u16 offset, + u32 *data) { - /* gpio0 is used to power on control . 0 is on */ - wr32(wx, NGBE_GPIO_DR, swi ? 0 : NGBE_GPIO_DR_0); + int status = 0; + + if (hw->mac.ops.acquire_swfw_sync(hw, NGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) { + status = ngbe_read_ee_hostif_data32(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, + NGBE_MNG_SWFW_SYNC_SW_FLASH); + } else { + status = NGBE_ERR_SWFW_SYNC; + } + + return status; } + /** - * ngbe_reset_hw - Perform hardware reset - * @wx: pointer to hardware structure + * ngbe_write_ee_hostif - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM * - * Resets the hardware by resetting the transmit and receive units, masks - * and clears all interrupts, perform a PHY reset, and perform a link (MAC) - * reset. + * Write a 16 bit word to the EEPROM using the hostif. **/ -int ngbe_reset_hw(struct wx *wx) +static int ngbe_write_ee_hostif_data(struct ngbe_hw *hw, u16 offset, + u16 data) { - u32 val = 0; - int ret = 0; + int status; + struct ngbe_hic_write_shadow_ram buffer; - /* Call wx stop to disable tx/rx and clear interrupts */ - ret = wx_stop_adapter(wx); - if (ret != 0) - return ret; + buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* one word */ + buffer.length = NGBE_CPU_TO_BE16(sizeof(u16)); + buffer.data = data; + buffer.address = NGBE_CPU_TO_BE32(offset * 2); + + status = ngbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + NGBE_HI_COMMAND_TIMEOUT, false); + + return status; +} + +/** + * ngbe_write_ee_hostif - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. 
+ **/ +int ngbe_write_ee_hostif(struct ngbe_hw *hw, u16 offset, + u16 data) +{ + int status = 0; + + if (hw->mac.ops.acquire_swfw_sync(hw, + NGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) { + status = ngbe_write_ee_hostif_data(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, + NGBE_MNG_SWFW_SYNC_SW_FLASH); + } else { + DEBUGOUT("write ee hostif failed to get semaphore"); + status = NGBE_ERR_SWFW_SYNC; + } + + return status; +} + + /** +* ngbe_write_ee_hostif - Write EEPROM word using hostif +* @hw: pointer to hardware structure +* @offset: offset of word in the EEPROM to write +* @data: word write to the EEPROM +* +* Write a 16 bit word to the EEPROM using the hostif. +**/ +static int ngbe_write_ee_hostif_data32(struct ngbe_hw *hw, u16 offset, + u32 data) +{ + int status; + struct ngbe_hic_write_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* one word */ + buffer.length = NGBE_CPU_TO_BE16(sizeof(u32)); + buffer.data = data; + buffer.address = NGBE_CPU_TO_BE32(offset * 2); + + status = ngbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + NGBE_HI_COMMAND_TIMEOUT, false); + + return status; +} + +/*** ngbe_write_ee_hostif - Write EEPROM word using hostif +* @hw: pointer to hardware structure +* @offset: offset of word in the EEPROM to write +* @data: word write to the EEPROM +* +* Write a 16 bit word to the EEPROM using the hostif. +**/ +int ngbe_write_ee_hostif32(struct ngbe_hw *hw, u16 offset, + u32 data) +{ + int status = 0; + + + if (hw->mac.ops.acquire_swfw_sync(hw, + NGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) { + status = ngbe_write_ee_hostif_data32(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, + NGBE_MNG_SWFW_SYNC_SW_FLASH); + } else { + DEBUGOUT("write ee hostif failed to get semaphore"); + status = NGBE_ERR_SWFW_SYNC; + } + + return status; +} + + +/** + * ngbe_write_ee_hostif_buffer - Write EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) write to the EEPROM + * + * Write a 16 bit word(s) to the EEPROM using the hostif. + **/ +int ngbe_write_ee_hostif_buffer(struct ngbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + int status = 0; + u16 i = 0; + + /* Take semaphore for the entire operation. 
*/ + status = hw->mac.ops.acquire_swfw_sync(hw, + NGBE_MNG_SWFW_SYNC_SW_FLASH); + if (status != 0) { + DEBUGOUT("EEPROM write buffer - semaphore failed\n"); + return status; + } - if (wx->mac_type != em_mac_type_mdi) { - val = WX_MIS_RST_LAN_RST(wx->bus.func); - wr32(wx, WX_MIS_RST, val | rd32(wx, WX_MIS_RST)); + for (i = 0; i < words; i++) { + status = ngbe_write_ee_hostif_data(hw, offset + i, + data[i]); - ret = read_poll_timeout(rd32, val, - !(val & (BIT(9) << wx->bus.func)), 1000, - 100000, false, wx, 0x10028); - if (ret) { - wx_err(wx, "Lan reset exceed s maximum times.\n"); - return ret; + if (status != 0) { + DEBUGOUT("Eeprom buffered write failed\n"); + break; } } - ngbe_reset_misc(wx); - /* Store the permanent mac address */ - wx_get_mac_addr(wx, wx->mac.perm_addr); + hw->mac.ops.release_swfw_sync(hw, NGBE_MNG_SWFW_SYNC_SW_FLASH); + return status; +} + + + +/** + * ngbe_calc_eeprom_checksum - Calculates and returns the checksum + * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +int ngbe_calc_eeprom_checksum(struct ngbe_hw *hw) +{ + u16 *buffer = NULL; + u32 buffer_size = 0; + + u16 *eeprom_ptrs = NULL; + u16 *local_buffer; + int status; + u16 checksum = 0; + u16 i; + + hw->eeprom.ops.init_params(hw); + + if (!buffer) { + eeprom_ptrs = (u16 *)vmalloc(NGBE_EEPROM_LAST_WORD * + sizeof(u16)); + if (!eeprom_ptrs) + return NGBE_ERR_NO_SPACE; + /* Read pointer area */ + status = ngbe_read_ee_hostif_buffer(hw, 0, + NGBE_EEPROM_LAST_WORD, + eeprom_ptrs); + if (status) { + DEBUGOUT("Failed to read EEPROM image\n"); + return status; + } + local_buffer = eeprom_ptrs; + } else { + if (buffer_size < NGBE_EEPROM_LAST_WORD) + return NGBE_ERR_PARAM; + local_buffer = buffer; + } + + for (i = 0; i < NGBE_EEPROM_LAST_WORD; i++) + if (i != hw->eeprom.sw_region_offset + NGBE_EEPROM_CHECKSUM) + checksum += local_buffer[i]; + + checksum = (u16)NGBE_EEPROM_SUM - checksum; + if (eeprom_ptrs) + vfree(eeprom_ptrs); + + return (int)checksum; +} + +/** + * ngbe_update_eeprom_checksum - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. + **/ +int ngbe_update_eeprom_checksum(struct ngbe_hw *hw) +{ + int status; + u16 checksum = 0; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = ngbe_read_ee_hostif(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + status = ngbe_calc_eeprom_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ngbe_write_ee_hostif(hw, NGBE_EEPROM_CHECKSUM, + checksum); + + return status; +} + +/** + * ngbe_validate_eeprom_checksum - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +int ngbe_validate_eeprom_checksum(struct ngbe_hw *hw, + u16 *checksum_val) +{ + int status; + u16 checksum; + u16 read_checksum = 0; + + /* Read the first word from the EEPROM. 
If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ngbe_read_ee_hostif(hw, hw->eeprom.sw_region_offset + + NGBE_EEPROM_CHECKSUM, + &read_checksum); + if (status) + return status; + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + status = NGBE_ERR_EEPROM_CHECKSUM; + ERROR_REPORT1(NGBE_ERROR_INVALID_STATE, + "Invalid EEPROM checksum\n"); + } + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + + return status; +} + + +/** + * ngbe_check_mac_link - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +static int ngbe_check_mac_link(struct ngbe_hw *hw, u32 *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + struct ngbe_adapter *adapter = hw->back; + bool need_restart = false; + u16 value = 0, speed_sta; + u32 i, speed_store; + int status = 0; + + if (hw->mac.autoneg) + speed_store = hw->phy.autoneg_advertised; + else + speed_store = hw->phy.force_speed; + + if ((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA) { + *link_up = true; + *speed = NGBE_LINK_SPEED_1GB_FULL; + return status; + } + + if (link_up_wait_to_complete) { + for (i = 0; i < NGBE_LINK_UP_TIME; i++) { + status = hw->phy.ops.read_reg(hw, 0x1A, 0xA43, &value); + if (!status && (value & 0x4)) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + } + } else { + status = hw->phy.ops.read_reg(hw, 0x1A, 0xA43, &value); + if (!status && (value & 0x4)) { + *link_up = true; + } else { + *link_up = false; + } + } + + speed_sta = value & 0x38; + if (*link_up) { + switch (speed_sta) { + case 0x28: + *speed = NGBE_LINK_SPEED_1GB_FULL; + if (NGBE_LINK_RETRY == 1) + hw->restart_an = 0; + break; + case 0x18: + *speed = NGBE_LINK_SPEED_100_FULL; + if (NGBE_LINK_RETRY == 1) + need_restart = true; + break; + case 0x8: + *speed = NGBE_LINK_SPEED_10_FULL; + if (NGBE_LINK_RETRY == 1) + need_restart = true; + break; + default: + break; + } + } else + *speed = NGBE_LINK_SPEED_UNKNOWN; + + if (NGBE_LINK_RETRY == 1 && + hw->restart_an <= 2 && + need_restart == true && + (speed_store & NGBE_LINK_SPEED_1GB_FULL)) { + value = NGBE_MDI_PHY_RESTART_AN | NGBE_MDI_PHY_ANE; + hw->phy.ops.write_reg(hw, 0, 0, value); + hw->restart_an++; + e_info(drv, "Restart an is %d\n", hw->restart_an); + } + + if (NGBE_POLL_LINK_STATUS != 1) + return status; + + if (*speed == NGBE_LINK_SPEED_1GB_FULL) { + status = hw->phy.ops.read_reg(hw, 0xA, 0x0, &value); + if (!status && !(value & BIT(13))) + *link_up = false; + } + + return status; +} + +static int ngbe_check_mac_link_mdi(struct ngbe_hw *hw, + u32 *speed, + bool *link_up, + bool link_up_wait_to_complete) +{ + u32 i; + u16 value = 0; + int status = 0; + u16 speed_sta = 0; + + if (hw->phy.type == ngbe_phy_m88e1512) + /* select page 0 */ + status = hw->phy.ops.write_reg_mdi(hw, 22, 0, 0); + else + /* select page 1 */ + status = hw->phy.ops.write_reg_mdi(hw, 22, 0, 1); + status = 
hw->phy.ops.read_reg_mdi(hw, 17, 0, &value); + if (link_up_wait_to_complete) { + for (i = 0; i < NGBE_LINK_UP_TIME; i++) { + status = hw->phy.ops.read_reg_mdi(hw, 17, 0, &value); + if (value & 0x400) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + } + } else { + status = hw->phy.ops.read_reg_mdi(hw, 17, 0, &value); + if (value & 0x400) { + *link_up = true; + } else { + *link_up = false; + } + } + + speed_sta = value & 0xC000; + if (*link_up) { + if ( speed_sta == 0x8000) { + *speed = NGBE_LINK_SPEED_1GB_FULL; + } else if ( speed_sta == 0x4000) { + *speed = NGBE_LINK_SPEED_100_FULL; + } else if ( speed_sta == 0x0000) { + *speed = NGBE_LINK_SPEED_10_FULL; + } + } else + *speed = NGBE_LINK_SPEED_UNKNOWN; + + return status; +} + +static int ngbe_check_mac_link_yt8521s(struct ngbe_hw *hw, + u32 *speed, + bool *link_up, + bool link_up_wait_to_complete) +{ + u32 i; + u16 value = 0; + int status = 0; + u16 speed_sta = 0; + unsigned long flags; + + if (link_up_wait_to_complete) { + for (i = 0; i < NGBE_LINK_UP_TIME; i++) { + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x11, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + if (value & 0x400) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + } + } else { + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x11, 0, &value); + + if (value & 0x400) + *link_up = true; + else { + *link_up = false; + + ngbe_phy_read_reg_mdi(hw, 0x11, 0, &value); + if (value & 0x400) + *link_up = true; + else + *link_up = false; + } + spin_unlock_irqrestore(&hw->phy_lock, flags); + } + + speed_sta = value & 0xC000; + if (*link_up) { + if ( speed_sta == 0x8000) { + *speed = NGBE_LINK_SPEED_1GB_FULL; + wr32m(hw, NGBE_CFG_LED_CTL, 0xE | BIT(17), BIT(1) | BIT(17)); + } else if ( speed_sta == 0x4000) { + *speed = NGBE_LINK_SPEED_100_FULL; + wr32m(hw, NGBE_CFG_LED_CTL, 0xE | BIT(17), BIT(2) | BIT(17)); + } else if ( speed_sta == 0x0000) { + *speed = NGBE_LINK_SPEED_10_FULL; + wr32m(hw, NGBE_CFG_LED_CTL, 0xE | BIT(17), BIT(3) | BIT(17)); + } + } else { + *speed = NGBE_LINK_SPEED_UNKNOWN; + wr32m(hw, NGBE_CFG_LED_CTL, 0xE | BIT(17), 0); + } + return status; +} + +/** + * ngbe_setup_eee - Enable/disable EEE support + * @hw: pointer to the HW structure + * @enable_eee: boolean flag to enable EEE + * + * Enable/disable EEE based on enable_eee flag. + * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C + * are modified. + * + **/ +int ngbe_setup_eee(struct ngbe_hw *hw, bool enable_eee) +{ + /* fix eee */ + + return 0; +} + +void ngbe_init_mac_link_ops(struct ngbe_hw *hw) +{ + struct ngbe_mac_info *mac = &hw->mac; + + mac->ops.setup_link = ngbe_setup_mac_link; +} + +/** + * ngbe_init_ops - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for emerald. + * Does not touch the hardware. 
+ **/ + +void ngbe_init_ops(struct ngbe_hw *hw) +{ + struct ngbe_mac_info *mac = &hw->mac; + struct ngbe_phy_info *phy = &hw->phy; + + ngbe_init_phy_ops_common(hw); + ngbe_init_ops_common(hw); - /* reset num_rar_entries to 128 */ - wx->mac.num_rar_entries = NGBE_RAR_ENTRIES; - wx_init_rx_addrs(wx); - pci_set_master(wx->pdev); + if (hw->phy.type == ngbe_phy_m88e1512 || + hw->phy.type == ngbe_phy_m88e1512_sfi || + hw->phy.type == ngbe_phy_m88e1512_unknown) { + phy->ops.read_reg_mdi = ngbe_phy_read_reg_mdi; + phy->ops.write_reg_mdi = ngbe_phy_write_reg_mdi; + phy->ops.setup_link = ngbe_phy_setup_link_m88e1512; + phy->ops.reset = ngbe_phy_reset_m88e1512; + phy->ops.phy_suspend = ngbe_mv_suspend; + phy->ops.check_event = ngbe_phy_check_event_m88e1512; + phy->ops.get_adv_pause = ngbe_phy_get_advertised_pause_m88e1512; + phy->ops.get_lp_adv_pause = ngbe_phy_get_lp_advertised_pause_m88e1512; + phy->ops.set_adv_pause = ngbe_phy_set_pause_advertisement_m88e1512; + + mac->ops.check_link = ngbe_check_mac_link_mdi; + } else if (hw->phy.type == ngbe_phy_yt8521s_sfi) { + phy->ops.read_reg_mdi = ngbe_phy_read_reg_mdi; + phy->ops.write_reg_mdi = ngbe_phy_write_reg_mdi; + phy->ops.setup_link = ngbe_phy_setup_link_yt8521s; + phy->ops.reset = ngbe_phy_reset_yt8521s; + phy->ops.phy_suspend = ngbe_yt_suspend; + phy->ops.check_event = ngbe_phy_check_event_yt8521s; + phy->ops.get_adv_pause = ngbe_phy_get_advertised_pause_yt8521s; + phy->ops.get_lp_adv_pause = ngbe_phy_get_lp_advertised_pause_yt8521s; + phy->ops.set_adv_pause = ngbe_phy_set_pause_advertisement_yt8521s; + + mac->ops.check_link = ngbe_check_mac_link_yt8521s; + } +} + +void ngbe_init_ops_common(struct ngbe_hw *hw) +{ + struct ngbe_mac_info *mac = &hw->mac; + struct ngbe_eeprom_info *eeprom = &hw->eeprom; + struct ngbe_flash_info *flash = &hw->flash; + + /* MAC */ + mac->ops.init_hw = ngbe_init_hw; + mac->ops.clear_hw_cntrs = ngbe_clear_hw_cntrs; + mac->ops.get_mac_addr = ngbe_get_mac_addr; + mac->ops.stop_adapter = ngbe_stop_adapter; + mac->ops.get_bus_info = ngbe_get_bus_info; + mac->ops.set_lan_id = ngbe_set_lan_id_multi_port_pcie; + mac->ops.acquire_swfw_sync = ngbe_acquire_swfw_sync; + mac->ops.release_swfw_sync = ngbe_release_swfw_sync; + mac->ops.reset_hw = ngbe_reset_hw; + mac->ops.get_media_type = ngbe_get_media_type; + mac->ops.disable_sec_rx_path = ngbe_disable_sec_rx_path; + mac->ops.enable_sec_rx_path = ngbe_enable_sec_rx_path; + mac->ops.enable_rx_dma = ngbe_enable_rx_dma; + mac->ops.start_hw = ngbe_start_hw; + mac->ops.get_device_caps = ngbe_get_device_caps; + mac->ops.setup_eee = ngbe_setup_eee; + + /* LEDs */ + mac->ops.led_on = ngbe_led_on; + mac->ops.led_off = ngbe_led_off; + + /* RAR, Multicast, VLAN */ + mac->ops.set_rar = ngbe_set_rar; + mac->ops.clear_rar = ngbe_clear_rar; + mac->ops.init_rx_addrs = ngbe_init_rx_addrs; + mac->ops.update_uc_addr_list = ngbe_update_uc_addr_list; + mac->ops.update_mc_addr_list = ngbe_update_mc_addr_list; + mac->ops.enable_mc = ngbe_enable_mc; + mac->ops.disable_mc = ngbe_disable_mc; + mac->ops.enable_rx = ngbe_enable_rx; + mac->ops.disable_rx = ngbe_disable_rx; + mac->ops.set_vmdq_san_mac = ngbe_set_vmdq_san_mac; + mac->ops.insert_mac_addr = ngbe_insert_mac_addr; + mac->rar_highwater = 1; + mac->ops.set_vfta = ngbe_set_vfta; + mac->ops.set_vlvf = ngbe_set_vlvf; + mac->ops.clear_vfta = ngbe_clear_vfta; + mac->ops.init_uta_tables = ngbe_init_uta_tables; + mac->ops.set_mac_anti_spoofing = ngbe_set_mac_anti_spoofing; + mac->ops.set_vlan_anti_spoofing = ngbe_set_vlan_anti_spoofing; + 
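ngbe_init_ops() above follows a two-step ops-table pattern: ngbe_init_ops_common() fills every function pointer with the generic handler first, and the PHY-specific branches then override only the entries that differ (for example mac->ops.check_link for the M88E1512 and YT8521S parts). The sketch below reduces that pattern to a few hypothetical demo_* names; it is not the driver's API, just an illustration of the dispatch.

/* Reduced illustration of the init_ops pattern: common defaults first,
 * then per-PHY overrides of individual function pointers.
 */
#include <stdio.h>

enum demo_phy_type { DEMO_PHY_INTERNAL, DEMO_PHY_M88E1512, DEMO_PHY_YT8521S };

struct demo_mac_ops {
	int (*check_link)(void);
	int (*setup_link)(void);
};

static int demo_check_link_internal(void) { return puts("internal check_link"); }
static int demo_check_link_mdi(void)      { return puts("m88e1512 check_link"); }
static int demo_check_link_yt8521s(void)  { return puts("yt8521s check_link"); }
static int demo_setup_link(void)          { return puts("generic setup_link"); }

static void demo_init_ops(struct demo_mac_ops *ops, enum demo_phy_type type)
{
	/* common defaults, as in ngbe_init_ops_common() */
	ops->check_link = demo_check_link_internal;
	ops->setup_link = demo_setup_link;

	/* PHY-specific overrides, as in ngbe_init_ops() */
	if (type == DEMO_PHY_M88E1512)
		ops->check_link = demo_check_link_mdi;
	else if (type == DEMO_PHY_YT8521S)
		ops->check_link = demo_check_link_yt8521s;
}

int main(void)
{
	struct demo_mac_ops ops;

	demo_init_ops(&ops, DEMO_PHY_YT8521S);
	return ops.check_link() < 0;	/* prints "yt8521s check_link" */
}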
mac->ops.set_ethertype_anti_spoofing = + ngbe_set_ethertype_anti_spoofing; + + /* Flow Control */ + mac->ops.fc_enable = ngbe_fc_enable; + mac->ops.setup_fc = ngbe_setup_fc; + + /* Link */ + mac->ops.get_link_capabilities = ngbe_get_link_capabilities; + mac->ops.check_link = ngbe_check_mac_link; + mac->ops.setup_rxpba = ngbe_set_rxpba; + + mac->mcft_size = NGBE_SP_MC_TBL_SIZE; + mac->vft_size = NGBE_SP_VFT_TBL_SIZE; + mac->num_rar_entries = NGBE_SP_RAR_ENTRIES; + mac->rx_pb_size = NGBE_SP_RX_PB_SIZE; + mac->max_rx_queues = NGBE_SP_MAX_RX_QUEUES; + mac->max_tx_queues = NGBE_SP_MAX_TX_QUEUES; + mac->max_msix_vectors = ngbe_get_pcie_msix_count(hw); + + mac->arc_subsystem_valid = (rd32(hw, NGBE_MIS_ST) & + NGBE_MIS_ST_MNG_INIT_DN) ? true : false; + + hw->mbx.ops.init_params = ngbe_init_mbx_params_pf; + + /* EEPROM */ + eeprom->ops.init_params = ngbe_init_eeprom_params; + eeprom->ops.calc_checksum = ngbe_calc_eeprom_checksum; + eeprom->ops.read = ngbe_read_ee_hostif; + eeprom->ops.read_buffer = ngbe_read_ee_hostif_buffer; + eeprom->ops.read32 = ngbe_read_ee_hostif32; + eeprom->ops.write = ngbe_write_ee_hostif; + eeprom->ops.write_buffer = ngbe_write_ee_hostif_buffer; + eeprom->ops.update_checksum = ngbe_update_eeprom_checksum; + eeprom->ops.validate_checksum = ngbe_validate_eeprom_checksum; + eeprom->ops.eeprom_chksum_cap_st = ngbe_eepromcheck_cap; + eeprom->ops.phy_led_oem_chk = ngbe_phy_led_oem_chk; + eeprom->ops.phy_signal_set = ngbe_phy_signal_set; + + /* FLASH */ + flash->ops.init_params = ngbe_init_flash_params; + flash->ops.read_buffer = ngbe_read_flash_buffer; + flash->ops.write_buffer = ngbe_write_flash_buffer; + + /* Manageability interface */ + mac->ops.set_fw_drv_ver = ngbe_set_fw_drv_ver; + + mac->ops.get_thermal_sensor_data = + ngbe_get_thermal_sensor_data; + mac->ops.init_thermal_sensor_thresh = + ngbe_init_thermal_sensor_thresh; +} + +int ngbe_hic_write_lldp(struct ngbe_hw *hw,u32 open) +{ + u32 tmp = 0, i = 0, lldp_flash_data = 0; + int status; + struct ngbe_adapter *adapter = hw->back; + struct pci_dev *pdev = adapter->pdev; + struct ngbe_hic_write_lldp buffer; + + buffer.hdr.cmd = 0xf3 - open; + buffer.hdr.buf_len = 0x1; + buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + buffer.hdr.checksum = FW_DEFAULT_CHECKSUM; + buffer.func = PCI_FUNC(pdev->devfn); + status = ngbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), 5000, false); + + for (; i < 0x1000 / sizeof(u32); i++) { + status = ngbe_flash_read_dword(hw, NGBE_LLDP_REG + i * 4, &tmp); + if(status) + return status; + if (tmp == U32_MAX) + break; + lldp_flash_data = tmp; + } + if (!!(lldp_flash_data & BIT(hw->bus.lan_id)) != open) + status = -EINVAL; + return status; + +} + +int ngbe_is_lldp(struct ngbe_hw *hw) +{ + u32 tmp = 0, lldp_flash_data = 0, i = 0; + struct ngbe_adapter *adapter = hw->back; + int status = 0; + + for (; i < 0x1000 / sizeof(u32); i++) { + status = ngbe_flash_read_dword(hw, NGBE_LLDP_REG + i * 4, &tmp); + if(status) + return status; + if (tmp == U32_MAX) + break; + lldp_flash_data = tmp; + } + if (lldp_flash_data & BIT(hw->bus.lan_id)) + adapter->eth_priv_flags |= NGBE_ETH_PRIV_FLAG_LLDP; + else + adapter->eth_priv_flags &= ~NGBE_ETH_PRIV_FLAG_LLDP; return 0; } + + diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h index a4693e006816670bbfd1c2f8b27b267eb8f3ee75..dc6f0d3a384ee4648b9135c5e6f57ccf1b8e92a2 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h @@ -1,13 +1,280 @@ -/* 
SPDX-License-Identifier: GPL-2.0 */ /* * WangXun Gigabit PCI Express Linux driver - * Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". */ #ifndef _NGBE_HW_H_ #define _NGBE_HW_H_ -int ngbe_eeprom_chksum_hostif(struct wx *wx); -void ngbe_sfp_modules_txrx_powerctl(struct wx *wx, bool swi); -int ngbe_reset_hw(struct wx *wx); +#define NGBE_EMC_INTERNAL_DATA 0x00 +#define NGBE_EMC_INTERNAL_THERM_LIMIT 0x20 +#define NGBE_EMC_DIODE1_DATA 0x01 +#define NGBE_EMC_DIODE1_THERM_LIMIT 0x19 +#define NGBE_EMC_DIODE2_DATA 0x23 +#define NGBE_EMC_DIODE2_THERM_LIMIT 0x1A +#define NGBE_EMC_DIODE3_DATA 0x2A +#define NGBE_EMC_DIODE3_THERM_LIMIT 0x30 + +#define SPI_CLK_DIV 3 + +#define SPI_CMD_ERASE_CHIP 4 // SPI erase chip command +#define SPI_CMD_ERASE_SECTOR 3 // SPI erase sector command +#define SPI_CMD_WRITE_DWORD 0 // SPI write a dword command +#define SPI_CMD_READ_DWORD 1 // SPI read a dword command +#define SPI_CMD_USER_CMD 5 // SPI user command + +#define SPI_CLK_CMD_OFFSET 28 // SPI command field offset in Command register +#define SPI_CLK_DIV_OFFSET 25 // SPI clock divide field offset in Command register + +#define SPI_TIME_OUT_VALUE 10000 +#define SPI_SECTOR_SIZE (4 * 1024) // FLASH sector size is 64KB +#define SPI_H_CMD_REG_ADDR 0x10104 // SPI Command register address +#define SPI_H_DAT_REG_ADDR 0x10108 // SPI Data register address +#define SPI_H_STA_REG_ADDR 0x1010c // SPI Status register address +#define SPI_H_USR_CMD_REG_ADDR 0x10110 // SPI User Command register address +#define SPI_CMD_CFG1_ADDR 0x10118 // Flash command configuration register 1 +#define MISC_RST_REG_ADDR 0x1000c // Misc reset register address +#define MGR_FLASH_RELOAD_REG_ADDR 0x101a0 // MGR reload flash read + +#define MAC_ADDR0_WORD0_OFFSET_1G 0x006000c // MAC Address for LAN0, stored in external FLASH +#define MAC_ADDR0_WORD1_OFFSET_1G 0x0060014 +#define MAC_ADDR1_WORD0_OFFSET_1G 0x006800c // MAC Address for LAN1, stored in external FLASH +#define MAC_ADDR1_WORD1_OFFSET_1G 0x0068014 +#define MAC_ADDR2_WORD0_OFFSET_1G 0x007000c // MAC Address for LAN2, stored in external FLASH +#define MAC_ADDR2_WORD1_OFFSET_1G 0x0070014 +#define MAC_ADDR3_WORD0_OFFSET_1G 0x007800c // MAC Address for LAN3, stored in external FLASH +#define MAC_ADDR3_WORD1_OFFSET_1G 0x0078014 +#define PRODUCT_SERIAL_NUM_OFFSET_1G 0x00f0000 // Product Serial Number, stored in external FLASH last sector + +struct ngbe_hic_read_cab { + union ngbe_hic_hdr2 hdr; + union { + u8 d8[252]; + u16 d16[126]; + u32 d32[63]; + } dbuf; +}; + + +/** + * Packet Type decoding + **/ +/* ngbe_dec_ptype.mac: outer mac */ +enum ngbe_dec_ptype_mac { + NGBE_DEC_PTYPE_MAC_IP = 0, + NGBE_DEC_PTYPE_MAC_L2 = 2, + NGBE_DEC_PTYPE_MAC_FCOE = 3, +}; + +/* ngbe_dec_ptype.[e]ip: outer&encaped ip */ +#define NGBE_DEC_PTYPE_IP_FRAG (0x4) +enum ngbe_dec_ptype_ip { + NGBE_DEC_PTYPE_IP_NONE = 0, + NGBE_DEC_PTYPE_IP_IPV4 = 1, + NGBE_DEC_PTYPE_IP_IPV6 = 2, + 
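The MAC_ADDRn_WORD0/WORD1_OFFSET_1G constants above place each LAN port's factory MAC address in its own 0x8000-byte flash region (0x006000c for LAN0, 0x006800c for LAN1, and so on). A small worked example of that layout follows; the demo_* helpers are hypothetical and only restate the arithmetic implied by the four pairs of defines.

/* Hypothetical helpers: compute where a given LAN port's MAC address
 * words live in external flash, assuming the 0x8000 per-port stride
 * visible in the MAC_ADDRn_*_OFFSET_1G defines.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAC_WORD0_BASE	0x006000cu	/* MAC_ADDR0_WORD0_OFFSET_1G */
#define DEMO_MAC_WORD1_BASE	0x0060014u	/* MAC_ADDR0_WORD1_OFFSET_1G */
#define DEMO_MAC_PORT_STRIDE	0x8000u		/* LAN0..LAN3 regions are 32 KiB apart */

static uint32_t demo_mac_word0_offset(unsigned int lan_id)
{
	return DEMO_MAC_WORD0_BASE + lan_id * DEMO_MAC_PORT_STRIDE;
}

static uint32_t demo_mac_word1_offset(unsigned int lan_id)
{
	return DEMO_MAC_WORD1_BASE + lan_id * DEMO_MAC_PORT_STRIDE;
}

int main(void)
{
	/* LAN2 resolves to 0x007000c / 0x0070014, matching the defines above */
	printf("0x%07x 0x%07x\n", demo_mac_word0_offset(2), demo_mac_word1_offset(2));
	return 0;
}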
NGBE_DEC_PTYPE_IP_FGV4 = + (NGBE_DEC_PTYPE_IP_FRAG | NGBE_DEC_PTYPE_IP_IPV4), + NGBE_DEC_PTYPE_IP_FGV6 = + (NGBE_DEC_PTYPE_IP_FRAG | NGBE_DEC_PTYPE_IP_IPV6), +}; + +/* ngbe_dec_ptype.etype: encaped type */ +enum ngbe_dec_ptype_etype { + NGBE_DEC_PTYPE_ETYPE_NONE = 0, + NGBE_DEC_PTYPE_ETYPE_IPIP = 1, /* IP+IP */ + NGBE_DEC_PTYPE_ETYPE_IG = 2, /* IP+GRE */ + NGBE_DEC_PTYPE_ETYPE_IGM = 3, /* IP+GRE+MAC */ + NGBE_DEC_PTYPE_ETYPE_IGMV = 4, /* IP+GRE+MAC+VLAN */ +}; + +/* ngbe_dec_ptype.proto: payload proto */ +enum ngbe_dec_ptype_prot { + NGBE_DEC_PTYPE_PROT_NONE = 0, + NGBE_DEC_PTYPE_PROT_UDP = 1, + NGBE_DEC_PTYPE_PROT_TCP = 2, + NGBE_DEC_PTYPE_PROT_SCTP = 3, + NGBE_DEC_PTYPE_PROT_ICMP = 4, + NGBE_DEC_PTYPE_PROT_TS = 5, /* time sync */ +}; + +/* ngbe_dec_ptype.layer: payload layer */ +enum ngbe_dec_ptype_layer { + NGBE_DEC_PTYPE_LAYER_NONE = 0, + NGBE_DEC_PTYPE_LAYER_PAY2 = 1, + NGBE_DEC_PTYPE_LAYER_PAY3 = 2, + NGBE_DEC_PTYPE_LAYER_PAY4 = 3, +}; + +struct ngbe_dec_ptype { + u32 ptype:8; + u32 known:1; + u32 mac:2; /* outer mac */ + u32 ip:3; /* outer ip*/ + u32 etype:3; /* encaped type */ + u32 eip:3; /* encaped ip */ + u32 prot:4; /* payload proto */ + u32 layer:3; /* payload layer */ +}; +typedef struct ngbe_dec_ptype ngbe_dptype; + + +u16 ngbe_get_pcie_msix_count(struct ngbe_hw *hw); +int ngbe_init_hw(struct ngbe_hw *hw); +int ngbe_start_hw(struct ngbe_hw *hw); +int ngbe_clear_hw_cntrs(struct ngbe_hw *hw); +int ngbe_read_pba_string(struct ngbe_hw *hw, u8 *pba_num, + u32 pba_num_size); +int ngbe_get_mac_addr(struct ngbe_hw *hw, u8 *mac_addr); +int ngbe_get_bus_info(struct ngbe_hw *hw); +void ngbe_set_pci_config_data(struct ngbe_hw *hw, u16 link_status); +void ngbe_set_lan_id_multi_port_pcie(struct ngbe_hw *hw); +int ngbe_stop_adapter(struct ngbe_hw *hw); + +int ngbe_led_on(struct ngbe_hw *hw, u32 index); +int ngbe_led_off(struct ngbe_hw *hw, u32 index); + +int ngbe_set_rar(struct ngbe_hw *hw, u32 index, u8 *addr, u64 pools, + u32 enable_addr); +int ngbe_clear_rar(struct ngbe_hw *hw, u32 index); +int ngbe_init_rx_addrs(struct ngbe_hw *hw); +int ngbe_update_mc_addr_list(struct ngbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, + ngbe_mc_addr_itr func, bool clear); +int ngbe_update_uc_addr_list(struct ngbe_hw *hw, u8 *addr_list, + u32 addr_count, ngbe_mc_addr_itr func); +int ngbe_enable_mc(struct ngbe_hw *hw); +int ngbe_disable_mc(struct ngbe_hw *hw); +int ngbe_disable_sec_rx_path(struct ngbe_hw *hw); +int ngbe_enable_sec_rx_path(struct ngbe_hw *hw); + +int ngbe_fc_enable(struct ngbe_hw *hw); +void ngbe_fc_autoneg(struct ngbe_hw *hw); +int ngbe_setup_fc(struct ngbe_hw *hw); + +int ngbe_validate_mac_addr(u8 *mac_addr); +int ngbe_acquire_swfw_sync(struct ngbe_hw *hw, u32 mask); +void ngbe_release_swfw_sync(struct ngbe_hw *hw, u32 mask); +int ngbe_disable_pcie_master(struct ngbe_hw *hw); + +int ngbe_set_vmdq(struct ngbe_hw *hw, u32 rar, u32 vmdq); +int ngbe_set_vmdq_san_mac(struct ngbe_hw *hw, u32 vmdq); +int ngbe_clear_vmdq(struct ngbe_hw *hw, u32 rar, u32 vmdq); +int ngbe_insert_mac_addr(struct ngbe_hw *hw, u8 *addr, u32 vmdq); +int ngbe_init_uta_tables(struct ngbe_hw *hw); +int ngbe_set_vfta(struct ngbe_hw *hw, u32 vlan, + u32 vind, bool vlan_on); +int ngbe_set_vlvf(struct ngbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool *vfta_changed); +int ngbe_clear_vfta(struct ngbe_hw *hw); +int ngbe_find_vlvf_slot(struct ngbe_hw *hw, u32 vlan); + +void ngbe_set_mac_anti_spoofing(struct ngbe_hw *hw, bool enable, int pf); +void ngbe_set_vlan_anti_spoofing(struct ngbe_hw *hw, bool enable, int vf); +void 
ngbe_set_ethertype_anti_spoofing(struct ngbe_hw *hw, + bool enable, int vf); +int ngbe_get_device_caps(struct ngbe_hw *hw, u16 *device_caps); +void ngbe_set_rxpba(struct ngbe_hw *hw, int num_pb, u32 headroom, + int strategy); +int ngbe_set_fw_drv_ver(struct ngbe_hw *hw, u8 maj, u8 min, + u8 build, u8 ver); +int ngbe_reset_hostif(struct ngbe_hw *hw); +u8 ngbe_calculate_checksum(u8 *buffer, u32 length); +int ngbe_host_interface_command(struct ngbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data); + +void ngbe_clear_tx_pending(struct ngbe_hw *hw); +void ngbe_stop_mac_link_on_d3(struct ngbe_hw *hw); +bool ngbe_mng_present(struct ngbe_hw *hw); +bool ngbe_check_mng_access(struct ngbe_hw *hw); + +int ngbe_get_thermal_sensor_data(struct ngbe_hw *hw); +int ngbe_init_thermal_sensor_thresh(struct ngbe_hw *hw); +void ngbe_enable_rx(struct ngbe_hw *hw); +void ngbe_disable_rx(struct ngbe_hw *hw); +int ngbe_setup_mac_link_multispeed_fiber(struct ngbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete); +int ngbe_check_flash_load(struct ngbe_hw *hw, u32 check_bit); + +/* @ngbe_api.h */ +void ngbe_atr_compute_perfect_hash(union ngbe_atr_input *input, + union ngbe_atr_input *mask); +u32 ngbe_atr_compute_sig_hash(union ngbe_atr_hash_dword input, + union ngbe_atr_hash_dword common); + +int ngbe_get_link_capabilities(struct ngbe_hw *hw, + u32 *speed, bool *autoneg); +enum ngbe_media_type ngbe_get_media_type(struct ngbe_hw *hw); +void ngbe_disable_tx_laser_multispeed_fiber(struct ngbe_hw *hw); +void ngbe_enable_tx_laser_multispeed_fiber(struct ngbe_hw *hw); +void ngbe_flap_tx_laser_multispeed_fiber(struct ngbe_hw *hw); +void ngbe_set_hard_rate_select_speed(struct ngbe_hw *hw, + u32 speed); +int ngbe_setup_mac_link(struct ngbe_hw *hw, u32 speed, + bool autoneg_wait_to_complete); +void ngbe_init_mac_link_ops(struct ngbe_hw *hw); +int ngbe_reset_hw(struct ngbe_hw *hw); +int ngbe_identify_phy(struct ngbe_hw *hw); +void ngbe_init_ops_common(struct ngbe_hw *hw); +int ngbe_enable_rx_dma(struct ngbe_hw *hw, u32 regval); +void ngbe_init_ops(struct ngbe_hw *hw); +int ngbe_setup_eee(struct ngbe_hw *hw, bool enable_eee); + +int ngbe_init_flash_params(struct ngbe_hw *hw); +int ngbe_read_flash_buffer(struct ngbe_hw *hw, u32 offset, + u32 dwords, u32 *data); +int ngbe_write_flash_buffer(struct ngbe_hw *hw, u32 offset, + u32 dwords, u32 *data); + +int ngbe_read_eeprom(struct ngbe_hw *hw, + u16 offset, u16 *data); +int ngbe_read_eeprom_buffer(struct ngbe_hw *hw, u16 offset, + u16 words, u16 *data); +int ngbe_init_eeprom_params(struct ngbe_hw *hw); +int ngbe_update_eeprom_checksum(struct ngbe_hw *hw); +int ngbe_calc_eeprom_checksum(struct ngbe_hw *hw); +int ngbe_validate_eeprom_checksum(struct ngbe_hw *hw, + u16 *checksum_val); +int ngbe_upgrade_flash(struct ngbe_hw *hw, u32 region, + const u8 *data, u32 size); +int ngbe_write_ee_hostif_buffer(struct ngbe_hw *hw, + u16 offset, u16 words, u16 *data); +int ngbe_write_ee_hostif(struct ngbe_hw *hw, u16 offset, + u16 data); +int ngbe_write_ee_hostif32(struct ngbe_hw *hw, u16 offset, + u32 data); + +int ngbe_read_ee_hostif_buffer(struct ngbe_hw *hw, + u16 offset, u16 words, u16 *data); +int ngbe_read_ee_hostif(struct ngbe_hw *hw, u16 offset, u16 *data); + +int ngbe_read_ee_hostif32(struct ngbe_hw *hw, u16 offset, u32 *data); + +u32 ngbe_rd32_epcs(struct ngbe_hw *hw, u32 addr); +void ngbe_wr32_epcs(struct ngbe_hw *hw, u32 addr, u32 data); +void ngbe_wr32_ephy(struct ngbe_hw *hw, u32 addr, u32 data); +int ngbe_upgrade_flash_hostif(struct ngbe_hw *hw, u32 region, + 
const u8 *data, u32 size); + +int ngbe_eepromcheck_cap(struct ngbe_hw *hw, u16 offset, + u32 *data); +int ngbe_phy_signal_set(struct ngbe_hw *hw); + +int ngbe_flash_read_dword(struct ngbe_hw *hw, u32 addr, u32 *data); + +int ngbe_is_lldp(struct ngbe_hw *hw); +int ngbe_hic_write_lldp(struct ngbe_hw *hw, u32 open); + #endif /* _NGBE_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_kcompat.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_kcompat.c new file mode 100644 index 0000000000000000000000000000000000000000..77d47c4e7af1ebe44b4530d16d182bffa064a830 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_kcompat.c @@ -0,0 +1,3024 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include "ngbe.h" +#include "ngbe_kcompat.h" + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 8)) || defined __VMKLNX__ +/* From lib/vsprintf.c */ +#include + +static int skip_atoi(const char **s) +{ + int i = 0; + + while (isdigit(**s)) + i = i*10 + *((*s)++) - '0'; + return i; +} + +#define _kc_ZEROPAD 1 /* pad with zero */ +#define _kc_SIGN 2 /* unsigned/signed long */ +#define _kc_PLUS 4 /* show plus */ +#define _kc_SPACE 8 /* space if plus */ +#define _kc_LEFT 16 /* left justified */ +#define _kc_SPECIAL 32 /* 0x */ +#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ + +static char *number(char *buf, char *end, long long num, int base, int size, int precision, int type) +{ + char c, sign, tmp[66]; + const char *digits; + const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; + const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; + int i; + + digits = (type & _kc_LARGE) ? large_digits : small_digits; + if (type & _kc_LEFT) + type &= ~_kc_ZEROPAD; + if (base < 2 || base > 36) + return 0; + c = (type & _kc_ZEROPAD) ? 
'0' : ' '; + sign = 0; + if (type & _kc_SIGN) { + if (num < 0) { + sign = '-'; + num = -num; + size--; + } else if (type & _kc_PLUS) { + sign = '+'; + size--; + } else if (type & _kc_SPACE) { + sign = ' '; + size--; + } + } + if (type & _kc_SPECIAL) { + if (base == 16) + size -= 2; + else if (base == 8) + size--; + } + i = 0; + if (num == 0) + tmp[i++]='0'; + else while (num != 0) + tmp[i++] = digits[do_div(num,base)]; + if (i > precision) + precision = i; + size -= precision; + if (!(type&(_kc_ZEROPAD+_kc_LEFT))) { + while (size-- > 0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + } + if (sign) { + if (buf <= end) + *buf = sign; + ++buf; + } + if (type & _kc_SPECIAL) { + if (base == 8) { + if (buf <= end) + *buf = '0'; + ++buf; + } else if (base == 16) { + if (buf <= end) + *buf = '0'; + ++buf; + if (buf <= end) + *buf = digits[33]; + ++buf; + } + } + if (!(type & _kc_LEFT)) { + while (size-- > 0) { + if (buf <= end) + *buf = c; + ++buf; + } + } + while (i < precision--) { + if (buf <= end) + *buf = '0'; + ++buf; + } + while (i-- > 0) { + if (buf <= end) + *buf = tmp[i]; + ++buf; + } + while (size-- > 0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + return buf; +} + +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args) +{ + int len; + unsigned long long num; + int i, base; + char *str, *end, c; + const char *s; + + int flags; /* flags to number() */ + + int field_width; /* width of output field */ + int precision; /* min. # of digits for integers; max + number of chars for from string */ + int qualifier; /* 'h', 'l', or 'L' for integer fields */ + /* 'z' support added 23/7/1999 S.H. */ + /* 'z' changed to 'Z' --davidm 1/25/99 */ + + str = buf; + end = buf + size - 1; + + if (end < buf - 1) { + end = ((void *) -1); + size = end - buf + 1; + } + + for (; *fmt ; ++fmt) { + if (*fmt != '%') { + if (str <= end) + *str = *fmt; + ++str; + continue; + } + + /* process flags */ + flags = 0; + repeat: + ++fmt; /* this also skips first '%' */ + switch (*fmt) { + case '-': flags |= _kc_LEFT; goto repeat; + case '+': flags |= _kc_PLUS; goto repeat; + case ' ': flags |= _kc_SPACE; goto repeat; + case '#': flags |= _kc_SPECIAL; goto repeat; + case '0': flags |= _kc_ZEROPAD; goto repeat; + } + + /* get field width */ + field_width = -1; + if (isdigit(*fmt)) + field_width = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + field_width = va_arg(args, int); + if (field_width < 0) { + field_width = -field_width; + flags |= _kc_LEFT; + } + } + + /* get the precision */ + precision = -1; + if (*fmt == '.') { + ++fmt; + if (isdigit(*fmt)) + precision = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + precision = va_arg(args, int); + } + if (precision < 0) + precision = 0; + } + + /* get the conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt == 'Z') { + qualifier = *fmt; + ++fmt; + } + + /* default base */ + base = 10; + + switch (*fmt) { + case 'c': + if (!(flags & _kc_LEFT)) { + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + } + c = (unsigned char) va_arg(args, int); + if (str <= end) + *str = c; + ++str; + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 's': + s = va_arg(args, char *); + if (!s) + s = ""; + + len = strnlen(s, precision); + + if (!(flags & _kc_LEFT)) { + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + } + for (i = 0; i < len; ++i) { + if (str <= end) 
+ *str = *s; + ++str; ++s; + } + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 'p': + if ('M' == *(fmt+1)) { + str = get_mac(str, end, va_arg(args, unsigned char *)); + fmt++; + } else { + if (field_width == -1) { + field_width = 2*sizeof(void *); + flags |= _kc_ZEROPAD; + } + str = number(str, end, + (unsigned long) va_arg(args, void *), + 16, field_width, precision, flags); + } + continue; + + case 'n': + /* FIXME: + * What does C99 say about the overflow case here? */ + if (qualifier == 'l') { + long *ip = va_arg(args, long *); + *ip = (str - buf); + } else if (qualifier == 'Z') { + size_t *ip = va_arg(args, size_t *); + *ip = (str - buf); + } else { + int *ip = va_arg(args, int *); + *ip = (str - buf); + } + continue; + + case '%': + if (str <= end) + *str = '%'; + ++str; + continue; + + /* integer number formats - set up the flags and "break" */ + case 'o': + base = 8; + break; + + case 'X': + flags |= _kc_LARGE; + case 'x': + base = 16; + break; + + case 'd': + case 'i': + flags |= _kc_SIGN; + case 'u': + break; + + default: + if (str <= end) + *str = '%'; + ++str; + if (*fmt) { + if (str <= end) + *str = *fmt; + ++str; + } else { + --fmt; + } + continue; + } + if (qualifier == 'L') + num = va_arg(args, long long); + else if (qualifier == 'l') { + num = va_arg(args, unsigned long); + if (flags & _kc_SIGN) + num = (signed long) num; + } else if (qualifier == 'Z') { + num = va_arg(args, size_t); + } else if (qualifier == 'h') { + num = (unsigned short) va_arg(args, int); + if (flags & _kc_SIGN) + num = (signed short) num; + } else { + num = va_arg(args, unsigned int); + if (flags & _kc_SIGN) + num = (signed int) num; + } + str = number(str, end, num, base, + field_width, precision, flags); + } + if (str <= end) + *str = '\0'; + else if (size > 0) + /* don't write out a null byte if the buf size is zero */ + *end = '\0'; + /* the trailing null byte doesn't count towards the total + * ++str; + */ + return str-buf; +} + +int _kc_snprintf(char *buf, size_t size, const char *fmt, ...) 
+{ + va_list args; + int i; + + va_start(args, fmt); + i = _kc_vsnprintf(buf, size, fmt, args); + va_end(args); + return i; +} +#endif /* < 2.4.8 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 13)) + +/**************************************/ +/* PCI DMA MAPPING */ + +#if defined(CONFIG_HIGHMEM) + +#ifndef PCI_DRAM_OFFSET +#define PCI_DRAM_OFFSET 0 +#endif + +u64 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, + size_t size, int direction) +{ + return (((u64) (page - mem_map) << PAGE_SHIFT) + offset + + PCI_DRAM_OFFSET); +} + +#else /* CONFIG_HIGHMEM */ + +u64 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, + size_t size, int direction) +{ + return pci_map_single(dev, (void *)page_address(page) + offset, size, + direction); +} + +#endif /* CONFIG_HIGHMEM */ + +void +_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, + int direction) +{ + return pci_unmap_single(dev, dma_addr, size, direction); +} + +#endif /* 2.4.13 => 2.4.3 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3)) + +/**************************************/ +/* PCI DRIVER API */ + +int +_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask) +{ + if (!pci_dma_supported(dev, mask)) + return -EIO; + dev->dma_mask = mask; + return 0; +} + +int +_kc_pci_request_regions(struct pci_dev *dev, char *res_name) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) { + if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { + pci_release_regions(dev); + return -EBUSY; + } + } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) { + if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { + pci_release_regions(dev); + return -EBUSY; + } + } + } + return 0; +} + +void +_kc_pci_release_regions(struct pci_dev *dev) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) + release_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + + else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) + release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + } +} + +/**************************************/ +/* NETWORK DRIVER API */ + +struct net_device * +_kc_alloc_etherdev(int sizeof_priv) +{ + struct net_device *dev; + int alloc_size; + + alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31; + dev = kzalloc(alloc_size, GFP_KERNEL); + if (!dev) + return NULL; + + if (sizeof_priv) + dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31); + dev->name[0] = '\0'; + ether_setup(dev); + + return dev; +} + +int +_kc_is_valid_ether_addr(u8 *addr) +{ + const char zaddr[6] = { 0, }; + + return !(addr[0] & 1) && memcmp(addr, zaddr, 6); +} + +#endif /* 2.4.3 => 2.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6)) + +int +_kc_pci_set_power_state(struct pci_dev *dev, int state) +{ + return 0; +} + +int +_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable) +{ + return 0; +} + +#endif /* 2.4.6 => 2.4.3 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +void 
_kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, + int off, int size) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + frag->page = page; + frag->page_offset = off; + frag->size = size; + skb_shinfo(skb)->nr_frags = i + 1; +} + +/* + * Original Copyright: + * find_next_bit.c: fallback find next bit implementation + * + * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + ffs(tmp); +} + +size_t _kc_strlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = strlen(src); + + if (size) { + size_t len = (ret >= size) ? size - 1 : ret; + memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; +} + +#ifndef do_div +#if BITS_PER_LONG == 32 +uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base) +{ + uint64_t rem = *n; + uint64_t b = base; + uint64_t res, d = 1; + uint32_t high = rem >> 32; + + /* Reduce the thing a bit first */ + res = 0; + if (high >= base) { + high /= base; + res = (uint64_t) high << 32; + rem -= (uint64_t) (high*base) << 32; + } + + while ((int64_t)b > 0 && b < rem) { + b = b+b; + d = d+d; + } + + do { + if (rem >= b) { + rem -= b; + res += d; + } + b >>= 1; + d >>= 1; + } while (d); + + *n = res; + return rem; +} +#endif /* BITS_PER_LONG == 32 */ +#endif /* do_div */ +#endif /* 2.6.0 => 2.4.6 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 4)) +int _kc_scnprintf(char *buf, size_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i = vsnprintf(buf, size, fmt, args); + va_end(args); + return (i >= size) ? 
(size - 1) : i; +} +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10)) +DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1}; +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 13)) +char *_kc_kstrdup(const char *s, unsigned int gfp) +{ + size_t len; + char *buf; + + if (!s) + return NULL; + + len = strlen(s) + 1; + buf = kmalloc(len, gfp); + if (buf) + memcpy(buf, s, len); + return buf; +} +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)) +void *_kc_kzalloc(size_t size, int flags) +{ + void *ret = kmalloc(size, flags); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif /* <= 2.6.13 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)) +int _kc_skb_pad(struct sk_buff *skb, int pad) +{ + int ntail; + + /* If the skbuff is non linear tailroom is always zero.. */ + if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { + memset(skb->data+skb->len, 0, pad); + return 0; + } + + ntail = skb->data_len + pad - (skb->end - skb->tail); + if (likely(skb_cloned(skb) || ntail > 0)) { + if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC)) + goto free_skb; + } + +#ifdef MAX_SKB_FRAGS + if (skb_is_nonlinear(skb) && + !__pskb_pull_tail(skb, skb->data_len)) + goto free_skb; + +#endif + memset(skb->data + skb->len, 0, pad); + return 0; + +free_skb: + kfree_skb(skb); + return -ENOMEM; +} + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 4))) +int _kc_pci_save_state(struct pci_dev *pdev) +{ + struct adapter_struct *adapter = pci_get_drvdata(pdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset, pcie_link_status; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + /* no ->dev for 2.4 kernels */ + WARN_ON(pdev->dev.driver_data == NULL); +#endif + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset) { + if (!pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = PCIE_CONFIG_SPACE_LEN; + } + pci_config_space_ich8lan(); +#ifdef HAVE_PCI_ERS + if (adapter->config_space == NULL) +#else + WARN_ON(adapter->config_space != NULL); +#endif + adapter->config_space = kmalloc(size, GFP_KERNEL); + if (!adapter->config_space) { + printk(KERN_ERR "Out of memory in pci_save_state\n"); + return -ENOMEM; + } + for (i = 0; i < (size / 4); i++) + pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]); + return 0; +} + +void _kc_pci_restore_state(struct pci_dev *pdev) +{ + struct adapter_struct *adapter = pci_get_drvdata(pdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset; + u16 pcie_link_status; + + if (adapter->config_space != NULL) { + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset && + !pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = PCIE_CONFIG_SPACE_LEN; + + pci_config_space_ich8lan(); + for (i = 0; i < (size / 4); i++) + pci_write_config_dword(pdev, i * 4, adapter->config_space[i]); +#ifndef HAVE_PCI_ERS + kfree(adapter->config_space); + adapter->config_space = NULL; +#endif + } +} +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +void _kc_free_netdev(struct net_device *netdev) +{ + 
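The _kc_pci_save_state()/_kc_pci_restore_state() fallbacks above snapshot the first PCI_CONFIG_SPACE_LEN (or PCIE_CONFIG_SPACE_LEN when a PCIe capability is present) bytes of config space one dword at a time, and later write the same dwords back. The sketch below replays that loop against a plain in-memory buffer instead of a real device; the demo_* names are illustrative, and a real implementation would go through pci_read_config_dword()/pci_write_config_dword() as the compat code does.

/* Standalone illustration of the dword-at-a-time config-space snapshot
 * and restore performed by the compat pci_save_state()/pci_restore_state();
 * the "config space" here is just a local array.
 */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_CONFIG_SPACE_LEN	256	/* PCI_CONFIG_SPACE_LEN in the compat code */

static uint32_t *demo_save_state(const uint8_t *cfg, size_t len)
{
	uint32_t *saved = malloc(len);
	size_t i;

	if (!saved)
		return NULL;
	for (i = 0; i < len / 4; i++)		/* one dword per iteration */
		memcpy(&saved[i], cfg + i * 4, 4);
	return saved;
}

static void demo_restore_state(uint8_t *cfg, const uint32_t *saved, size_t len)
{
	size_t i;

	for (i = 0; i < len / 4; i++)
		memcpy(cfg + i * 4, &saved[i], 4);
}

int main(void)
{
	uint8_t cfg[DEMO_CONFIG_SPACE_LEN] = { 0xde, 0xad, 0xbe, 0xef };
	uint32_t *saved = demo_save_state(cfg, sizeof(cfg));

	assert(saved);
	memset(cfg, 0, sizeof(cfg));		/* pretend the device lost its config */
	demo_restore_state(cfg, saved, sizeof(cfg));
	assert(cfg[0] == 0xde && cfg[3] == 0xef);
	free(saved);
	return 0;
}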
struct adapter_struct *adapter = netdev_priv(netdev); + + kfree(adapter->config_space); +#ifdef CONFIG_SYSFS + if (netdev->reg_state == NETREG_UNINITIALIZED) { + kfree((char *)netdev - netdev->padded); + } else { + BUG_ON(netdev->reg_state != NETREG_UNREGISTERED); + netdev->reg_state = NETREG_RELEASED; + class_device_put(&netdev->class_dev); + } +#else + kfree((char *)netdev - netdev->padded); +#endif +} +#endif + +void *_kc_kmemdup(const void *src, size_t len, unsigned gfp) +{ + void *p; + + p = kzalloc(len, gfp); + if (p) + memcpy(p, src, len); + return p; +} +#endif /* <= 2.6.19 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21)) +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev) +{ + return ((struct adapter_struct *)netdev_priv(netdev))->pdev; +} +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22)) +/* hexdump code taken from lib/hexdump.c */ +static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize, + int groupsize, unsigned char *linebuf, + size_t linebuflen, bool ascii) +{ + const u8 *ptr = buf; + u8 ch; + int j, lx = 0; + int ascii_column; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + if (!len) + goto nil; + if (len > rowsize) /* limit to one line at a time */ + len = rowsize; + if ((len % groupsize) != 0) /* no mixed size output */ + groupsize = 1; + + switch (groupsize) { + case 8: { + const u64 *ptr8 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%16.16llx", j ? " " : "", + (unsigned long long)*(ptr8 + j)); + ascii_column = 17 * ngroups + 2; + break; + } + + case 4: { + const u32 *ptr4 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%8.8x", j ? " " : "", *(ptr4 + j)); + ascii_column = 9 * ngroups + 2; + break; + } + + case 2: { + const u16 *ptr2 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%4.4x", j ? " " : "", *(ptr2 + j)); + ascii_column = 5 * ngroups + 2; + break; + } + + default: + for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { + ch = ptr[j]; + linebuf[lx++] = hex_asc(ch >> 4); + linebuf[lx++] = hex_asc(ch & 0x0f); + linebuf[lx++] = ' '; + } + if (j) + lx--; + + ascii_column = 3 * rowsize + 2; + break; + } + if (!ascii) + goto nil; + + while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) + linebuf[lx++] = ' '; + for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) + linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? 
ptr[j] + : '.'; +nil: + linebuf[lx++] = '\0'; +} + +void _kc_print_hex_dump(const char *level, + const char *prefix_str, int prefix_type, + int rowsize, int groupsize, + const void *buf, size_t len, bool ascii) +{ + const u8 *ptr = buf; + int i, linelen, remaining = len; + unsigned char linebuf[200]; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + for (i = 0; i < len; i += rowsize) { + linelen = min(remaining, rowsize); + remaining -= rowsize; + _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, + linebuf, sizeof(linebuf), ascii); + + switch (prefix_type) { + case DUMP_PREFIX_ADDRESS: + printk("%s%s%*p: %s\n", level, prefix_str, + (int)(2 * sizeof(void *)), ptr + i, linebuf); + break; + case DUMP_PREFIX_OFFSET: + printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); + break; + default: + printk("%s%s%s\n", level, prefix_str, linebuf); + break; + } + } +} + +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)) +int txgbe_dcb_netlink_register(void) +{ + return 0; +} + +int txgbe_dcb_netlink_unregister(void) +{ + return 0; +} + +int txgbe_copy_dcb_cfg(struct txgbe_adapter __always_unused *adapter, int __always_unused tc_max) +{ + return 0; +} +#endif /* < 2.6.23 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) +#ifdef NAPI +struct net_device *napi_to_poll_dev(const struct napi_struct *napi) +{ + struct adapter_q_vector *q_vector = container_of(napi, + struct adapter_q_vector, + napi); + return &q_vector->poll_dev; +} + +int __kc_adapter_clean(struct net_device *netdev, int *budget) +{ + int work_done; + int work_to_do = min(*budget, netdev->quota); + /* ngbe_kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */ + struct napi_struct *napi = netdev->priv; + work_done = napi->poll(napi, work_to_do); + *budget -= work_done; + netdev->quota -= work_done; + return (work_done >= work_to_do) ? 
1 : 0; +} +#endif /* NAPI */ +#endif /* <= 2.6.24 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)) +void _kc_pci_disable_link_state(struct pci_dev *pdev, int state) +{ + struct pci_dev *parent = pdev->bus->self; + u16 link_state; + int pos; + + if (!parent) + return; + + pos = pci_find_capability(parent, PCI_CAP_ID_EXP); + if (pos) { + pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state); + link_state &= ~state; + pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state); + } +} +#endif /* < 2.6.26 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)) +#ifdef HAVE_TX_MQ +void _kc_netif_tx_stop_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_stop_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_stop_subqueue(netdev, i); +} +void _kc_netif_tx_wake_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_wake_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_wake_subqueue(netdev, i); +} +void _kc_netif_tx_start_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_start_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_start_subqueue(netdev, i); +} +#endif /* HAVE_TX_MQ */ + +void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...) +{ + va_list args; + + printk(KERN_WARNING "------------[ cut here ]------------\n"); + printk(KERN_WARNING "WARNING: at %s:%d \n", file, line); + va_start(args, fmt); + vprintk(fmt, args); + va_end(args); + + dump_stack(); +} +#endif /* __VMKLNX__ */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)) + +int +_kc_pci_prepare_to_sleep(struct pci_dev *dev) +{ + pci_power_t target_state; + int error; + + target_state = pci_choose_state(dev, PMSG_SUSPEND); + + pci_enable_wake(dev, target_state, true); + + error = pci_set_power_state(dev, target_state); + + if (error) + pci_enable_wake(dev, target_state, false); + + return error; +} + +int +_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable) +{ + int err; + + err = pci_enable_wake(dev, PCI_D3cold, enable); + if (err) + goto out; + + err = pci_enable_wake(dev, PCI_D3hot, enable); + +out: + return err; +} +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)) +static void __kc_pci_set_master(struct pci_dev *pdev, bool enable) +{ + u16 old_cmd, cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &old_cmd); + if (enable) + cmd = old_cmd | PCI_COMMAND_MASTER; + else + cmd = old_cmd & ~PCI_COMMAND_MASTER; + if (cmd != old_cmd) { + dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n", + enable ? 
"enabling" : "disabling"); + pci_write_config_word(pdev, PCI_COMMAND, cmd); + } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 7)) + pdev->is_busmaster = enable; +#endif +} + +void _kc_pci_clear_master(struct pci_dev *dev) +{ + __kc_pci_set_master(dev, false); +} +#endif /* < 2.6.29 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34)) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 0)) +int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev) +{ + int num_vf = 0; +#ifdef CONFIG_PCI_IOV + struct pci_dev *vfdev; + + /* loop through all ethernet devices starting at PF dev */ + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL); + while (vfdev) { + if (vfdev->is_virtfn && vfdev->physfn == dev) + num_vf++; + + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev); + } + +#endif + return num_vf; +} +#endif /* RHEL_RELEASE_CODE */ +#endif /* < 2.6.34 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)) +#ifdef HAVE_TX_MQ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 0))) +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +int _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) +{ + unsigned int real_num = dev->real_num_tx_queues; + struct Qdisc *qdisc; + int i; + + if (txq < 1 || txq > dev->num_tx_queues) + return -EINVAL; + + else if (txq > real_num) + dev->real_num_tx_queues = txq; + else if (txq < real_num) { + dev->real_num_tx_queues = txq; + for (i = txq; i < dev->num_tx_queues; i++) { + qdisc = netdev_get_tx_queue(dev, i)->qdisc; + if (qdisc) { + spin_lock_bh(qdisc_lock(qdisc)); + qdisc_reset(qdisc); + spin_unlock_bh(qdisc_lock(qdisc)); + } + } + } + + return 0; +} +#endif /* CONFIG_NETDEVICES_MULTIQUEUE */ +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ +#endif /* HAVE_TX_MQ */ + +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count) +{ + loff_t pos = *ppos; + size_t res; + + if (pos < 0) + return -EINVAL; + if (pos >= available || !count) + return 0; + if (count > available - pos) + count = available - pos; + res = copy_from_user(to + pos, from, count); + if (res == count) + return -EFAULT; + count -= res; + *ppos = pos + count; + return count; +} + +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) +static const u32 _kc_flags_dup_features = + (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH); + +u32 _kc_ethtool_op_get_flags(struct net_device *dev) +{ + return dev->features & _kc_flags_dup_features; +} + +int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) +{ + if (data & ~supported) + return -EINVAL; + + dev->features = ((dev->features & ~_kc_flags_dup_features) | + (data & _kc_flags_dup_features)); + return 0; +} +#endif /* < 2.6.36 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 0))) +#ifdef HAVE_NETDEV_SELECT_QUEUE +#include +#include + +u16 ___kc_skb_tx_hash(struct net_device *dev, const struct sk_buff *skb, + u16 num_tx_queues) +{ + u32 hash; + u16 qoffset = 0; + u16 qcount = num_tx_queues; + + if (skb_rx_queue_recorded(skb)) { + hash = skb_get_rx_queue(skb); + while (unlikely(hash >= num_tx_queues)) + hash -= num_tx_queues; + return hash; + } + + if (netdev_get_num_tc(dev)) { + struct adapter_struct *kc_adapter = netdev_priv(dev); + + if 
(skb->priority == TC_PRIO_CONTROL) { + qoffset = kc_adapter->dcb_tc - 1; + } else { + qoffset = skb->vlan_tci; + qoffset &= NGBE_TX_FLAGS_VLAN_PRIO_MASK; + qoffset >>= 13; + } + + qcount = kc_adapter->ring_feature[RING_F_RSS].indices; + qoffset *= qcount; + } + + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else +#ifdef NETIF_F_RXHASH + hash = (__force u16) skb->protocol ^ skb->rxhash; +#else + hash = skb->protocol; +#endif + + hash = jhash_1word(hash, _kc_hashrnd); + + return (u16) (((u64) hash * qcount) >> 32) + qoffset; +} +#endif /* HAVE_NETDEV_SELECT_QUEUE */ + +u8 _kc_netdev_get_num_tc(struct net_device *dev) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + if (kc_adapter->flags & NGBE_FLAG_DCB_ENABLED) + return kc_adapter->dcb_tc; + else + return 0; +} + +int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + + if (num_tc > NGBE_DCB_MAX_TRAFFIC_CLASS) + return -EINVAL; + + kc_adapter->dcb_tc = num_tc; + + return 0; +} + +u8 _kc_netdev_get_prio_tc_map(struct net_device __maybe_unused *dev, u8 __maybe_unused up) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + + return txgbe_dcb_get_tc_from_up(&kc_adapter->dcb_cfg, 0, up); +} + +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ +#endif /* < 2.6.39 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) +void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, + int off, int size, unsigned int truesize) +{ + skb_fill_page_desc(skb, i, page, off, size); + skb->len += size; + skb->data_len += size; + skb->truesize += truesize; +} + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0)) +int _kc_simple_open(struct inode *inode, struct file *file) +{ + if (inode->i_private) + file->private_data = inode->i_private; + + return 0; +} +#endif /* SLE_VERSION < 11,3,0 */ + +#endif /* < 3.4.0 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) +static inline int __kc_pcie_cap_version(struct pci_dev *dev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return 0; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); + return reg16 & PCI_EXP_FLAGS_VERS; +} + +static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev) +{ + return true; +} + +static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_ENDPOINT || + type == PCI_EXP_TYPE_LEG_END; +} + +static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + int pos; + u16 pcie_flags_reg; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return false; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + (type == PCI_EXP_TYPE_DOWNSTREAM && + pcie_flags_reg & PCI_EXP_FLAGS_SLOT); +} + +static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_RC_EC; +} + +static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos) +{ + if (!pci_is_pcie(dev)) + return false; + 
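__kc_pcie_capability_clear_and_set_word() further below wraps these accessors in a read-modify-write: read the capability word, clear the caller's bits, set the new ones, write it back. The snippet below shows only that clear-and-set pattern on a plain 16-bit variable; the demo_* names are illustrative and stand in for the real read/write accessors defined in this compat block.

/* Illustration of the clear-and-set read-modify-write used by
 * __kc_pcie_capability_clear_and_set_word(); the "register" is a local
 * variable rather than a PCIe capability word.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t demo_reg = 0x00f3;	/* pretend PCI_EXP_LNKCTL contents */

static uint16_t demo_read_word(void)		{ return demo_reg; }
static void demo_write_word(uint16_t val)	{ demo_reg = val; }

static void demo_clear_and_set_word(uint16_t clear, uint16_t set)
{
	uint16_t val = demo_read_word();

	val &= ~clear;	/* drop the bits the caller wants cleared */
	val |= set;	/* then apply the new ones */
	demo_write_word(val);
}

int main(void)
{
	demo_clear_and_set_word(0x0003, 0x0001);	/* e.g. reprogram two low bits */
	printf("0x%04x\n", demo_reg);			/* prints 0x00f1 */
	return 0;
}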
+ switch (pos) { + case PCI_EXP_FLAGS_TYPE: + return true; + case PCI_EXP_DEVCAP: + case PCI_EXP_DEVCTL: + case PCI_EXP_DEVSTA: + return __kc_pcie_cap_has_devctl(dev); + case PCI_EXP_LNKCAP: + case PCI_EXP_LNKCTL: + case PCI_EXP_LNKSTA: + return __kc_pcie_cap_has_lnkctl(dev); + case PCI_EXP_SLTCAP: + case PCI_EXP_SLTCTL: + case PCI_EXP_SLTSTA: + return __kc_pcie_cap_has_sltctl(dev); + case PCI_EXP_RTCTL: + case PCI_EXP_RTCAP: + case PCI_EXP_RTSTA: + return __kc_pcie_cap_has_rtctl(dev); + case PCI_EXP_DEVCAP2: + case PCI_EXP_DEVCTL2: + case PCI_EXP_LNKCAP2: + case PCI_EXP_LNKCTL2: + case PCI_EXP_LNKSTA2: + return __kc_pcie_cap_version(dev) > 1; + default: + return false; + } +} + +/* + * Note that these accessor functions are only for the "PCI Express + * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the + * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) + */ +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) +{ + int ret; + + *val = 0; + if (pos & 1) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_word() fails, it may + * have been written as 0xFFFF if hardware error happens + * during pci_read_config_word(). + */ + if (ret) + *val = 0; + return ret; + } + + /* + * For Functions that do not implement the Slot Capabilities, + * Slot Status, and Slot Control registers, these spaces must + * be hardwired to 0b, with the exception of the Presence Detect + * State bit in the Slot Status register of Downstream Ports, + * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) + */ + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + +int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) +{ + int ret; + + *val = 0; + if (pos & 3) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_dword() fails, it may + * have been written as 0xFFFFFFFF if hardware error happens + * during pci_read_config_dword(). + */ + if (ret) + *val = 0; + return ret; + } + + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + + +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) +{ + if (pos & 1) + return -EINVAL; + + if (!__kc_pcie_capability_reg_implemented(dev, pos)) + return 0; + + return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); +} + +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set) +{ + int ret; + u16 val; + + ret = __kc_pcie_capability_read_word(dev, pos, &val); + if (!ret) { + val &= ~clear; + val |= set; + ret = __kc_pcie_capability_write_word(dev, pos, val); + } + + return ret; +} + +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear) +{ + return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0); +} +#endif /* < 3.7.0 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) +#ifdef CONFIG_XPS +#if NR_CPUS < 64 +#define _KC_MAX_XPS_CPUS NR_CPUS +#else +#define _KC_MAX_XPS_CPUS 64 +#endif + +/* + * netdev_queue sysfs structures and functions. 
+ */ +struct _kc_netdev_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_queue *queue, + struct _kc_netdev_queue_attribute *attr, char *buf); + ssize_t (*store)(struct netdev_queue *queue, + struct _kc_netdev_queue_attribute *attr, const char *buf, size_t len); +}; + +#define to_kc_netdev_queue_attr(_attr) container_of(_attr, \ + struct _kc_netdev_queue_attribute, attr) + +int __kc_netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, + u16 index) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, index); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38)) + /* Redhat requires some odd extended netdev structures */ + struct netdev_tx_queue_extended *txq_ext = + netdev_extended(dev)->_tx_ext + index; + struct kobj_type *ktype = txq_ext->kobj.ktype; +#else + struct kobj_type *ktype = txq->kobj.ktype; +#endif + struct _kc_netdev_queue_attribute *xps_attr; + struct attribute *attr = NULL; + int i, len, err; +#define _KC_XPS_BUFLEN (DIV_ROUND_UP(_KC_MAX_XPS_CPUS, 32) * 9) + char buf[_KC_XPS_BUFLEN]; + + if (!ktype) + return -ENOMEM; + + /* attempt to locate the XPS attribute in the Tx queue */ + for (i = 0; (attr = ktype->default_attrs[i]); i++) { + if (!strcmp("xps_cpus", attr->name)) + break; + } + + /* if we did not find it return an error */ + if (!attr) + return -EINVAL; + + /* copy the mask into a string */ + len = bitmap_scnprintf(buf, _KC_XPS_BUFLEN, + cpumask_bits(mask), _KC_MAX_XPS_CPUS); + if (!len) + return -ENOMEM; + + xps_attr = to_kc_netdev_queue_attr(attr); + + /* Store the XPS value using the SYSFS store call */ + err = xps_attr->store(txq, xps_attr, buf, len); + + /* we only had an error on err < 0 */ + return (err < 0) ? err : 0; +} +#endif /* CONFIG_XPS */ +#ifdef HAVE_NETDEV_SELECT_QUEUE +static inline int kc_get_xps_queue(struct net_device *dev, struct sk_buff *skb) +{ +#ifdef CONFIG_XPS + struct xps_dev_maps *dev_maps; + struct xps_map *map; + int queue_index = -1; + + rcu_read_lock(); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38)) + /* Redhat requires some odd extended netdev structures */ + dev_maps = rcu_dereference(netdev_extended(dev)->xps_maps); +#else + dev_maps = rcu_dereference(dev->xps_maps); +#endif + if (dev_maps) { + map = rcu_dereference( + dev_maps->cpu_map[raw_smp_processor_id()]); + if (map) { + if (map->len == 1) + queue_index = map->queues[0]; + else { + u32 hash; + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else + hash = (__force u16) skb->protocol ^ + skb->rxhash; + hash = jhash_1word(hash, _kc_hashrnd); + queue_index = map->queues[ + ((u64)hash * map->len) >> 32]; + } + if (unlikely(queue_index >= dev->real_num_tx_queues)) + queue_index = -1; + } + } + rcu_read_unlock(); + + return queue_index; +#else + struct adapter_struct *kc_adapter = netdev_priv(dev); + int queue_index = -1; + + if (kc_adapter->flags & NGBE_FLAG_FDIR_HASH_CAPABLE) { + queue_index = skb_rx_queue_recorded(skb) ? 
+ skb_get_rx_queue(skb) : + smp_processor_id(); + while (unlikely(queue_index >= dev->real_num_tx_queues)) + queue_index -= dev->real_num_tx_queues; + return queue_index; + } + + return -1; +#endif +} + +u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + int queue_index = sk_tx_queue_get(sk); + int new_index; + + if (queue_index >= 0 && queue_index < dev->real_num_tx_queues) { +#ifdef CONFIG_XPS + if (!skb->ooo_okay) +#endif + return queue_index; + } + + new_index = kc_get_xps_queue(dev, skb); + if (new_index < 0) + new_index = skb_tx_hash(dev, skb); + + if (queue_index != new_index && sk) { + struct dst_entry *dst = + rcu_dereference(sk->sk_dst_cache); + + if (dst && skb_dst(skb) == dst) + sk_tx_queue_set(sk, new_index); + + } + + return new_index; +} + +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#endif /* 3.9.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) +#ifdef HAVE_FDB_OPS +#ifdef USE_CONST_DEV_UC_CHAR +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, + u16 flags) +#else +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 flags) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. + */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_add_excl(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(dev, addr); + + /* Only return duplicate errors if NLM_F_EXCL is set */ + if (err == -EEXIST && !(flags & NLM_F_EXCL)) + err = 0; + + return err; +} + +#ifdef USE_CONST_DEV_UC_CHAR +#ifdef HAVE_FDB_DEL_NLATTR +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr) +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr) +#endif +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. 
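+	 * Only static (NUD_PERMANENT) entries are accepted by this
+	 * default handler.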
+ */ + if (!(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_del(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_del(dev, addr); + + return err; +} + +#endif /* HAVE_FDB_OPS */ +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev) +{ + unsigned int vfs_assigned = 0; +#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED + int pos; + struct pci_dev *vfdev; + unsigned short dev_id; + + /* only search if we are a PF */ + if (!dev->is_physfn) + return 0; + + /* find SR-IOV capability */ + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return 0; + + /* + * determine the device ID for the VFs, the vendor ID will be the + * same as the PF so there is no need to check for that one + */ + pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id); + + /* loop through all the VFs to see if we own any that are assigned */ + vfdev = pci_get_device(dev->vendor, dev_id, NULL); + while (vfdev) { + /* + * It is considered assigned if it is a virtual function with + * our dev as the physical function and the assigned bit is set + */ + if (vfdev->is_virtfn && (vfdev->physfn == dev) && + (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) + vfs_assigned++; + + vfdev = pci_get_device(dev->vendor, dev_id, vfdev); + } + +#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */ + return vfs_assigned; +} + +#endif /* CONFIG_PCI_IOV */ +#endif /* 3.10.0 */ + +static const unsigned char __maybe_unused pcie_link_speed[] = { + PCI_SPEED_UNKNOWN, /* 0 */ + PCIE_SPEED_2_5GT, /* 1 */ + PCIE_SPEED_5_0GT, /* 2 */ + PCIE_SPEED_8_0GT, /* 3 */ + PCIE_SPEED_16_0GT, /* 4 */ + PCI_SPEED_UNKNOWN, /* 5 */ + PCI_SPEED_UNKNOWN, /* 6 */ + PCI_SPEED_UNKNOWN, /* 7 */ + PCI_SPEED_UNKNOWN, /* 8 */ + PCI_SPEED_UNKNOWN, /* 9 */ + PCI_SPEED_UNKNOWN, /* A */ + PCI_SPEED_UNKNOWN, /* B */ + PCI_SPEED_UNKNOWN, /* C */ + PCI_SPEED_UNKNOWN, /* D */ + PCI_SPEED_UNKNOWN, /* E */ + PCI_SPEED_UNKNOWN /* F */ +}; + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) +int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + int ret; + + *speed = PCI_SPEED_UNKNOWN; + *width = PCIE_LNK_WIDTH_UNKNOWN; + + while (dev) { + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + + ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + if (ret) + return ret; + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + if (next_speed < *speed) + *speed = next_speed; + + if (next_width < *width) + *width = next_width; + + dev = dev->bus->self; + } + + return 0; +} + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7)) +int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev) +{ + int i; + u16 status; + + /* Wait for Transaction Pending bit clean */ + for (i = 0; i < 4; i++) { + if (i) + msleep((1 << (i - 1)) * 100); + + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); + if (!(status & PCI_EXP_DEVSTA_TRPND)) + return 1; + } + + return 0; +} +#endif /* crs_timeout) { + printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not " + "responding\n", pci_domain_nr(bus), + bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn)); + return false; + } + } + + return true; +} + +bool _kc_pci_device_is_present(struct pci_dev *pdev) +{ 
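+	/* Backport of pci_device_is_present(): probe the vendor ID with a
+	 * zero CRS timeout, so an absent or unresponsive device reads as
+	 * not present.
+	 */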
+ u32 v; + + return _kc_pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); +} +#endif /* nexthdr; + unsigned int len; + bool found; + +#define __KC_IP6_FH_F_FRAG BIT(0) +#define __KC_IP6_FH_F_AUTH BIT(1) +#define __KC_IP6_FH_F_SKIP_RH BIT(2) + + if (fragoff) + *fragoff = 0; + + if (*offset) { + struct ipv6hdr _ip6, *ip6; + + ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6); + if (!ip6 || (ip6->version != 6)) { + printk(KERN_ERR "IPv6 header not found\n"); + return -EBADMSG; + } + start = *offset + sizeof(struct ipv6hdr); + nexthdr = ip6->nexthdr; + } + len = skb->len - start; + + do { + struct ipv6_opt_hdr _hdr, *hp; + unsigned int hdrlen; + found = (nexthdr == target); + + if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { + if (target < 0 || found) + break; + return -ENOENT; + } + + hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); + if (!hp) + return -EBADMSG; + + if (nexthdr == NEXTHDR_ROUTING) { + struct ipv6_rt_hdr _rh, *rh; + + rh = skb_header_pointer(skb, start, sizeof(_rh), + &_rh); + if (!rh) + return -EBADMSG; + + if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) && + rh->segments_left == 0) + found = false; + } + + if (nexthdr == NEXTHDR_FRAGMENT) { + unsigned short _frag_off; + __be16 *fp; + + if (flags) /* Indicate that this is a fragment */ + *flags |= __KC_IP6_FH_F_FRAG; + fp = skb_header_pointer(skb, + start+offsetof(struct frag_hdr, + frag_off), + sizeof(_frag_off), + &_frag_off); + if (!fp) + return -EBADMSG; + + _frag_off = ntohs(*fp) & ~0x7; + if (_frag_off) { + if (target < 0 && + ((!ipv6_ext_hdr(hp->nexthdr)) || + hp->nexthdr == NEXTHDR_NONE)) { + if (fragoff) + *fragoff = _frag_off; + return hp->nexthdr; + } + return -ENOENT; + } + hdrlen = 8; + } else if (nexthdr == NEXTHDR_AUTH) { + if (flags && (*flags & __KC_IP6_FH_F_AUTH) && (target < 0)) + break; + hdrlen = (hp->hdrlen + 2) << 2; + } else + hdrlen = ipv6_optlen(hp); + + if (!found) { + nexthdr = hp->nexthdr; + len -= hdrlen; + start += hdrlen; + } + } while (!found); + + *offset = start; + return nexthdr; +} + +int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msix(dev, entries, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +#endif /* 3.14.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)) +char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) +{ + size_t size; + char *buf; + + if (!s) + return NULL; + + size = strlen(s) + 1; + buf = devm_kzalloc(dev, size, gfp); + if (buf) + memcpy(buf, s, size); + return buf; +} + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + /* Set of random keys generated using kernel random number generator */ + static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62, + 0x95, 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F, + 0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95, + 0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC, + 0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41, + 0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A, + 0xD7, 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20}; + + BUG_ON(len > NETDEV_RSS_KEY_LEN); + memcpy(buffer, seed, len); +} +#endif /* 3.15.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)) +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + 
int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + int err; + + /* first go through and flush out any stale entries */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) + if (!ha->synced || ha->refcount != 1) +#else + if (!ha->sync_cnt || ha->refcount != 1) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } + + /* go through and sync new entries to the list */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) + if (ha->synced) +#else + if (ha->sync_cnt) +#endif + continue; + + err = sync(dev, ha->addr); + if (err) + return err; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) + ha->synced = true; +#else + ha->sync_cnt++; +#endif + ha->refcount++; + } + + return 0; +} + +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) + if (!ha->synced) +#else + if (!ha->sync_cnt) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) + ha->synced = false; +#else + ha->sync_cnt--; +#endif + if (--ha->refcount) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } +} + +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da, **next = list; + int err; + + /* first go through and flush out any stale entries */ + while ((da = *next) != NULL) { + if (da->da_synced && da->da_users == 1) { + if (!unsync || !unsync(dev, da->da_addr)) { + *next = da->next; + kfree(da); + (*count)--; + continue; + } + } + next = &da->next; + } + + /* go through and sync new entries to the list */ + for (da = *list; da != NULL; da = da->next) { + if (da->da_synced) + continue; + + err = sync(dev, da->da_addr); + if (err) + return err; + + da->da_synced++; + da->da_users++; + } + + return 0; +} + +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da; + + while ((da = *list) != NULL) { + if (da->da_synced) { + if (!unsync || !unsync(dev, da->da_addr)) { + da->da_synced--; + if (--da->da_users == 0) { + *list = da->next; + kfree(da); + (*count)--; + continue; + } + } + } + list = &da->next; + } +} +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp) +{ + void *p; + + p = devm_kzalloc(dev, len, gfp); + if (p) + memcpy(p, src, len); + + return p; +} +#endif /* 3.16.0 */ + +/******************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +#endif /* <3.17.0 && RHEL_RELEASE_CODE < RHEL7.5 */ + 
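+/*
+ * Every version-gated block in this file follows the same pattern: the
+ * __kc_*()/_kc_*() helper is compiled only when LINUX_VERSION_CODE (or the
+ * distro release code) predates the upstream API, and the companion
+ * ngbe_kcompat.h is expected to map the upstream name onto the backport so
+ * the rest of the driver can call the modern API unconditionally. As an
+ * illustration only (the exact guards and mappings live in ngbe_kcompat.h),
+ * the header-side counterpart of the 3.16 block above would look roughly
+ * like:
+ *
+ *   #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
+ *   void *__kc_devm_kmemdup(struct device *dev, const void *src,
+ *                           size_t len, gfp_t gfp);
+ *   #define devm_kmemdup __kc_devm_kmemdup
+ *   #endif /* < 3.16.0 */
+ */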
+/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) +#ifndef NO_PTP_SUPPORT +static void __kc_sock_efree(struct sk_buff *skb) +{ + sock_put(skb->sk); +} + +struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + struct sk_buff *clone; + + if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) + return NULL; + + clone = skb_clone(skb, GFP_ATOMIC); + if (!clone) { + sock_put(sk); + return NULL; + } + + clone->sk = sk; + clone->destructor = __kc_sock_efree; + + return clone; +} + +void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps) +{ + struct sock_exterr_skb *serr; + struct sock *sk = skb->sk; + int err; + + sock_hold(sk); + + *skb_hwtstamps(skb) = *hwtstamps; + + serr = SKB_EXT_ERR(skb); + memset(serr, 0, sizeof(*serr)); + serr->ee.ee_errno = ENOMSG; + serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; + + err = sock_queue_err_skb(sk, skb); + if (err) + kfree_skb(skb); + + sock_put(sk); +} +#endif + +/* include headers needed for get_headlen function */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#include +#endif +#ifdef HAVE_SCTP +#include +#endif + +u32 __kc_eth_get_headlen(const struct net_device __always_unused *dev, + unsigned char *data, unsigned int max_len) +{ + union { + unsigned char *network; + /* l2 headers */ + struct ethhdr *eth; + struct vlan_hdr *vlan; + /* l3 headers */ + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + __be16 proto; + u8 nexthdr = 0; /* default to not TCP */ + u8 hlen; + + /* this should never happen, but better safe than sorry */ + if (max_len < ETH_HLEN) + return max_len; + + /* initialize network frame pointer */ + hdr.network = data; + + /* set first protocol and move network header forward */ + proto = hdr.eth->h_proto; + hdr.network += ETH_HLEN; + +again: + switch (proto) { + /* handle any vlan tag if present */ + case __constant_htons(ETH_P_8021AD): + case __constant_htons(ETH_P_8021Q): + if ((hdr.network - data) > (max_len - VLAN_HLEN)) + return max_len; + + proto = hdr.vlan->h_vlan_encapsulated_proto; + hdr.network += VLAN_HLEN; + goto again; + /* handle L3 protocols */ + case __constant_htons(ETH_P_IP): + if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) + return max_len; + + /* access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct iphdr)) + return hdr.network - data; + + /* record next protocol if header is present */ + if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) + nexthdr = hdr.ipv4->protocol; + + hdr.network += hlen; + break; +#ifdef NETIF_F_TSO6 + case __constant_htons(ETH_P_IPV6): + if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) + return max_len; + + /* record next protocol */ + nexthdr = hdr.ipv6->nexthdr; + hdr.network += sizeof(struct ipv6hdr); + break; +#endif /* NETIF_F_TSO6 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) + case __constant_htons(ETH_P_FCOE): + hdr.network += FCOE_HEADER_LEN; + break; +#endif + default: + return hdr.network - data; + } + + /* finally sort out L4 */ + switch (nexthdr) { + case IPPROTO_TCP: + if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) + return max_len; + + /* access doff as a u8 to avoid unaligned access on ia64 */ + hdr.network += max_t(u8, sizeof(struct tcphdr), + (hdr.network[12] & 0xF0) >> 2); + + break; + case IPPROTO_UDP: + case IPPROTO_UDPLITE: + 
hdr.network += sizeof(struct udphdr); + break; +#ifdef HAVE_SCTP + case IPPROTO_SCTP: + hdr.network += sizeof(struct sctphdr); + break; +#endif + } + + /* + * If everything has gone correctly hdr.network should be the + * data section of the packet and will be the end of the header. + * If not then it probably represents the end of the last recognized + * header. + */ + return min_t(unsigned int, hdr.network - data, max_len); +} + +#endif /* < 3.18.0 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) +#ifdef HAVE_NET_GET_RANDOM_ONCE +static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN]; + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + BUG_ON(len > sizeof(__kc_netdev_rss_key)); + net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key)); + memcpy(buffer, __kc_netdev_rss_key, len); +} +#endif + +int _kc_bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, + int nmaskbits) +{ + ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf - 2; + int n = 0; + + if (len > 1) { + n = list ? bitmap_scnlistprintf(buf, len, maskp, nmaskbits) : + bitmap_scnprintf(buf, len, maskp, nmaskbits); + buf[n++] = '\n'; + buf[n] = '\0'; + } + return n; +} +#endif + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) +#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + (SLE_VERSION_CODE > SLE_VERSION(12,1,0))) +unsigned int _kc_cpumask_local_spread(unsigned int i, int node) +{ + int cpu; + + /* Wrap: we always want a cpu. */ + i %= num_online_cpus(); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)) + /* Kernels prior to 2.6.28 do not have for_each_cpu or + * cpumask_of_node, so just use for_each_online_cpu() + */ + for_each_online_cpu(cpu) + if (i-- == 0) + return cpu; + + return 0; +#else + if (node == -1) { + for_each_cpu(cpu, cpu_online_mask) + if (i-- == 0) + return cpu; + } else { + /* NUMA first. */ + for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask) + if (i-- == 0) + return cpu; + + for_each_cpu(cpu, cpu_online_mask) { + /* Skip NUMA nodes, done above. */ + if (cpumask_test_cpu(cpu, cpumask_of_node(node))) + continue; + + if (i-- == 0) + return cpu; + } + } +#endif /* KERNEL_VERSION >= 2.6.28 */ + BUG(); +} +#endif +#endif + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +/** + * _kc_skb_flow_dissect_flow_keys - parse SKB to fill _kc_flow_keys + * @skb: SKB used to fille _kc_flow_keys + * @flow: _kc_flow_keys to set with SKB fields + * @flags: currently unused flags + * + * The purpose of using kcompat for this function is so the caller doesn't have + * to care about which kernel version they are on, which prevents a larger than + * normal #ifdef mess created by using a HAVE_* flag for this case. This is also + * done for 4.2 kernels to simplify calling skb_flow_dissect_flow_keys() + * because in 4.2 kernels skb_flow_dissect_flow_keys() exists, but only has 2 + * arguments. Recent kernels have skb_flow_dissect_flow_keys() that has 3 + * arguments. 
+ * + * The caller needs to understand that this function was only implemented as a + * bare-minimum replacement for recent versions of skb_flow_dissect_flow_keys() + * and this function is in no way similar to skb_flow_dissect_flow_keys(). An + * example use can be found in the ice driver, specifically ice_arfs.c. + * + * This function is treated as a whitelist of supported fields the SKB can + * parse. If new functionality is added make sure to keep this format (i.e. only + * check for fields that are explicity wanted). + * + * Current whitelist: + * + * TCPv4, TCPv6, UDPv4, UDPv6 + * + * If any unexpected protocol or other field is found this function memsets the + * flow passed in back to 0 and returns false. Otherwise the flow is populated + * and returns true. + */ +bool +_kc_skb_flow_dissect_flow_keys(const struct sk_buff *skb, + struct _kc_flow_keys *flow, + unsigned int __always_unused flags) +{ + memset(flow, 0, sizeof(*flow)); + + flow->basic.n_proto = skb->protocol; + switch (flow->basic.n_proto) { + case htons(ETH_P_IP): + flow->basic.ip_proto = ip_hdr(skb)->protocol; + flow->addrs.v4addrs.src = ip_hdr(skb)->saddr; + flow->addrs.v4addrs.dst = ip_hdr(skb)->daddr; + break; + case htons(ETH_P_IPV6): + flow->basic.ip_proto = ipv6_hdr(skb)->nexthdr; + memcpy(&flow->addrs.v6addrs.src, &ipv6_hdr(skb)->saddr, + sizeof(struct in6_addr)); + memcpy(&flow->addrs.v6addrs.dst, &ipv6_hdr(skb)->daddr, + sizeof(struct in6_addr)); + break; + default: + netdev_dbg(skb->dev, "%s: Unsupported/unimplemented layer 3 protocol %04x\n", __func__, htons(flow->basic.n_proto)); + goto unsupported; + } + + switch (flow->basic.ip_proto) { + case IPPROTO_TCP: + { + struct tcphdr *tcph; + + tcph = tcp_hdr(skb); + flow->ports.src = tcph->source; + flow->ports.dst = tcph->dest; + break; + } + case IPPROTO_UDP: + { + struct udphdr *udph; + + udph = udp_hdr(skb); + flow->ports.src = udph->source; + flow->ports.dst = udph->dest; + break; + } + default: + netdev_dbg(skb->dev, "%s: Unsupported/unimplemented layer 4 protocol %02x\n", __func__, flow->basic.ip_proto); + return false; + } + + return true; + +unsupported: + memset(flow, 0, sizeof(*flow)); + return false; +} +#endif /* ! >= RHEL7.4 && ! >= SLES12.2 */ +#endif /* 4.3.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) ) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) +#ifdef CONFIG_SPARC +#include +#include +#endif +int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, + u8 *mac_addr __maybe_unused) +{ +#if (((LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)) && defined(CONFIG_OF) && \ + !defined(HAVE_STRUCT_DEVICE_OF_NODE) || !defined(CONFIG_OF)) && \ + !defined(CONFIG_SPARC)) + return -ENODEV; +#else + const unsigned char *addr; + struct device_node *dp; + + if (dev_is_pci(dev)) + dp = pci_device_to_OF_node(to_pci_dev(dev)); + else +#if defined(HAVE_STRUCT_DEVICE_OF_NODE) && defined(CONFIG_OF) + dp = dev->of_node; +#else + dp = NULL; +#endif + + addr = NULL; + if (dp) + addr = of_get_mac_address(dp); +#ifdef CONFIG_SPARC + /* Kernel hasn't implemented arch_get_platform_mac_address, but we + * should handle the SPARC case here since it was supported + * originally. This is replaced by arch_get_platform_mac_address() + * upstream. 
+ */ + if (!addr) + addr = idprom->id_ethaddr; +#endif + if (!addr) + return -ENODEV; + + ether_addr_copy(mac_addr, addr); + return 0; +#endif +} +#endif /* !(RHEL_RELEASE >= 7.3) */ +#endif /* < 4.5.0 */ + +/*****************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)))) +const char *_kc_phy_speed_to_str(int speed) +{ + switch (speed) { + case SPEED_10: + return "10Mbps"; + case SPEED_100: + return "100Mbps"; + case SPEED_1000: + return "1Gbps"; + case SPEED_2500: + return "2.5Gbps"; + case SPEED_5000: + return "5Gbps"; + case SPEED_10000: + return "10Gbps"; + case SPEED_14000: + return "14Gbps"; + case SPEED_20000: + return "20Gbps"; + case SPEED_25000: + return "25Gbps"; + case SPEED_40000: + return "40Gbps"; + case SPEED_50000: + return "50Gbps"; + case SPEED_56000: + return "56Gbps"; +#ifdef SPEED_100000 + case SPEED_100000: + return "100Gbps"; +#endif + case SPEED_UNKNOWN: + return "Unknown"; + default: + return "Unsupported (update phy-core.c)"; + } +} +#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0) ) +void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, + struct ethtool_link_ksettings *src) +{ + unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); + unsigned int idx = 0; + + for (; idx < size; idx++) { + dst->link_modes.supported[idx] &= + src->link_modes.supported[idx]; + dst->link_modes.advertising[idx] &= + src->link_modes.advertising[idx]; + } +} +#endif /* 4.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0)) +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,5,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0) || \ + SLE_VERSION_CODE >= SLE_VERSION(15,1,0)) +#if BITS_PER_LONG == 64 +/** + * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap + * @bitmap: array of unsigned longs, the destination bitmap + * @buf: array of u32 (in host byte order), the source bitmap + * @nbits: number of bits in @bitmap + */ +void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits) +{ + unsigned int i, halfwords; + + halfwords = DIV_ROUND_UP(nbits, 32); + for (i = 0; i < halfwords; i++) { + bitmap[i/2] = (unsigned long) buf[i]; + if (++i < halfwords) + bitmap[i/2] |= ((unsigned long) buf[i]) << 32; + } + + /* Clear tail bits in last word beyond nbits. */ + if (nbits % BITS_PER_LONG) + bitmap[(halfwords - 1) / 2] &= BITMAP_LAST_WORD_MASK(nbits); +} +#endif /* BITS_PER_LONG == 64 */ +#endif /* !(RHEL >= 8.0) && !(SLES >= 12.5 && SLES < 15.0 || SLES >= 15.1) */ +#endif /* 4.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0)) +/* PCIe link information */ +#define PCIE_SPEED2STR(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? "16 GT/s" : \ + (speed) == PCIE_SPEED_8_0GT ? "8 GT/s" : \ + (speed) == PCIE_SPEED_5_0GT ? "5 GT/s" : \ + (speed) == PCIE_SPEED_2_5GT ? "2.5 GT/s" : \ + "Unknown speed") + +/* PCIe speed to Mb/s reduced by encoding overhead */ +#define PCIE_SPEED2MBS_ENC(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? 
16000*128/130 : \ + (speed) == PCIE_SPEED_8_0GT ? 8000*128/130 : \ + (speed) == PCIE_SPEED_5_0GT ? 5000*8/10 : \ + (speed) == PCIE_SPEED_2_5GT ? 2500*8/10 : \ + 0) + +static u32 +_kc_pcie_bandwidth_available(struct pci_dev *dev, + struct pci_dev **limiting_dev, + enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + u32 bw, next_bw; + + if (speed) + *speed = PCI_SPEED_UNKNOWN; + if (width) + *width = PCIE_LNK_WIDTH_UNKNOWN; + + bw = 0; + + while (dev) { + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed); + + /* Check if current device limits the total bandwidth */ + if (!bw || next_bw <= bw) { + bw = next_bw; + + if (limiting_dev) + *limiting_dev = dev; + if (speed) + *speed = next_speed; + if (width) + *width = next_width; + } + + dev = pci_upstream_bridge(dev); + } + + return bw; +} + +static enum pci_bus_speed _kc_pcie_get_speed_cap(struct pci_dev *dev) +{ + u32 lnkcap2, lnkcap; + + /* + * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link + * Speeds Vector in Link Capabilities 2 when supported, falling + * back to Max Link Speed in Link Capabilities otherwise. + */ + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); + if (lnkcap2) { /* PCIe r3.0-compliant */ + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + return PCI_SPEED_UNKNOWN; + } + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) { + if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + } + + return PCI_SPEED_UNKNOWN; +} + +static enum pcie_link_width _kc_pcie_get_width_cap(struct pci_dev *dev) +{ + u32 lnkcap; + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) + return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4; + + return PCIE_LNK_WIDTH_UNKNOWN; +} + +static u32 +_kc_pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + *speed = _kc_pcie_get_speed_cap(dev); + *width = _kc_pcie_get_width_cap(dev); + + if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) + return 0; + + return *width * PCIE_SPEED2MBS_ENC(*speed); +} + +void _kc_pcie_print_link_status(struct pci_dev *dev) { + enum pcie_link_width width, width_cap; + enum pci_bus_speed speed, speed_cap; + struct pci_dev *limiting_dev = NULL; + u32 bw_avail, bw_cap; + + bw_cap = _kc_pcie_bandwidth_capable(dev, &speed_cap, &width_cap); + bw_avail = _kc_pcie_bandwidth_available(dev, &limiting_dev, &speed, + &width); + + if (bw_avail >= bw_cap) + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); + else + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", + bw_avail / 1000, bw_avail % 1000, + PCIE_SPEED2STR(speed), width, + 
limiting_dev ? pci_name(limiting_dev) : "", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); +} +#endif /* 4.17.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_NDO_FDB_ADD_EXTACK +#else /* !RHEL || RHEL < 8.1 */ +#ifdef HAVE_TC_SETUP_CLSFLOWER +#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \ + const struct flow_match *__m = &(__rule)->match; \ + struct flow_dissector *__d = (__m)->dissector; \ + \ + (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \ + (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask); \ + +void ngbe_flow_rule_match_basic(const struct flow_rule *rule, + struct flow_match_basic *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out); +} + +void ngbe_flow_rule_match_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out); +} + +void ngbe_flow_rule_match_eth_addrs(const struct flow_rule *rule, + struct flow_match_eth_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out); +} + +#ifdef HAVE_TC_FLOWER_ENC +void ngbe_flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out); +} + +void ngbe_flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out); +} + +void ngbe_flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out); +} + +void ngbe_flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out); +} + +void ngbe_flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out); +} +#endif + +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +void ngbe_flow_rule_match_vlan(const struct flow_rule *rule, + struct flow_match_vlan *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out); +} +#endif + +void ngbe_flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out); +} + +void ngbe_flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out); +} + +void ngbe_flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out); +} +#endif /* HAVE_TC_SETUP_CLSFLOWER */ +#endif /* !RHEL || RHEL < 8.1 */ +#endif /* 5.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#if 0 +int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head __always_unused *driver_list, + tc_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + bool ingress_only) +{ + if (ingress_only && + f->binder_type != 
TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + /* Note: Upstream has driver_block_list, but older kernels do not */ + switch (f->command) { + case TC_BLOCK_BIND: +#ifdef HAVE_TCF_BLOCK_CB_REGISTER_EXTACK + return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv, + f->extack); +#else + return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv); +#endif + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, cb, cb_ident); + return 0; + default: + return -EOPNOTSUPP; + } +} +#endif +#endif +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#endif /* 5.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)) +u64 _kc_pci_get_dsn(struct pci_dev *dev) +{ + u32 dword; + u64 dsn; + int pos; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN); + if (!pos) + return 0; + + /* + * The Device Serial Number is two dwords offset 4 bytes from the + * capability position. The specification says that the first dword is + * the lower half, and the second dword is the upper half. + */ + pos += 4; + pci_read_config_dword(dev, pos, &dword); + dsn = (u64)dword; + pci_read_config_dword(dev, pos + 4, &dword); + dsn |= ((u64)dword) << 32; + + return dsn; +} +#endif /* 5.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,17,0)) +#ifndef ETH_HW_ADDR_SET +void _kc_eth_hw_addr_set_ngbe(struct net_device *dev, const void *addr) +{ + ether_addr_copy(dev->dev_addr, addr); +} +#endif /* ETH_HW_ADDR_SET */ +#endif /* 5.17.0 */ \ No newline at end of file diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_kcompat.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_kcompat.h new file mode 100644 index 0000000000000000000000000000000000000000..80fbd06daf99ac73c12ed42039ea57fcdaa7d20e --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_kcompat.h @@ -0,0 +1,7738 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ */ + + +#ifndef _KCOMPAT_H_ +#define _KCOMPAT_H_ + +#ifndef LINUX_VERSION_CODE +#include +#else +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef GCC_VERSION +#define GCC_VERSION (__GNUC__ * 10000 \ + + __GNUC_MINOR__ * 100 \ + + __GNUC_PATCHLEVEL__) +#endif /* GCC_VERSION */ + +/* Backport macros for controlling GCC diagnostics */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0) ) + +/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */ +#if GCC_VERSION >= 40600 +#define __diag_str1(s) #s +#define __diag_str(s) __diag_str1(s) +#define __diag(s) _Pragma(__diag_str(GCC diagnostic s)) +#else +#define __diag(s) +#endif /* GCC_VERSION >= 4.6 */ +#define __diag_push() __diag(push) +#define __diag_pop() __diag(pop) +#endif /* LINUX_VERSION < 4.18.0 */ + +#ifndef NSEC_PER_MSEC +#define NSEC_PER_MSEC 1000000L +#endif +#include +/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */ +#ifndef UTS_RELEASE +/* utsrelease.h changed locations in 2.6.33 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) +#include +#else +#include +#endif +#endif + +/*888888888888888888888888888*/ + +/*#define NGBE_SUPPORT_KYLIN*/ /*kylin to open*/ +/*#define CONFIG_EULER_KERNEL */ /*EULER to open*/ +/*#define CONFIG_UOS_KERNEL */ /*UOS to open*/ + +/**88888888888888888888888888*/ +#ifndef NGBE_STATIC_ITR +#define NGBE_STATIC_ITR 1 /* static itr configure */ +#endif + +#ifndef NGBE_LINK_RETRY +#define NGBE_LINK_RETRY 1 /* static itr configure */ +#endif + +#ifndef NGBE_POLL_LINK_STATUS +#define NGBE_POLL_LINK_STATUS 0 +#endif + +#ifndef NGBE_PCIE_RECOVER +#define NGBE_PCIE_RECOVER 1 +#endif + +#ifndef NGBE_RECOVER_CHECK +#define NGBE_RECOVER_CHECK 1 +#endif + +#ifndef NGBE_DIS_COMP_TIMEOUT +#define NGBE_DIS_COMP_TIMEOUT 1 /* static itr configure */ +#endif + +/**88888888888888888888888888*/ + + +#if defined(NGBE_SUPPORT_KYLIN) +#ifdef UTS_UBUNTU_RELEASE_ABI +#undef UTS_UBUNTU_RELEASE_ABI +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0) +#define UTS_UBUNTU_RELEASE_ABI 21 +#else +#define UTS_UBUNTU_RELEASE_ABI 21 +#endif +#endif + +/* For Kylin: + * support Kylin-4.0.2-SP2-17122218.j1-arm64 */ +#ifdef UTS_KYLINOS_RELEASE_ABI +#ifndef UTS_UBUNTU_RELEASE_ABI +#if UTS_KYLINOS_RELEASE_ABI <= 20171215 +#define UTS_UBUNTU_RELEASE_ABI 21 +#else +#define UTS_UBUNTU_RELEASE_ABI 21 +#endif +#endif /* !UTS_UBUNTU_RELEASE_ABI */ +#endif /* UTS_KYLINOS_RELEASE_ABI */ + +/* NAPI enable/disable flags here */ +#define NAPI + +#define adapter_struct ngbe_adapter +#define adapter_q_vector ngbe_q_vector + +/* and finally set defines so that the code sees the changes */ +#ifdef NAPI +#else +#endif /* NAPI */ + +/* Dynamic LTR and deeper C-State support disable/enable */ + +/* packet split disable/enable */ +#ifdef DISABLE_PACKET_SPLIT +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT +#define CONFIG_NGBE_DISABLE_PACKET_SPLIT +#endif +#endif /* DISABLE_PACKET_SPLIT */ + +/* MSI compatibility code for all kernels and drivers */ +#ifdef DISABLE_PCI_MSI +#undef CONFIG_PCI_MSI +#endif +#ifndef CONFIG_PCI_MSI +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 8)) +struct msix_entry { + u16 vector; /* kernel uses to write allocated vector */ + u16 entry; /* driver uses to specify entry, OS writes */ +}; +#endif +#undef 
pci_enable_msi +#define pci_enable_msi(a) -ENOTSUPP +#undef pci_disable_msi +#define pci_disable_msi(a) do {} while (0) +#undef pci_enable_msix +#define pci_enable_msix(a, b, c) -ENOTSUPP +#undef pci_disable_msix +#define pci_disable_msix(a) do {} while (0) +#define msi_remove_pci_irq_vectors(a) do {} while (0) +#endif /* CONFIG_PCI_MSI */ +#ifdef DISABLE_PM +#undef CONFIG_PM +#endif + +#ifdef DISABLE_NET_POLL_CONTROLLER +#undef CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef PMSG_SUSPEND +#define PMSG_SUSPEND 3 +#endif + +/* generic boolean compatibility */ +#undef TRUE +#undef FALSE +#define TRUE true +#define FALSE false +#ifdef GCC_VERSION +#if ( GCC_VERSION < 3000 ) +#define _Bool char +#endif +#else +#define _Bool char +#endif + +#ifndef ipv6_authlen +#define ipv6_authlen(p) (((p)->hdrlen+2) << 2) +#endif + +#ifndef BIT +#define BIT(nr) (1UL << (nr)) +#endif + +#undef __always_unused +#define __always_unused __attribute__((__unused__)) + +#undef __maybe_unused +#define __maybe_unused __attribute__((__unused__)) + +/* kernels less than 2.4.14 don't have this */ +#ifndef ETH_P_8021Q +#define ETH_P_8021Q 0x8100 +#endif + +#ifndef module_param +#define module_param(v,t,p) MODULE_PARM(v, "i"); +#endif + +#ifndef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffffffffffffULL +#endif + +#ifndef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0x00000000ffffffffULL +#endif + +#ifndef PCI_CAP_ID_EXP +#define PCI_CAP_ID_EXP 0x10 +#endif + +#ifndef uninitialized_var +#define uninitialized_var(x) x = x +#endif + +#ifndef PCIE_LINK_STATE_L0S +#define PCIE_LINK_STATE_L0S 1 +#endif +#ifndef PCIE_LINK_STATE_L1 +#define PCIE_LINK_STATE_L1 2 +#endif + +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(net, pdev) +#endif + +#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#define free_netdev(x) kfree(x) +#endif + +#ifdef HAVE_POLL_CONTROLLER +#define CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef SKB_DATAREF_SHIFT +/* if we do not have the infrastructure to detect if skb_header is cloned + just return false in all cases */ +#define skb_header_cloned(x) 0 +#endif + +#ifndef NETIF_F_GSO +#define gso_size tso_size +#define gso_segs tso_segs +#endif + +#ifndef NETIF_F_GRO +#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \ + vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan) +#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb) +#endif + +#ifndef NETIF_F_SCTP_CSUM +#define NETIF_F_SCTP_CSUM 0 +#endif + +#ifndef NETIF_F_LRO +#define NETIF_F_LRO BIT(15) +#endif + +#ifndef NETIF_F_NTUPLE +#define NETIF_F_NTUPLE BIT(27) +#endif + +#ifndef NETIF_F_ALL_FCOE +#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ + NETIF_F_FSO) +#endif + +#ifndef IPPROTO_SCTP +#define IPPROTO_SCTP 132 +#endif + +#ifndef IPPROTO_UDPLITE +#define IPPROTO_UDPLITE 136 +#endif + +#ifndef CHECKSUM_PARTIAL +#define CHECKSUM_PARTIAL CHECKSUM_HW +#define CHECKSUM_COMPLETE CHECKSUM_HW +#endif + +#ifndef __read_mostly +#define __read_mostly +#endif + +#ifndef MII_RESV1 +#define MII_RESV1 0x17 /* Reserved... 
*/ +#endif + +#ifndef unlikely +#define unlikely(_x) _x +#define likely(_x) _x +#endif + +#ifndef WARN_ON +#define WARN_ON(x) +#endif + +#ifndef PCI_DEVICE +#define PCI_DEVICE(vend,dev) \ + .vendor = (vend), .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID +#endif + +#ifndef node_online +#define node_online(node) ((node) == 0) +#endif + +#ifndef _LINUX_RANDOM_H +#include +#endif + +#ifndef BITS_PER_TYPE +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) +#endif + +#ifndef BITS_TO_LONGS +#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) +#endif + +#ifndef DECLARE_BITMAP +#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)] +#endif + +#ifndef VLAN_HLEN +#define VLAN_HLEN 4 +#endif + +#ifndef VLAN_ETH_HLEN +#define VLAN_ETH_HLEN 18 +#endif + +#ifndef VLAN_ETH_FRAME_LEN +#define VLAN_ETH_FRAME_LEN 1518 +#endif + +#ifndef DCA_GET_TAG_TWO_ARGS +#define dca3_get_tag(a,b) dca_get_tag(b) +#endif + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#if defined(__i386__) || defined(__x86_64__) +#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#endif +#endif + +/* taken from 2.6.24 definition in linux/kernel.h */ +#ifndef IS_ALIGNED +#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0) +#endif + +#ifdef IS_ENABLED +#undef IS_ENABLED +#undef __ARG_PLACEHOLDER_1 +#undef config_enabled +#undef _config_enabled +#undef __config_enabled +#undef ___config_enabled +#endif + +#define __ARG_PLACEHOLDER_1 0, +#define config_enabled(cfg) _config_enabled(cfg) +#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) +#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) +#define ___config_enabled(__ignored, val, ...) val + +#define IS_ENABLED(option) \ + (config_enabled(option) || config_enabled(option##_MODULE)) + +#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX) +struct _kc_vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_ethhdr _kc_vlan_ethhdr +struct _kc_vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_hdr _kc_vlan_hdr +#define vlan_tx_tag_present(_skb) 0 +#define vlan_tx_tag_get(_skb) 0 +#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */ + +#ifndef VLAN_PRIO_SHIFT +#define VLAN_PRIO_SHIFT 13 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_2_5GB +#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_5_0GB +#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_8_0GB +#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X1 +#define PCI_EXP_LNKSTA_NLW_X1 0x0010 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X2 +#define PCI_EXP_LNKSTA_NLW_X2 0x0020 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X4 +#define PCI_EXP_LNKSTA_NLW_X4 0x0040 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X8 +#define PCI_EXP_LNKSTA_NLW_X8 0x0080 +#endif + +#ifndef __GFP_COLD +#define __GFP_COLD 0 +#endif + +#ifndef __GFP_COMP +#define __GFP_COMP 0 +#endif + +#ifndef IP_OFFSET +#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */ +#endif + +/*****************************************************************************/ +/* Installations with ethtool version without eeprom, adapter id, or statistics + * support */ + +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif + +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x1d +#undef ethtool_drvinfo +#define ethtool_drvinfo k_ethtool_drvinfo +struct 
k_ethtool_drvinfo { + u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char reserved1[32]; + char reserved2[16]; + u32 n_stats; + u32 testinfo_len; + u32 eedump_len; + u32 regdump_len; +}; + +struct ethtool_stats { + u32 cmd; + u32 n_stats; + u64 data[0]; +}; +#endif /* ETHTOOL_GSTATS */ + +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x1c +#endif /* ETHTOOL_PHYS_ID */ + +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x1b +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS, +}; +struct ethtool_gstrings { + u32 cmd; /* ETHTOOL_GSTRINGS */ + u32 string_set; /* string set id e.c. ETH_SS_TEST, etc*/ + u32 len; /* number of strings in the string set */ + u8 data[0]; +}; +#endif /* ETHTOOL_GSTRINGS */ + +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x1a +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = BIT(0), + ETH_TEST_FL_FAILED = BIT(1), +}; +struct ethtool_test { + u32 cmd; + u32 flags; + u32 reserved; + u32 len; + u64 data[0]; +}; +#endif /* ETHTOOL_TEST */ + +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0xb +#undef ETHTOOL_GREGS +struct ethtool_eeprom { + u32 cmd; + u32 magic; + u32 offset; + u32 len; + u8 data[0]; +}; + +struct ethtool_value { + u32 cmd; + u32 data; +}; +#endif /* ETHTOOL_GEEPROM */ + +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0xa +#endif /* ETHTOOL_GLINK */ + +#ifndef ETHTOOL_GWOL +#define ETHTOOL_GWOL 0x5 +#define ETHTOOL_SWOL 0x6 +#define SOPASS_MAX 6 +struct ethtool_wolinfo { + u32 cmd; + u32 supported; + u32 wolopts; + u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ +}; +#endif /* ETHTOOL_GWOL */ + +#ifndef ETHTOOL_GREGS +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ +#define ethtool_regs _kc_ethtool_regs +/* for passing big chunks of data */ +struct _kc_ethtool_regs { + u32 cmd; + u32 version; /* driver-specific, indicates different chips/revs */ + u32 len; /* bytes */ + u8 data[0]; +}; +#endif /* ETHTOOL_GREGS */ + +#ifndef ETHTOOL_GMSGLVL +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#endif +#ifndef ETHTOOL_SMSGLVL +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */ +#endif +#ifndef ETHTOOL_NWAY_RST +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ +#endif +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0x0000000a /* Get link status */ +#endif +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#endif +#ifndef ETHTOOL_SEEPROM +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ +#endif +#ifndef ETHTOOL_GCOALESCE +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ +/* for configuring coalescing parameters of chip */ +#define ethtool_coalesce _kc_ethtool_coalesce +struct _kc_ethtool_coalesce { + u32 cmd; /* ETHTOOL_{G,S}COALESCE */ + + /* How many usecs to delay an RX interrupt after + * a packet arrives. If 0, only rx_max_coalesced_frames + * is used. + */ + u32 rx_coalesce_usecs; + + /* How many packets to delay an RX interrupt after + * a packet arrives. If 0, only rx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause RX interrupts to never be + * generated. + */ + u32 rx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. 
+ */ + u32 rx_coalesce_usecs_irq; + u32 rx_max_coalesced_frames_irq; + + /* How many usecs to delay a TX interrupt after + * a packet is sent. If 0, only tx_max_coalesced_frames + * is used. + */ + u32 tx_coalesce_usecs; + + /* How many packets to delay a TX interrupt after + * a packet is sent. If 0, only tx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause TX interrupts to never be + * generated. + */ + u32 tx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. + */ + u32 tx_coalesce_usecs_irq; + u32 tx_max_coalesced_frames_irq; + + /* How many usecs to delay in-memory statistics + * block updates. Some drivers do not have an in-memory + * statistic block, and in such cases this value is ignored. + * This value must not be zero. + */ + u32 stats_block_coalesce_usecs; + + /* Adaptive RX/TX coalescing is an algorithm implemented by + * some drivers to improve latency under low packet rates and + * improve throughput under high packet rates. Some drivers + * only implement one of RX or TX adaptive coalescing. Anything + * not implemented by the driver causes these values to be + * silently ignored. + */ + u32 use_adaptive_rx_coalesce; + u32 use_adaptive_tx_coalesce; + + /* When the packet rate (measured in packets per second) + * is below pkt_rate_low, the {rx,tx}_*_low parameters are + * used. + */ + u32 pkt_rate_low; + u32 rx_coalesce_usecs_low; + u32 rx_max_coalesced_frames_low; + u32 tx_coalesce_usecs_low; + u32 tx_max_coalesced_frames_low; + + /* When the packet rate is below pkt_rate_high but above + * pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used. + */ + + /* When the packet rate is (measured in packets per second) + * is above pkt_rate_high, the {rx,tx}_*_high parameters are + * used. + */ + u32 pkt_rate_high; + u32 rx_coalesce_usecs_high; + u32 rx_max_coalesced_frames_high; + u32 tx_coalesce_usecs_high; + u32 tx_max_coalesced_frames_high; + + /* How often to do adaptive coalescing packet rate sampling, + * measured in seconds. Must not be zero. + */ + u32 rate_sample_interval; +}; +#endif /* ETHTOOL_GCOALESCE */ + +#ifndef ETHTOOL_SCOALESCE +#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ +#endif +#ifndef ETHTOOL_GRINGPARAM +#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ +/* for configuring RX/TX ring parameters */ +#define ethtool_ringparam _kc_ethtool_ringparam +struct _kc_ethtool_ringparam { + u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ + + /* Read only attributes. These indicate the maximum number + * of pending RX/TX ring entries the driver will allow the + * user to set. + */ + u32 rx_max_pending; + u32 rx_mini_max_pending; + u32 rx_jumbo_max_pending; + u32 tx_max_pending; + + /* Values changeable by the user. The valid values are + * in the range 1 to the "*_max_pending" counterpart above. + */ + u32 rx_pending; + u32 rx_mini_pending; + u32 rx_jumbo_pending; + u32 tx_pending; +}; +#endif /* ETHTOOL_GRINGPARAM */ + +#ifndef ETHTOOL_SRINGPARAM +#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. 
*/ +#endif +#ifndef ETHTOOL_GPAUSEPARAM +#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ +/* for configuring link flow control parameters */ +#define ethtool_pauseparam _kc_ethtool_pauseparam +struct _kc_ethtool_pauseparam { + u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ + + /* If the link is being auto-negotiated (via ethtool_cmd.autoneg + * being true) the user may set 'autoneg' here non-zero to have the + * pause parameters be auto-negotiated too. In such a case, the + * {rx,tx}_pause values below determine what capabilities are + * advertised. + * + * If 'autoneg' is zero or the link is not being auto-negotiated, + * then {rx,tx}_pause force the driver to use/not-use pause + * flow control. + */ + u32 autoneg; + u32 rx_pause; + u32 tx_pause; +}; +#endif /* ETHTOOL_GPAUSEPARAM */ + +#ifndef ETHTOOL_SPAUSEPARAM +#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */ +#endif +#ifndef ETHTOOL_GRXCSUM +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_SRXCSUM +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GTXCSUM +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STXCSUM +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GSG +#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable + * (ethtool_value) */ +#endif +#ifndef ETHTOOL_SSG +#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable + * (ethtool_value). */ +#endif +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */ +#endif +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ +#endif +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ +#endif +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ +#endif +#ifndef ETHTOOL_GTSO +#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STSO +#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ +#endif + +#ifndef ETHTOOL_BUSINFO_LEN +#define ETHTOOL_BUSINFO_LEN 32 +#endif + +#ifndef WAKE_FILTER +#define WAKE_FILTER BIT(7) +#endif + +#ifndef SPEED_2500 +#define SPEED_2500 2500 +#endif +#ifndef SPEED_5000 +#define SPEED_5000 5000 +#endif +#ifndef SPEED_14000 +#define SPEED_14000 14000 +#endif +#ifndef SPEED_25000 +#define SPEED_25000 25000 +#endif +#ifndef SPEED_50000 +#define SPEED_50000 50000 +#endif +#ifndef SPEED_56000 +#define SPEED_56000 56000 +#endif +#ifndef SPEED_100000 +#define SPEED_100000 100000 +#endif + +#ifndef RHEL_RELEASE_VERSION +#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif +#ifndef AX_RELEASE_VERSION +#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif + +#ifndef AX_RELEASE_CODE +#define AX_RELEASE_CODE 0 +#endif + +#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3) +#endif + +#ifndef RHEL_RELEASE_CODE +/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */ +#define RHEL_RELEASE_CODE 0 +#endif + +/* RHEL 7 didn't backport the parameter change in + * create_singlethread_workqueue. 
+ * If/when RH corrects this we will want to tighten up the version check. + */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) +#undef create_singlethread_workqueue +#define create_singlethread_workqueue(name) \ + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) +#endif + +/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find + * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new + * enough versions of Ubuntu. Otherwise you can simply see it in the output of + * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in + * the linux-source package, but in the linux-headers package. It begins to + * appear in later releases of 14.04 and 14.10. + * + * Ex: + * + * $uname -r + * 3.13.0-45-generic + * ABI is 45 + * + * + * $uname -r + * 3.16.0-23-generic + * ABI is 23 + */ +#ifndef UTS_UBUNTU_RELEASE_ABI +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#else +/* Ubuntu does not provide actual release version macro, so we use the kernel + * version plus the ABI to generate a unique version code specific to Ubuntu. + * In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in order to + * ignore differences in sublevel which are not important since we have the + * ABI value. Otherwise, it becomes impossible to correlate ABI to version for + * ordering checks. + */ +#define UBUNTU_VERSION_CODE (((~0xFF & LINUX_VERSION_CODE) << 8) + \ + UTS_UBUNTU_RELEASE_ABI) + +#if UTS_UBUNTU_RELEASE_ABI > 255 +#error UTS_UBUNTU_RELEASE_ABI is too large... +#endif /* UTS_UBUNTU_RELEASE_ABI > 255 */ + +#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,0) ) +/* Our version code scheme does not make sense for non 3.x or newer kernels, + * and we have no support in kcompat for this scenario. Thus, treat this as a + * non-Ubuntu kernel. Possibly might be better to error here. + */ +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#endif + +#endif + +/* Note that the 3rd digit is always zero, and will be ignored. This is + * because Ubuntu kernels are based on x.y.0-ABI values, and while their linux + * version codes are 3 digit, this 3rd digit is superseded by the ABI value. + */ +#define UBUNTU_VERSION(a,b,c,d) ((KERNEL_VERSION(a,b,0) << 8) + (d)) + +/* SuSE version macros are the same as Linux kernel version macro */ +#ifndef SLE_VERSION +#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c) +#endif +#define SLE_LOCALVERSION(a,b,c) KERNEL_VERSION(a,b,c) + +#ifdef CONFIG_SUSE_KERNEL +/* Starting since at least SLE 12sp4 and SLE 15, the SUSE kernels have + * provided CONFIG_SUSE_VERSION, CONFIG_SUSE_PATCHLEVEL and + * CONFIG_SUSE_AUXRELEASE. Use these to generate SLE_VERSION if available. + * Only fall back to the manual table otherwise. We expect all future versions + * of SLE kernels to include these values, so the table will remain only for + * the older releases. + */ +#ifdef CONFIG_SUSE_VERSION +#ifndef CONFIG_SUSE_PATCHLEVEL +#error "CONFIG_SUSE_VERSION exists but CONFIG_SUSE_PATCHLEVEL is missing" +#endif +#ifndef CONFIG_SUSE_AUXRELEASE +#error "CONFIG_SUSE_VERSION exists but CONFIG_SUSE_AUXRELEASE is missing" +#endif +#define SLE_VERSION_CODE SLE_VERSION(CONFIG_SUSE_VERSION, CONFIG_SUSE_PATCHLEVEL, CONFIG_SUSE_AUXRELEASE) +#else +/* If we do not have the CONFIG_SUSE_VERSION configuration values, fall back + * to the following table for older releases. 
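+ * (Worked example, assuming a SLE 15 SP2 kernel that sets CONFIG_SUSE_VERSION=15, + * CONFIG_SUSE_PATCHLEVEL=2 and CONFIG_SUSE_AUXRELEASE=0: the branch above yields + * SLE_VERSION(15,2,0), which is simply KERNEL_VERSION(15,2,0).)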
+ */ +#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) ) +/* SLES11 GA is 2.6.27 based */ +#define SLE_VERSION_CODE SLE_VERSION(11,0,0) +#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) ) +/* SLES11 SP1 is 2.6.32 based */ +#define SLE_VERSION_CODE SLE_VERSION(11,1,0) +#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(3,0,13) ) +/* SLES11 SP2 GA is 3.0.13-0.27 */ +#define SLE_VERSION_CODE SLE_VERSION(11,2,0) +#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,76))) +/* SLES11 SP3 GA is 3.0.76-0.11 */ +#define SLE_VERSION_CODE SLE_VERSION(11,3,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101)) + #if (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(0,8,0)) + /* some SLES11sp2 update kernels up to 3.0.101-0.7.x */ + #define SLE_VERSION_CODE SLE_VERSION(11,2,0) + #elif (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(63,0,0)) + /* most SLES11sp3 update kernels */ + #define SLE_VERSION_CODE SLE_VERSION(11,3,0) + #else + /* SLES11 SP4 GA (3.0.101-63) and update kernels 3.0.101-63+ */ + #define SLE_VERSION_CODE SLE_VERSION(11,4,0) + #endif +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28)) +/* SLES12 GA is 3.12.28-4 + * kernel updates 3.12.xx-<33 through 52>[.yy] */ +#define SLE_VERSION_CODE SLE_VERSION(12,0,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,49)) +/* SLES12 SP1 GA is 3.12.49-11 + * updates 3.12.xx-60.yy where xx={51..} */ +#define SLE_VERSION_CODE SLE_VERSION(12,1,0) +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,21) && \ + (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,59))) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,74) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(92,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(93,0,0))) +/* SLES12 SP2 GA is 4.4.21-69. + * SLES12 SP2 updates before SLES12 SP3 are: 4.4.{21,38,49,59} + * SLES12 SP2 updates after SLES12 SP3 are: 4.4.{74,90,103,114,120} + * but they all use a SLE_LOCALVERSION_CODE matching 92.nn.y */ +#define SLE_VERSION_CODE SLE_VERSION(12,2,0) +#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(4,4,73) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4,4,82) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4,4,92)) || \ + (LINUX_VERSION_CODE == KERNEL_VERSION(4,4,103) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,33,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,38,0))) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,114) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(94,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(95,0,0)) ) +/* SLES12 SP3 GM is 4.4.73-5 and update kernels are 4.4.82-6.3. + * SLES12 SP3 updates not conflicting with SP2 are: 4.4.{82,92} + * SLES12 SP3 updates conflicting with SP2 are: + * - 4.4.103-6.33.1, 4.4.103-6.38.1 + * - 4.4.{114,120}-94.nn.y */ +#define SLE_VERSION_CODE SLE_VERSION(12,3,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(94,41,0) || \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(95,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(96,0,0)))) +/* SLES12 SP4 GM is 4.12.14-94.41 and update kernel is 4.12.14-95.x. 
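+ * (It is the -94.41/-95.x local version, packed via SLE_LOCALVERSION()/KERNEL_VERSION(), + * that distinguishes SP4 from the other 4.12.14-based SLE kernels handled here.)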
*/ +#define SLE_VERSION_CODE SLE_VERSION(12,4,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(23,0,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(2,0,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(136,0,0) || \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(25,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(26,0,0)) || \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(150,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(151,0,0)))) +/* SLES15 Beta1 is 4.12.14-2 + * SLES15 GM is 4.12.14-23 and update kernel is 4.12.14-{25,136}, + * and 4.12.14-150.14. + */ +#define SLE_VERSION_CODE SLE_VERSION(15,0,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(25,23,0)) +/* SLES15 SP1 Beta1 is 4.12.14-25.23 */ +#define SLE_VERSION_CODE SLE_VERSION(15,1,0) +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,3,13)) +/* SLES15 SP2 Beta1 is 5.3.13 */ +#define SLE_VERSION_CODE SLE_VERSION(15,2,0) + +/* new SLES kernels must be added here with >= based on kernel + * the idea is to order from newest to oldest and just catch all + * of them using the >= + */ +#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */ +#endif /* !CONFIG_SUSE_VERSION */ +#endif /* CONFIG_SUSE_KERNEL */ +#ifndef SLE_VERSION_CODE +#define SLE_VERSION_CODE 0 +#endif /* SLE_VERSION_CODE */ +#ifndef SLE_LOCALVERSION_CODE +#define SLE_LOCALVERSION_CODE 0 +#endif /* SLE_LOCALVERSION_CODE */ + +/* + * ADQ depends on __TC_MQPRIO_MODE_MAX and related kernel code + * added around 4.15. Some distributions (e.g. Oracle Linux 7.7) + * have done a partial back-port of that to their kernels based + * on older mainline kernels that did not include all the necessary + * kernel enablement to support ADQ. + * Undefine __TC_MQPRIO_MODE_MAX for all OSV distributions with + * kernels based on mainline kernels older than 4.15 except for + * RHEL, SLES and Ubuntu which are known to have good back-ports. + */ +#if (!RHEL_RELEASE_CODE && !SLE_VERSION_CODE && !UBUNTU_VERSION_CODE) + #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) + #undef __TC_MQPRIO_MODE_MAX + #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(4,15,0) */ +#endif /* if (NOT RHEL && NOT SLES && NOT UBUNTU) */ + +#ifdef __KLOCWORK__ +/* The following are not compiled into the binary driver; they are here + * only to tune Klocwork scans to workaround false-positive issues. 
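+ * (For example, the memcpy()/memset() remaps below route through the bounds-checked + * memcpy_s()/memset_s() forms so the scanner can reason about destination sizes; + * __KLOCWORK__ is assumed to be defined only by the analysis run, never by a normal build.)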
+ */ +#ifdef ARRAY_SIZE +#undef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#define memcpy(dest, src, len) memcpy_s(dest, len, src, len) +#define memset(dest, ch, len) memset_s(dest, len, ch, len) + +static inline int _kc_test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags = 0; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old & ~mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} +#define test_and_clear_bit(nr, addr) _kc_test_and_clear_bit(nr, addr) + +static inline int _kc_test_and_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags = 0; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old | mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} +#define test_and_set_bit(nr, addr) _kc_test_and_set_bit(nr, addr) + +#ifdef CONFIG_DYNAMIC_DEBUG +#undef dev_dbg +#define dev_dbg(dev, format, arg...) dev_printk(KERN_DEBUG, dev, format, ##arg) +#undef pr_debug +#define pr_debug(format, arg...) printk(KERN_DEBUG format, ##arg) +#endif /* CONFIG_DYNAMIC_DEBUG */ + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (n = NULL, pos = hlist_entry_safe((head)->first, typeof(*(pos)), \ + member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#ifdef uninitialized_var +#undef uninitialized_var +#define uninitialized_var(x) x = *(&(x)) +#endif + +#ifdef WRITE_ONCE +#undef WRITE_ONCE +#define WRITE_ONCE(x, val) ((x) = (val)) +#endif /* WRITE_ONCE */ +#endif /* __KLOCWORK__ */ + +/* Older versions of GCC will trigger -Wformat-nonliteral warnings for const + * char * strings. Unfortunately, the implementation of do_trace_printk does + * this, in order to add a storage attribute to the memory. This was fixed in + * GCC 5.1, but we still use older distributions built with GCC 4.x. + * + * The string pointer is only passed as a const char * to the __trace_bprintk + * function. Since that function has the __printf attribute, it will trigger + * the warnings. We can't remove the attribute, so instead we'll use the + * __diag macro to disable -Wformat-nonliteral around the call to + * __trace_bprintk. + */ +#if GCC_VERSION < 50100 +#define __trace_bprintk(ip, fmt, args...) 
({ \ + int err; \ + __diag_push(); \ + __diag(ignored "-Wformat-nonliteral"); \ + err = __trace_bprintk(ip, fmt, ##args); \ + __diag_pop(); \ + err; \ +}) +#endif /* GCC_VERSION < 5.1.0 */ + +/* Newer kernels removed */ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)) && \ + (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3)) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,3,0))))) +#define HAVE_PCI_ASPM_H +#endif + +/*****************************************************************************/ +/* 2.4.3 => 2.4.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) + +/**************************************/ +/* PCI DRIVER API */ + +#ifndef pci_set_dma_mask +#define pci_set_dma_mask _kc_pci_set_dma_mask +int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask); +#endif + +#ifndef pci_request_regions +#define pci_request_regions _kc_pci_request_regions +int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name); +#endif + +#ifndef pci_release_regions +#define pci_release_regions _kc_pci_release_regions +void _kc_pci_release_regions(struct pci_dev *pdev); +#endif + +/**************************************/ +/* NETWORK DRIVER API */ + +#ifndef alloc_etherdev +#define alloc_etherdev _kc_alloc_etherdev +struct net_device * _kc_alloc_etherdev(int sizeof_priv); +#endif + +#ifndef is_valid_ether_addr +#define is_valid_ether_addr _kc_is_valid_ether_addr +int _kc_is_valid_ether_addr(u8 *addr); +#endif + +/**************************************/ +/* MISCELLANEOUS */ + +#ifndef INIT_TQUEUE +#define INIT_TQUEUE(_tq, _routine, _data) \ + do { \ + INIT_LIST_HEAD(&(_tq)->list); \ + (_tq)->sync = 0; \ + (_tq)->routine = _routine; \ + (_tq)->data = _data; \ + } while (0) +#endif + +#endif /* 2.4.3 => 2.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) ) +/* Generic MII registers. */ +#define MII_BMCR 0x00 /* Basic mode control register */ +#define MII_BMSR 0x01 /* Basic mode status register */ +#define MII_PHYSID1 0x02 /* PHYS ID 1 */ +#define MII_PHYSID2 0x03 /* PHYS ID 2 */ +#define MII_ADVERTISE 0x04 /* Advertisement control reg */ +#define MII_LPA 0x05 /* Link partner ability reg */ +#define MII_EXPANSION 0x06 /* Expansion register */ +/* Basic mode control register. */ +#define BMCR_FULLDPLX 0x0100 /* Full duplex */ +#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */ +/* Basic mode status register. */ +#define BMSR_ERCAP 0x0001 /* Ext-reg capability */ +#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */ +#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */ +#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */ +#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */ +#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */ +/* Advertisement control register. */ +#define ADVERTISE_CSMA 0x0001 /* Only selector supported */ +#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */ +#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ +#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ +#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */ +#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \ + ADVERTISE_100HALF | ADVERTISE_100FULL) +/* Expansion register for auto-negotiation. 
*/ +#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */ +#endif + +/*****************************************************************************/ +/* 2.4.6 => 2.4.3 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6)) + +#ifndef pci_set_power_state +#define pci_set_power_state _kc_pci_set_power_state +int _kc_pci_set_power_state(struct pci_dev *dev, int state); +#endif + +#ifndef pci_enable_wake +#define pci_enable_wake _kc_pci_enable_wake +int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable); +#endif + +#ifndef pci_disable_device +#define pci_disable_device _kc_pci_disable_device +void _kc_pci_disable_device(struct pci_dev *pdev); +#endif + +/* PCI PM entry point syntax changed, so don't support suspend/resume */ +#undef CONFIG_PM + +#endif /* 2.4.6 => 2.4.3 */ + +#ifndef HAVE_PCI_SET_MWI +#define pci_set_mwi(X) pci_write_config_word(X, \ + PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \ + PCI_COMMAND_INVALIDATE); +#define pci_clear_mwi(X) pci_write_config_word(X, \ + PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \ + ~PCI_COMMAND_INVALIDATE); +#endif + +/*****************************************************************************/ +/* 2.4.10 => 2.4.9 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 10)) + +/**************************************/ +/* MODULE API */ + +#ifndef MODULE_LICENSE + #define MODULE_LICENSE(X) +#endif + +/**************************************/ +/* OTHER */ + +#undef min +#define min(x, y) ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x < _y ? _x : _y; }) + +#undef max +#define max(x, y) ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x > _y ? _x : _y; }) + +#define min_t(type, x, y) ({ \ + type _x = (x); \ + type _y = (y); \ + _x < _y ? _x : _y; }) + +#define max_t(type, x, y) ({ \ + type _x = (x); \ + type _y = (y); \ + _x > _y ? _x : _y; }) + +#ifndef list_for_each_safe +#define list_for_each_safe(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) +#endif + +#ifndef ____cacheline_aligned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_aligned_in_smp ____cacheline_aligned +#else +#define ____cacheline_aligned_in_smp +#endif /* CONFIG_SMP */ +#endif + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) +int _kc_snprintf(char * buf, size_t size, const char *fmt, ...); +#define snprintf(buf, size, fmt, args...) 
_kc_snprintf(buf, size, fmt, ##args) +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args) +#else /* 2.4.8 => 2.4.9 */ +int snprintf(char * buf, size_t size, const char *fmt, ...); +int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#endif +#endif /* 2.4.10 -> 2.4.6 */ + + +/*****************************************************************************/ +/* 2.4.12 => 2.4.10 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 12)) +#ifndef HAVE_NETIF_MSG +#define HAVE_NETIF_MSG 1 +enum { + NETIF_MSG_DRV = 0x0001, + NETIF_MSG_PROBE = 0x0002, + NETIF_MSG_LINK = 0x0004, + NETIF_MSG_TIMER = 0x0008, + NETIF_MSG_IFDOWN = 0x0010, + NETIF_MSG_IFUP = 0x0020, + NETIF_MSG_RX_ERR = 0x0040, + NETIF_MSG_TX_ERR = 0x0080, + NETIF_MSG_TX_QUEUED = 0x0100, + NETIF_MSG_INTR = 0x0200, + NETIF_MSG_TX_DONE = 0x0400, + NETIF_MSG_RX_STATUS = 0x0800, + NETIF_MSG_PKTDATA = 0x1000, + NETIF_MSG_HW = 0x2000, + NETIF_MSG_WOL = 0x4000, +}; + +#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) +#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) +#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) +#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) +#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) +#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) +#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) +#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) +#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) +#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) +#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) +#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) +#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) +#endif /* !HAVE_NETIF_MSG */ +#endif /* 2.4.12 => 2.4.10 */ + +/*****************************************************************************/ +/* 2.4.13 => 2.4.12 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 13)) + +/**************************************/ +/* PCI DMA MAPPING */ + +#ifndef virt_to_page + #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT)) +#endif + +#ifndef pci_map_page +#define pci_map_page _kc_pci_map_page +u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction); +#endif + +#ifndef pci_unmap_page +#define pci_unmap_page _kc_pci_unmap_page +void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction); +#endif + +/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */ + +#undef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0xffffffff +#undef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffff + +/**************************************/ +/* OTHER */ + +#ifndef cpu_relax +#define cpu_relax() rep_nop() +#endif + +struct vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + unsigned short h_vlan_proto; + unsigned short h_vlan_TCI; + unsigned short h_vlan_encapsulated_proto; +}; +#endif /* 2.4.13 => 2.4.12 */ + +/*****************************************************************************/ +/* 2.4.17 => 2.4.12 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 17)) + +#ifndef __devexit_p + #define __devexit_p(x) &(x) +#endif + +#endif /* 2.4.17 => 2.4.13 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < 
KERNEL_VERSION(2, 4, 18)) +#define NETIF_MSG_HW 0x2000 +#define NETIF_MSG_WOL 0x4000 + +#ifndef netif_msg_hw +#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) +#endif +#ifndef netif_msg_wol +#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) +#endif +#endif /* 2.4.18 */ + +/*****************************************************************************/ + +/*****************************************************************************/ +/* 2.4.20 => 2.4.19 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 20)) + +/* we won't support NAPI on less than 2.4.20 */ +#ifdef NAPI +#undef NAPI +#endif + +#endif /* 2.4.20 => 2.4.19 */ + +/*****************************************************************************/ +/* 2.4.22 => 2.4.17 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) +#define pci_name(x) ((x)->slot_name) +#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map) + +#ifndef SUPPORTED_10000baseT_Full +#define SUPPORTED_10000baseT_Full BIT(12) +#endif +#ifndef ADVERTISED_10000baseT_Full +#define ADVERTISED_10000baseT_Full BIT(12) +#endif +#endif + +/*****************************************************************************/ +/* 2.4.22 => 2.4.17 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 22)) +#endif + +/*****************************************************************************/ +/*****************************************************************************/ +/* 2.4.23 => 2.4.22 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 23)) +/*****************************************************************************/ +#ifdef NAPI +#ifndef netif_poll_disable +#define netif_poll_disable(x) _kc_netif_poll_disable(x) +static inline void _kc_netif_poll_disable(struct net_device *netdev) +{ + while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) { + /* No hurry */ + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(1); + } +} +#endif +#ifndef netif_poll_enable +#define netif_poll_enable(x) _kc_netif_poll_enable(x) +static inline void _kc_netif_poll_enable(struct net_device *netdev) +{ + clear_bit(__LINK_STATE_RX_SCHED, &netdev->state); +} +#endif +#endif /* NAPI */ +#ifndef netif_tx_disable +#define netif_tx_disable(x) _kc_netif_tx_disable(x) +static inline void _kc_netif_tx_disable(struct net_device *dev) +{ + spin_lock_bh(&dev->xmit_lock); + netif_stop_queue(dev); + spin_unlock_bh(&dev->xmit_lock); +} +#endif +#else /* 2.4.23 => 2.4.22 */ +#define HAVE_SCTP +#endif /* 2.4.23 => 2.4.22 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 25) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 4))) +#define ETHTOOL_OPS_COMPAT +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 27)) +#define __user +#endif /* < 2.4.27 */ + +/*****************************************************************************/ +/* 2.5.71 => 2.4.x */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 71)) +#define sk_protocol protocol +#define pci_get_device pci_find_device +#endif /* 2.5.70 => 2.4.x */ + +/*****************************************************************************/ +/* < 2.4.27 or 2.6.0 <= 2.6.5 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 27) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 5))) + +#ifndef netif_msg_init +#define 
netif_msg_init _kc_netif_msg_init +static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits) +{ + /* use default */ + if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) + return default_msg_enable_bits; + if (debug_value == 0) /* no output */ + return 0; + /* set low N bits */ + return (1 << debug_value) - 1; +} +#endif + +#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */ +/*****************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 27)) || \ + ((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 3)))) +#define netdev_priv(x) x->priv +#endif + +/*****************************************************************************/ +/* <= 2.5.0 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)) +#include +#undef pci_register_driver +#define pci_register_driver pci_module_init + +/* + * Most of the dma compat code is copied/modifed from the 2.4.37 + * /include/linux/libata-compat.h header file + */ +/* These definitions mirror those in pci.h, so they can be used + * interchangeably with their PCI_ counterparts */ +enum dma_data_direction { + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; + +struct device { + struct pci_dev pdev; +}; + +static inline struct pci_dev *to_pci_dev (struct device *dev) +{ + return (struct pci_dev *) dev; +} +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return (struct device *) pdev; +} +#define pdev_printk(lvl, pdev, fmt, args...) \ + printk("%s %s: " fmt, lvl, pci_name(pdev), ## args) +#define dev_err(dev, fmt, args...) \ + pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args) +#define dev_info(dev, fmt, args...) \ + pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args) +#define dev_warn(dev, fmt, args...) \ + pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args) +#define dev_notice(dev, fmt, args...) \ + pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args) +#define dev_dbg(dev, fmt, args...) \ + pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args) + +/* NOTE: dangerous! 
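These wrappers fall back to the legacy pci_* DMA API, which has no gfp parameter, so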
we ignore the 'gfp' argument */ +#define dma_alloc_coherent(dev, sz, dma, gfp) \ + pci_alloc_consistent(to_pci_dev(dev), (sz), (dma)) +#define dma_free_coherent(dev, sz, addr, dma_addr) \ + pci_free_consistent(to_pci_dev(dev), (sz), (addr), (dma_addr)) + +#define dma_map_page(dev, a, b, c, d) \ + pci_map_page(to_pci_dev(dev), (a), (b), (c), (d)) +#define dma_unmap_page(dev, a, b, c) \ + pci_unmap_page(to_pci_dev(dev), (a), (b), (c)) + +#define dma_map_single(dev, a, b, c) \ + pci_map_single(to_pci_dev(dev), (a), (b), (c)) +#define dma_unmap_single(dev, a, b, c) \ + pci_unmap_single(to_pci_dev(dev), (a), (b), (c)) + +#define dma_map_sg(dev, sg, nents, dir) \ + pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir)) +#define dma_unmap_sg(dev, sg, nents, dir) \ + pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir)) + +#define dma_sync_single(dev, a, b, c) \ + pci_dma_sync_single(to_pci_dev(dev), (a), (b), (c)) + +/* for range just sync everything, that's all the pci API can do */ +#define dma_sync_single_range(dev, addr, off, sz, dir) \ + pci_dma_sync_single(to_pci_dev(dev), (addr), (off) + (sz), (dir)) + +#define dma_set_mask(dev, mask) \ + pci_set_dma_mask(to_pci_dev(dev), (mask)) + +/* hlist_* code - double linked lists */ +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next, **pprev; +}; + +static inline void __hlist_del(struct hlist_node *n) +{ + struct hlist_node *next = n->next; + struct hlist_node **pprev = n->pprev; + *pprev = next; + if (next) + next->pprev = pprev; +} + +static inline void hlist_del(struct hlist_node *n) +{ + __hlist_del(n); + n->next = NULL; + n->pprev = NULL; +} + +static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) +{ + struct hlist_node *first = h->first; + n->next = first; + if (first) + first->pprev = &n->next; + h->first = n; + n->pprev = &h->first; +} + +static inline int hlist_empty(const struct hlist_head *h) +{ + return !h->first; +} +#define HLIST_HEAD_INIT { .first = NULL } +#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } +#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) +static inline void INIT_HLIST_NODE(struct hlist_node *h) +{ + h->next = NULL; + h->pprev = NULL; +} + +#ifndef might_sleep +#define might_sleep() +#endif +#else +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return &pdev->dev; +} +#endif /* <= 2.5.0 */ + +/*****************************************************************************/ +/* 2.5.28 => 2.4.23 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 28)) + +#include +#define work_struct tq_struct +#undef INIT_WORK +#define INIT_WORK(a, b) INIT_TQUEUE(a, (void (*)(void *))b, a) +#undef container_of +#define container_of list_entry +#define schedule_work schedule_task +#define flush_scheduled_work flush_scheduled_tasks +#define cancel_work_sync(x) flush_scheduled_work() + +#endif /* 2.5.28 => 2.4.17 */ + +/*****************************************************************************/ +/* 2.6.0 => 2.5.28 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +#ifndef read_barrier_depends +#define read_barrier_depends() rmb() +#endif + +#ifndef rcu_head +struct __kc_callback_head { + struct __kc_callback_head *next; + void (*func)(struct callback_head *head); +}; +#define rcu_head __kc_callback_head +#endif + +#undef get_cpu +#define get_cpu() smp_processor_id() +#undef put_cpu +#define put_cpu() do { } while (0) +#define MODULE_INFO(version, _version) +#ifndef CONFIG_T1000_DISABLE_PACKET_SPLIT +#define
CONFIG_T1000_DISABLE_PACKET_SPLIT 1 +#endif +#ifndef CONFIG_TGB_DISABLE_PACKET_SPLIT +#define CONFIG_TGB_DISABLE_PACKET_SPLIT 1 +#endif +#ifndef CONFIG_TGC_DISABLE_PACKET_SPLIT +#define CONFIG_TGC_DISABLE_PACKET_SPLIT 1 +#endif + +#define dma_set_coherent_mask(dev, mask) 1 + +#undef dev_put +#define dev_put(dev) __dev_put(dev) + +#ifndef skb_fill_page_desc +#define skb_fill_page_desc _kc_skb_fill_page_desc +void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size); +#endif + +#undef ALIGN +#define ALIGN(x, a) (((x)+(a)-1)&~((a)-1)) + +#ifndef page_count +#define page_count(p) atomic_read(&(p)->count) +#endif + +#ifdef MAX_NUMNODES +#undef MAX_NUMNODES +#endif +#define MAX_NUMNODES 1 + +/* find_first_bit and find_next bit are not defined for most + * 2.4 kernels (except for the redhat 2.4.21 kernels + */ +#include +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) +#undef find_next_bit +#define find_next_bit _kc_find_next_bit +unsigned long _kc_find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset); +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) + +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (strchr(dev->name, '%')) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#ifndef strlcpy +#define strlcpy _kc_strlcpy +size_t _kc_strlcpy(char *dest, const char *src, size_t size); +#endif /* strlcpy */ + +#ifndef do_div +#if BITS_PER_LONG == 64 +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + __rem = ((uint64_t)(n)) % __base; \ + (n) = ((uint64_t)(n)) / __base; \ + __rem; \ + }) +#elif BITS_PER_LONG == 32 +uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor); +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + if (likely(((n) >> 32) == 0)) { \ + __rem = (uint32_t)(n) % __base; \ + (n) = (uint32_t)(n) / __base; \ + } else \ + __rem = _kc__div64_32(&(n), __base); \ + __rem; \ + }) +#else /* BITS_PER_LONG == ?? 
*/ +# error do_div() does not yet support the C64 +#endif /* BITS_PER_LONG */ +#endif /* do_div */ + +#ifndef NSEC_PER_SEC +#define NSEC_PER_SEC 1000000000L +#endif + +#undef HAVE_I2C_SUPPORT +#else /* 2.6.0 */ + +#endif /* 2.6.0 => 2.5.28 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ) +#define dma_pool pci_pool +#define dma_pool_destroy pci_pool_destroy +#define dma_pool_alloc pci_pool_alloc +#define dma_pool_free pci_pool_free + +#define dma_pool_create(name,dev,size,align,allocation) \ + pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation)) +#endif /* < 2.6.3 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +#define MODULE_VERSION(_version) MODULE_INFO(version, _version) +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +/* 2.6.5 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) +#define dma_sync_single_for_cpu dma_sync_single +#define dma_sync_single_for_device dma_sync_single +#define dma_sync_single_range_for_cpu dma_sync_single_range +#define dma_sync_single_range_for_device dma_sync_single_range +#ifndef pci_dma_mapping_error +#define pci_dma_mapping_error _kc_pci_dma_mapping_error +static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr) +{ + return dma_addr == 0; +} +#endif +#endif /* 2.6.5 => 2.6.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...); +#define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args) +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) ) +/* taken from 2.6 include/linux/bitmap.h */ +#undef bitmap_zero +#define bitmap_zero _kc_bitmap_zero +static inline void _kc_bitmap_zero(unsigned long *dst, int nbits) +{ + if (nbits <= BITS_PER_LONG) + *dst = 0UL; + else { + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memset(dst, 0, len); + } +} +#define page_to_nid(x) 0 + +#endif /* < 2.6.6 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) ) +#undef if_mii +#define if_mii _kc_if_mii +static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq) +{ + return (struct mii_ioctl_data *) &rq->ifr_ifru; +} + +#ifndef __force +#define __force +#endif +#endif /* < 2.6.7 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) +#ifndef PCI_EXP_DEVCTL +#define PCI_EXP_DEVCTL 8 +#endif +#ifndef PCI_EXP_DEVCTL_CERE +#define PCI_EXP_DEVCTL_CERE 0x0001 +#endif +#define PCI_EXP_FLAGS 2 /* Capabilities register */ +#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */ +#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ +#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ +#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */ +#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ +#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ +#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ +#define PCI_EXP_DEVCAP 4 /* Device capabilities */ +#define PCI_EXP_DEVSTA 10 /* Device Status */ +#define msleep(x) do { 
set_current_state(TASK_UNINTERRUPTIBLE); \ + schedule_timeout((x * HZ)/1000 + 2); \ + } while (0) + +#endif /* < 2.6.8 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)) +#include +#define __iomem + +#ifndef kcalloc +#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags) +void *_kc_kzalloc(size_t size, int flags); +#endif +#define MSEC_PER_SEC 1000L +static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j) +{ +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (MSEC_PER_SEC / HZ) * j; +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); +#else + return (j * MSEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return m * (HZ / MSEC_PER_SEC); +#else + return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; +#endif +} + +#define msleep_interruptible _kc_msleep_interruptible +static inline unsigned long _kc_msleep_interruptible(unsigned int msecs) +{ + unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1; + + while (timeout && !signal_pending(current)) { + __set_current_state(TASK_INTERRUPTIBLE); + timeout = schedule_timeout(timeout); + } + return _kc_jiffies_to_msecs(timeout); +} + +/* Basic mode control register. */ +#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ + +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 +#endif +#ifndef __be16 +#define __be16 u16 +#endif +#ifndef __be32 +#define __be32 u32 +#endif +#ifndef __be64 +#define __be64 u64 +#endif + +static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) +{ + return (struct vlan_ethhdr *)skb->mac.raw; +} + +/* Wake-On-Lan options. */ +#define WAKE_PHY BIT(0) +#define WAKE_UCAST BIT(1) +#define WAKE_MCAST BIT(2) +#define WAKE_BCAST BIT(3) +#define WAKE_ARP BIT(4) +#define WAKE_MAGIC BIT(5) +#define WAKE_MAGICSECURE BIT(6) /* only meaningful if WAKE_MAGIC */ + +#define skb_header_pointer _kc_skb_header_pointer +static inline void *_kc_skb_header_pointer(const struct sk_buff *skb, + int offset, int len, void *buffer) +{ + int hlen = skb_headlen(skb); + + if (hlen - offset >= len) + return skb->data + offset; + +#ifdef MAX_SKB_FRAGS + if (skb_copy_bits(skb, offset, buffer, len) < 0) + return NULL; + + return buffer; +#else + return NULL; +#endif + +#ifndef NETDEV_TX_OK +#define NETDEV_TX_OK 0 +#endif +#ifndef NETDEV_TX_BUSY +#define NETDEV_TX_BUSY 1 +#endif +#ifndef NETDEV_TX_LOCKED +#define NETDEV_TX_LOCKED -1 +#endif +} + +#ifndef __bitwise +#define __bitwise +#endif +#endif /* < 2.6.9 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) +#ifdef module_param_array_named +#undef module_param_array_named +#define module_param_array_named(name, array, type, nump, perm) \ + static struct kparam_array __param_arr_##name \ + = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \ + sizeof(array[0]), array }; \ + module_param_call(name, param_array_set, param_array_get, \ + &__param_arr_##name, perm) +#endif /* module_param_array_named */ +/* + * num_online is broken for all < 2.6.10 kernels. 
This is needed to support + * Node module parameter of ixgbe. + */ +#undef num_online_nodes +#define num_online_nodes(n) 1 +extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES); +#undef node_online_map +#define node_online_map _kcompat_node_online_map +#define pci_get_class pci_find_class +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) ) +#define PCI_D0 0 +#define PCI_D1 1 +#define PCI_D2 2 +#define PCI_D3hot 3 +#define PCI_D3cold 4 +typedef int pci_power_t; +#define pci_choose_state(pdev,state) state +#define PMSG_SUSPEND 3 +#define PCI_EXP_LNKCTL 16 + +#undef NETIF_F_LLTX + +#ifndef ARCH_HAS_PREFETCH +#define prefetch(X) +#endif + +#ifndef NET_IP_ALIGN +#define NET_IP_ALIGN 2 +#endif + +#define KC_USEC_PER_SEC 1000000L +#define usecs_to_jiffies _kc_usecs_to_jiffies +static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j) +{ +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (KC_USEC_PER_SEC / HZ) * j; +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC); +#else + return (j * KC_USEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ); +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return m * (HZ / KC_USEC_PER_SEC); +#else + return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC; +#endif +} + +#define PCI_EXP_LNKCAP 12 /* Link Capabilities */ +#define PCI_EXP_LNKSTA 18 /* Link Status */ +#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ +#define PCI_EXP_SLTCTL 24 /* Slot Control */ +#define PCI_EXP_SLTSTA 26 /* Slot Status */ +#define PCI_EXP_RTCTL 28 /* Root Control */ +#define PCI_EXP_RTCAP 30 /* Root Capabilities */ +#define PCI_EXP_RTSTA 32 /* Root Status */ +#endif /* < 2.6.11 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) ) +#include +#define USE_REBOOT_NOTIFIER + +/* Generic MII registers. */ +#define MII_CTRL1000 0x09 /* 1000BASE-T control */ +#define MII_STAT1000 0x0a /* 1000BASE-T status */ +/* Advertisement control register. */ +#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ +#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */ +/* Link partner ability register. 
*/ +#define LPA_PAUSE_CAP 0x0400 /* Can pause */ +#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */ +/* 1000BASE-T Control register */ +#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ +#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ +/* 1000BASE-T Status register */ +#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ +#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ + +#ifndef is_zero_ether_addr +#define is_zero_ether_addr _kc_is_zero_ether_addr +static inline int _kc_is_zero_ether_addr(const u8 *addr) +{ + return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); +} +#endif /* is_zero_ether_addr */ +#ifndef is_multicast_ether_addr +#define is_multicast_ether_addr _kc_is_multicast_ether_addr +static inline int _kc_is_multicast_ether_addr(const u8 *addr) +{ + return addr[0] & 0x01; +} +#endif /* is_multicast_ether_addr */ +#endif /* < 2.6.12 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) +#ifndef kstrdup +#define kstrdup _kc_kstrdup +char *_kc_kstrdup(const char *s, unsigned int gfp); +#endif +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) +#define pm_message_t u32 +#ifndef kzalloc +#define kzalloc _kc_kzalloc +void *_kc_kzalloc(size_t size, int flags); +#endif + +/* Generic MII registers. */ +#define MII_ESTATUS 0x0f /* Extended Status */ +/* Basic mode status register. */ +#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ +/* Extended status register. */ +#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ +#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ + +#define SUPPORTED_Pause BIT(13) +#define SUPPORTED_Asym_Pause BIT(14) +#define ADVERTISED_Pause BIT(13) +#define ADVERTISED_Asym_Pause BIT(14) + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)))) +#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t)) +#define gfp_t unsigned +#else +typedef unsigned gfp_t; +#endif +#endif /* !RHEL4.3->RHEL5.0 */ + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) ) +#ifdef CONFIG_X86_64 +#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \ + dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir)) +#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \ + dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir)) +#endif +#endif +#endif /* < 2.6.14 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) ) +#ifndef kfree_rcu +/* this is placed here due to a lack of rcu_barrier in previous kernels */ +#define kfree_rcu(_ptr, _offset) kfree(_ptr) +#endif /* kfree_rcu */ +#ifndef vmalloc_node +#define vmalloc_node(a,b) vmalloc(a) +#endif /* vmalloc_node*/ + +#define setup_timer(_timer, _function, _data) \ +do { \ + (_timer)->function = _function; \ + (_timer)->data = _data; \ + init_timer(_timer); \ +} while (0) +#ifndef device_can_wakeup +#define device_can_wakeup(dev) (1) +#endif +#ifndef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) do{}while(0) +#endif +#ifndef device_init_wakeup +#define device_init_wakeup(dev,val) do {} while (0) +#endif +static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2) +{ + 
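/* Compare the two MAC addresses as three 16-bit words; non-zero means they differ (same contract as the upstream compare_ether_addr()). */ +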
const u16 *a = (const u16 *) addr1; + const u16 *b = (const u16 *) addr2; + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; +} +#undef compare_ether_addr +#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2) +#endif /* < 2.6.15 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) ) +#undef DEFINE_MUTEX +#define DEFINE_MUTEX(x) DECLARE_MUTEX(x) +#define mutex_lock(x) down_interruptible(x) +#define mutex_unlock(x) up(x) + +#ifndef ____cacheline_internodealigned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp +#else +#define ____cacheline_internodealigned_in_smp +#endif /* CONFIG_SMP */ +#endif /* ____cacheline_internodealigned_in_smp */ +#undef HAVE_PCI_ERS +#else /* 2.6.16 and above */ +#undef HAVE_PCI_ERS +#define HAVE_PCI_ERS +#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) ) +#ifdef device_can_wakeup +#undef device_can_wakeup +#endif /* device_can_wakeup */ +#define device_can_wakeup(dev) 1 +#endif /* SLE_VERSION(10,4,0) */ +#endif /* < 2.6.16 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) ) +#ifndef dev_notice +#define dev_notice(dev, fmt, args...) \ + dev_printk(KERN_NOTICE, dev, fmt, ## args) +#endif + +#ifndef first_online_node +#define first_online_node 0 +#endif +#ifndef NET_SKB_PAD +#define NET_SKB_PAD 16 +#endif +#endif /* < 2.6.17 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) ) + +#ifndef IRQ_HANDLED +#define irqreturn_t void +#define IRQ_HANDLED +#define IRQ_NONE +#endif + +#ifndef IRQF_PROBE_SHARED +#ifdef SA_PROBEIRQ +#define IRQF_PROBE_SHARED SA_PROBEIRQ +#else +#define IRQF_PROBE_SHARED 0 +#endif +#endif + +#ifndef IRQF_SHARED +#define IRQF_SHARED SA_SHIRQ +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#ifndef skb_is_gso +#ifdef NETIF_F_TSO +#define skb_is_gso _kc_skb_is_gso +static inline int _kc_skb_is_gso(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_size; +} +#else +#define skb_is_gso(a) 0 +#endif +#endif + +#ifndef resource_size_t +#define resource_size_t unsigned long +#endif + +#ifdef skb_pad +#undef skb_pad +#endif +#define skb_pad(x,y) _kc_skb_pad(x, y) +int _kc_skb_pad(struct sk_buff *skb, int pad); +#ifdef skb_padto +#undef skb_padto +#endif +#define skb_padto(x,y) _kc_skb_padto(x, y) +static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + if(likely(size >= len)) + return 0; + return _kc_skb_pad(skb, len - size); +} + +#ifndef DECLARE_PCI_UNMAP_ADDR +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ + dma_addr_t ADDR_NAME +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ + u32 LEN_NAME +#define pci_unmap_addr(PTR, ADDR_NAME) \ + ((PTR)->ADDR_NAME) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ + (((PTR)->ADDR_NAME) = (VAL)) +#define pci_unmap_len(PTR, LEN_NAME) \ + ((PTR)->LEN_NAME) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ + (((PTR)->LEN_NAME) = (VAL)) +#endif /* DECLARE_PCI_UNMAP_ADDR */ +#endif /* < 2.6.18 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0x00, + PCIE_LNK_X1 = 0x01, + PCIE_LNK_X2 = 0x02, + PCIE_LNK_X4 = 0x04, + PCIE_LNK_X8 = 
0x08, + PCIE_LNK_X12 = 0x0C, + PCIE_LNK_X16 = 0x10, + PCIE_LNK_X32 = 0x20, + PCIE_LNK_WIDTH_UNKNOWN = 0xFF, +}; + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0))) +#define i_private u.generic_ip +#endif /* >= RHEL 5.0 */ + +#ifndef DIV_ROUND_UP +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#endif +#ifndef __ALIGN_MASK +#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#endif +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ) +#if (!((RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))))) +typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *); +#endif +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#undef CONFIG_INET_LRO +#undef CONFIG_INET_LRO_MODULE +#undef CONFIG_FCOE +#undef CONFIG_FCOE_MODULE +#endif +typedef irqreturn_t (*new_handler_t)(int, void*); +static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) +#else /* 2.4.x */ +typedef void (*irq_handler_t)(int, void*, struct pt_regs *); +typedef void (*new_handler_t)(int, void*); +static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) +#endif /* >= 2.5.x */ +{ + irq_handler_t new_handler = (irq_handler_t) handler; + return request_irq(irq, new_handler, flags, devname, dev_id); +} + +#undef request_irq +#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id)) + +#define irq_handler_t new_handler_t + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) ) +#ifndef skb_checksum_help +static inline int __kc_skb_checksum_help(struct sk_buff *skb) +{ + return skb_checksum_help(skb, 0); +} +#define skb_checksum_help(skb) __kc_skb_checksum_help((skb)) +#endif +#endif /* < 2.6.19 && >= 2.6.11 */ + +/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) +#define PCIE_CONFIG_SPACE_LEN 256 +#define PCI_CONFIG_SPACE_LEN 64 +#define PCIE_LINK_STATUS 0x12 +#define pci_config_space_ich8lan() do {} while(0) +#undef pci_save_state +int _kc_pci_save_state(struct pci_dev *); +#define pci_save_state(pdev) _kc_pci_save_state(pdev) +#undef pci_restore_state +void _kc_pci_restore_state(struct pci_dev *); +#define pci_restore_state(pdev) _kc_pci_restore_state(pdev) +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +#undef free_netdev +void _kc_free_netdev(struct net_device *); +#define free_netdev(netdev) _kc_free_netdev(netdev) +#endif +static inline int pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev) +{ + return 0; +} +#define pci_disable_pcie_error_reporting(dev) do {} while (0) +#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0) + +void *_kc_kmemdup(const void *src, size_t len, unsigned gfp); +#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp) +#ifndef bool +#define bool _Bool +#define true 1 +#define false 0 +#endif +#else /* 2.6.19 */ +#include +#include + +#define NEW_SKB_CSUM_HELP +#endif /* < 2.6.19 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ) +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) ) +#undef INIT_WORK +#define INIT_WORK(_work, _func) \ +do { \ + 
INIT_LIST_HEAD(&(_work)->entry); \ + (_work)->pending = 0; \ + (_work)->func = (void (*)(void *))_func; \ + (_work)->data = _work; \ + init_timer(&(_work)->timer); \ +} while (0) +#endif + +#ifndef PCI_VDEVICE +#define PCI_VDEVICE(ven, dev) \ + PCI_VENDOR_ID_##ven, (dev), \ + PCI_ANY_ID, PCI_ANY_ID, 0, 0 +#endif + +#ifndef PCI_VENDOR_ID_WANGXUN +#define PCI_VENDOR_ID_WANGXUN 0x8088 +#endif + +#ifndef round_jiffies +#define round_jiffies(x) x +#endif + +#define csum_offset csum + +#define HAVE_EARLY_VMALLOC_NODE +#define dev_to_node(dev) -1 +#undef set_dev_node +/* remove compiler warning with b=b, for unused variable */ +#define set_dev_node(a, b) do { (b) = (b); } while(0) + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +typedef __u16 __bitwise __sum16; +typedef __u32 __bitwise __wsum; +#endif + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +static inline __wsum csum_unfold(__sum16 n) +{ + return (__force __wsum)n; +} +#endif + +#else /* < 2.6.20 */ +#define HAVE_DEVICE_NUMA_NODE +#endif /* < 2.6.20 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +#define to_net_dev(class) container_of(class, struct net_device, class_dev) +#define NETDEV_CLASS_DEV +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5))) +#define vlan_group_get_device(vg, id) (vg->vlan_devices[id]) +#define vlan_group_set_device(vg, id, dev) \ + do { \ + if (vg) vg->vlan_devices[id] = dev; \ + } while (0) +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */ +#define pci_channel_offline(pdev) (pdev->error_state && \ + pdev->error_state != pci_channel_io_normal) +#define pci_request_selected_regions(pdev, bars, name) \ + pci_request_regions(pdev, name) +#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev); + +#ifndef __aligned +#define __aligned(x) __attribute__((aligned(x))) +#endif + +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev); +#define netdev_to_dev(netdev) \ + pci_dev_to_dev(_kc_netdev_to_pdev(netdev)) +#define devm_kzalloc(dev, size, flags) kzalloc(size, flags) +#define devm_kfree(dev, p) kfree(p) +#else /* 2.6.21 */ +static inline struct device *netdev_to_dev(struct net_device *netdev) +{ + return &netdev->dev; +} + +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +#define tcp_hdr(skb) (skb->h.th) +#define tcp_hdrlen(skb) (skb->h.th->doff << 2) +#define skb_transport_offset(skb) (skb->h.raw - skb->data) +#define skb_transport_header(skb) (skb->h.raw) +#define ipv6_hdr(skb) (skb->nh.ipv6h) +#define ip_hdr(skb) (skb->nh.iph) +#define skb_network_offset(skb) (skb->nh.raw - skb->data) +#define skb_network_header(skb) (skb->nh.raw) +#define skb_tail_pointer(skb) skb->tail +#define skb_reset_tail_pointer(skb) \ + do { \ + skb->tail = skb->data; \ + } while (0) +#define skb_set_tail_pointer(skb, offset) \ + do { \ + skb->tail = skb->data + offset; \ + } while (0) +#define skb_copy_to_linear_data(skb, from, len) \ + 
memcpy(skb->data, from, len) +#define skb_copy_to_linear_data_offset(skb, offset, from, len) \ + memcpy(skb->data + offset, from, len) +#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw) +#define pci_register_driver pci_module_init +#define skb_mac_header(skb) skb->mac.raw + +#ifdef NETIF_F_MULTI_QUEUE +#ifndef alloc_etherdev_mq +#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a) +#endif +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef ETH_FCS_LEN +#define ETH_FCS_LEN 4 +#endif +#define cancel_work_sync(x) flush_scheduled_work() +#ifndef udp_hdr +#define udp_hdr _udp_hdr +static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) +{ + return (struct udphdr *)skb_transport_header(skb); +} +#endif + +#ifdef cpu_to_be16 +#undef cpu_to_be16 +#endif +#define cpu_to_be16(x) __constant_htons(x) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1))) +enum { + DUMP_PREFIX_NONE, + DUMP_PREFIX_ADDRESS, + DUMP_PREFIX_OFFSET +}; +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */ +#ifndef hex_asc +#define hex_asc(x) "0123456789abcdef"[x] +#endif +#include +void _kc_print_hex_dump(const char *level, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii); +#define print_hex_dump(lvl, s, t, r, g, b, l, a) \ + _kc_print_hex_dump(lvl, s, t, r, g, b, l, a) +#ifndef ADVERTISED_2500baseX_Full +#define ADVERTISED_2500baseX_Full BIT(15) +#endif +#ifndef SUPPORTED_2500baseX_Full +#define SUPPORTED_2500baseX_Full BIT(15) +#endif + +#ifndef ETH_P_PAUSE +#define ETH_P_PAUSE 0x8808 +#endif + +static inline int compound_order(struct page *page) +{ + return 0; +} + +#define __must_be_array(a) 0 + +#ifndef SKB_WITH_OVERHEAD +#define SKB_WITH_OVERHEAD(X) \ + ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#endif +#else /* 2.6.22 */ +#define ETH_TYPE_TRANS_SETS_DEV +#define HAVE_NETDEV_STATS_IN_NETDEV +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) ) +#undef SET_MODULE_OWNER +#define SET_MODULE_OWNER(dev) do { } while (0) +#endif /* > 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) +#define netif_subqueue_stopped(_a, _b) 0 +#ifndef PTR_ALIGN +#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) +#endif + +#ifndef CONFIG_PM_SLEEP +#define CONFIG_PM_SLEEP CONFIG_PM +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) ) +#define HAVE_ETHTOOL_GET_PERM_ADDR +#endif /* 2.6.14 through 2.6.22 */ + +static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom) +{ + int delta = 0; + + if (headroom > (skb->data - skb->head)) + delta = headroom - (skb->data - skb->head); + + if (delta || skb_header_cloned(skb)) + return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, + GFP_ATOMIC); + return 0; +} +#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h)) +#endif /* < 2.6.23 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +#ifndef ETH_FLAG_LRO +#define ETH_FLAG_LRO NETIF_F_LRO +#endif + +#ifndef ACCESS_ONCE +#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) +#endif + +/* if GRO is supported then the napi struct must already exist */ +#ifndef NETIF_F_GRO +/* NAPI API changes in 2.6.24 break everything */ +struct napi_struct { + /* used to look up the real NAPI polling routine */ + 
	int (*poll)(struct napi_struct *, int);
+	struct net_device *dev;
+	int weight;
+};
+#endif
+
+#ifdef NAPI
+int __kc_adapter_clean(struct net_device *, int *);
+/* The following definitions are multi-queue aware, and thus we have a driver
+ * define list which determines which drivers support multiple queues, and
+ * thus need these stronger defines. If a driver does not support multi-queue
+ * functionality, you don't need to add it to this list.
+ */
+struct net_device *napi_to_poll_dev(const struct napi_struct *napi);
+
+static inline void __kc_mq_netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+					   int (*poll)(struct napi_struct *, int), int weight)
+{
+	struct net_device *poll_dev = napi_to_poll_dev(napi);
+	poll_dev->poll = __kc_adapter_clean;
+	poll_dev->priv = napi;
+	poll_dev->weight = weight;
+	set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state);
+	set_bit(__LINK_STATE_START, &poll_dev->state);
+	dev_hold(poll_dev);
+	napi->poll = poll;
+	napi->weight = weight;
+	napi->dev = dev;
+}
+#define netif_napi_add __kc_mq_netif_napi_add
+
+static inline void __kc_mq_netif_napi_del(struct napi_struct *napi)
+{
+	struct net_device *poll_dev = napi_to_poll_dev(napi);
+	WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state));
+	dev_put(poll_dev);
+	memset(poll_dev, 0, sizeof(struct net_device));
+}
+
+#define netif_napi_del __kc_mq_netif_napi_del
+
+static inline bool __kc_mq_napi_schedule_prep(struct napi_struct *napi)
+{
+	return netif_running(napi->dev) &&
+		netif_rx_schedule_prep(napi_to_poll_dev(napi));
+}
+#define napi_schedule_prep __kc_mq_napi_schedule_prep
+
+static inline void __kc_mq_napi_schedule(struct napi_struct *napi)
+{
+	if (napi_schedule_prep(napi))
+		__netif_rx_schedule(napi_to_poll_dev(napi));
+}
+#define napi_schedule __kc_mq_napi_schedule
+
+#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi))
+#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi))
+#ifdef CONFIG_SMP
+static inline void napi_synchronize(const struct napi_struct *n)
+{
+	struct net_device *dev = napi_to_poll_dev(n);
+
+	while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
+		/* No hurry. */
+		msleep(1);
+	}
+}
+#else
+#define napi_synchronize(n) barrier()
+#endif /* CONFIG_SMP */
+#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
+static inline void _kc_napi_complete(struct napi_struct *napi)
+{
+#ifdef NETIF_F_GRO
+	napi_gro_flush(napi);
+#endif
+	netif_rx_complete(napi_to_poll_dev(napi));
+}
+#define napi_complete _kc_napi_complete
+#else /* NAPI */
+
+/* The following definitions are only used if we don't support NAPI at all. */
+
+static inline void __kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+				       int (*poll)(struct napi_struct *, int), int weight)
+{
+	dev->poll = poll;
+	dev->weight = weight;
+	napi->poll = poll;
+	napi->weight = weight;
+	napi->dev = dev;
+}
+#define netif_napi_del(_a) do {} while (0)
+#endif /* NAPI */
+
+#undef dev_get_by_name
+#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
+#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
+#ifndef DMA_BIT_MASK
+#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1))
+#endif
+
+#ifdef NETIF_F_TSO6
+#define skb_is_gso_v6 _kc_skb_is_gso_v6
+static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
+}
+#endif /* NETIF_F_TSO6 */
+
+#ifndef KERN_CONT
+#define KERN_CONT ""
+#endif
+#ifndef pr_err
+#define pr_err(fmt, arg...)
\ + printk(KERN_ERR fmt, ##arg) +#endif + +#ifndef rounddown_pow_of_two +#define rounddown_pow_of_two(n) \ + __builtin_constant_p(n) ? ( \ + (n == 1) ? 0 : \ + (1UL << ilog2(n))) : \ + (1UL << (fls_long(n) - 1)) +#endif + +#else /* < 2.6.24 */ +#define HAVE_ETHTOOL_GET_SSET_COUNT +#define HAVE_NETDEV_NAPI_LIST +#endif /* < 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) +#define INCLUDE_PM_QOS_PARAMS_H +#include +#else /* >= 3.2.0 */ +#include +#endif /* else >= 3.2.0 */ +#endif /* > 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ) +#define PM_QOS_CPU_DMA_LATENCY 1 + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) ) +#include +#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY +#define pm_qos_add_requirement(pm_qos_class, name, value) \ + set_acceptable_latency(name, value) +#define pm_qos_remove_requirement(pm_qos_class, name) \ + remove_acceptable_latency(name) +#define pm_qos_update_requirement(pm_qos_class, name, value) \ + modify_acceptable_latency(name, value) +#else +#define PM_QOS_DEFAULT_VALUE -1 +#define pm_qos_add_requirement(pm_qos_class, name, value) +#define pm_qos_remove_requirement(pm_qos_class, name) +#define pm_qos_update_requirement(pm_qos_class, name, value) { \ + if (value != PM_QOS_DEFAULT_VALUE) { \ + printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \ + pci_name(adapter->pdev)); \ + } \ +} + +#endif /* > 2.6.18 */ + +#define pci_enable_device_mem(pdev) pci_enable_device(pdev) + +#ifndef DEFINE_PCI_DEVICE_TABLE +#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] +#endif /* DEFINE_PCI_DEVICE_TABLE */ + +#ifndef strict_strtol +#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r) +static inline int _kc_strict_strtol(const char *buf, unsigned int base, long *res) +{ + /* adapted from strict_strtoul() in 2.6.25 */ + char *tail; + long val; + size_t len; + + *res = 0; + len = strlen(buf); + if (!len) + return -EINVAL; + val = simple_strtol(buf, &tail, base); + if (tail == buf) + return -EINVAL; + if ((*tail == '\0') || + ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) { + *res = val; + return 0; + } + + return -EINVAL; +} +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#ifndef NGBE_PROCFS +#define NGBE_PROCFS +#endif /* NGBE_PROCFS */ +#endif /* >= 2.6.0 */ + +#else /* < 2.6.25 */ + +#ifndef NGBE_SYSFS +#define NGBE_SYSFS +#endif /* NGBE_SYSFS */ +#if IS_ENABLED(CONFIG_HWMON) +#ifndef NGBE_HWMON +#define NGBE_HWMON +#endif /* NGBE_HWMON */ +#endif /* CONFIG_HWMON */ + +#endif /* < 2.6.25 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)) +#ifndef clamp_t +#define clamp_t(type, val, min, max) ({ \ + type __val = (val); \ + type __min = (min); \ + type __max = (max); \ + __val = __val < __min ? __min : __val; \ + __val > __max ? 
__max : __val; }) +#endif /* clamp_t */ +#undef kzalloc_node +#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags) + +void _kc_pci_disable_link_state(struct pci_dev *dev, int state); +#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s) +#else /* < 2.6.26 */ +#define NETDEV_CAN_SET_GSO_MAX_SIZE +#ifdef HAVE_PCI_ASPM_H +#include +#endif +#define HAVE_NETDEV_VLAN_FEATURES +#ifndef PCI_EXP_LNKCAP_ASPMS +#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ +#endif /* PCI_EXP_LNKCAP_ASPMS */ +#endif /* < 2.6.26 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)speed; + /* ep->speed_hi = (__u16)(speed >> 16); */ +} +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set + +static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep) +{ + /* no speed_hi before 2.6.27, and probably no need for it yet */ + return (__u32)ep->speed; +} +#define ethtool_cmd_speed _kc_ethtool_cmd_speed + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) ) +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM)) +#define ANCIENT_PM 1 +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \ + defined(CONFIG_PM_SLEEP)) +#define NEWER_PM 1 +#endif +#if defined(ANCIENT_PM) || defined(NEWER_PM) +#undef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) \ + do { \ + u16 pmc = 0; \ + int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \ + if (pm) { \ + pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \ + &pmc); \ + } \ + (dev)->power.can_wakeup = !!(pmc >> 11); \ + (dev)->power.should_wakeup = (val && (pmc >> 11)); \ + } while (0) +#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */ +#endif /* 2.6.15 through 2.6.27 */ +#ifndef netif_napi_del +#define netif_napi_del(_a) do {} while (0) +#ifdef NAPI +#ifdef CONFIG_NETPOLL +#undef netif_napi_del +#define netif_napi_del(_a) list_del(&(_a)->dev_list); +#endif +#endif +#endif /* netif_napi_del */ +#ifdef dma_mapping_error +#undef dma_mapping_error +#endif +#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr) + +#ifdef CONFIG_NETDEVICES_MULTIQUEUE +#define HAVE_TX_MQ +#endif + +#ifndef DMA_ATTR_WEAK_ORDERING +#define DMA_ATTR_WEAK_ORDERING 0 +#endif + +#ifdef HAVE_TX_MQ +void _kc_netif_tx_stop_all_queues(struct net_device *); +void _kc_netif_tx_wake_all_queues(struct net_device *); +void _kc_netif_tx_start_all_queues(struct net_device *); +#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a) +#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a) +#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a) +#undef netif_stop_subqueue +#define netif_stop_subqueue(_ndev,_qi) do { \ + if (netif_is_multiqueue((_ndev))) \ + netif_stop_subqueue((_ndev), (_qi)); \ + else \ + netif_stop_queue((_ndev)); \ + } while (0) +#undef netif_start_subqueue +#define netif_start_subqueue(_ndev,_qi) do { \ + if (netif_is_multiqueue((_ndev))) \ + netif_start_subqueue((_ndev), (_qi)); \ + else \ + netif_start_queue((_ndev)); \ + } while (0) +#else /* HAVE_TX_MQ */ +#define netif_tx_stop_all_queues(a) netif_stop_queue(a) +#define netif_tx_wake_all_queues(a) netif_wake_queue(a) +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) ) +#define netif_tx_start_all_queues(a) netif_start_queue(a) 
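+/*
+ * Usage sketch (hypothetical driver code, not part of the compat layer
+ * itself): the netif_tx_*_all_queues wrappers in this section let a driver
+ * stop or restart its transmit queues the same way on single-queue and
+ * multi-queue kernels, e.g.:
+ *
+ *	static void wx_kc_stop_tx(struct net_device *netdev)
+ *	{
+ *		netif_tx_stop_all_queues(netdev);
+ *	}
+ *
+ * On kernels without multi-queue support this collapses to
+ * netif_stop_queue(netdev).
+ */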
+#else +#define netif_tx_start_all_queues(a) do {} while (0) +#endif +#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev)) +#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev)) +#endif /* HAVE_TX_MQ */ +#ifndef NETIF_F_MULTI_QUEUE +#define NETIF_F_MULTI_QUEUE 0 +#define netif_is_multiqueue(a) 0 +#define netif_wake_subqueue(a, b) +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef __WARN_printf +void __kc_warn_slowpath(const char *file, const int line, + const char *fmt, ...) __attribute__((format(printf, 3, 4))); +#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg) +#endif /* __WARN_printf */ + +#ifndef WARN +#define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf(format); \ + unlikely(__ret_warn_on); \ +}) +#endif /* WARN */ +#undef HAVE_NGBE_DEBUG_FS +#undef HAVE_TGB_DEBUG_FS +#else /* < 2.6.27 */ +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)(speed & 0xFFFF); + ep->speed_hi = (__u16)(speed >> 16); +} +#define HAVE_TX_MQ +#define HAVE_NETDEV_SELECT_QUEUE +#ifdef CONFIG_DEBUG_FS +#define HAVE_NGBE_DEBUG_FS +#define HAVE_TGB_DEBUG_FS +#endif /* CONFIG_DEBUG_FS */ +#endif /* < 2.6.27 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) +#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \ + pci_resource_len(pdev, bar)) +#define pci_wake_from_d3 _kc_pci_wake_from_d3 +#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep +int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable); +int _kc_pci_prepare_to_sleep(struct pci_dev *dev); +#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC) +#ifndef __skb_queue_head_init +static inline void __kc_skb_queue_head_init(struct sk_buff_head *list) +{ + list->prev = list->next = (struct sk_buff *)list; + list->qlen = 0; +} +#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q) +#endif + +#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ +#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ + +#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */ +#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */ + +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) +#ifndef swap +#define swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) +#endif +#define pci_request_selected_regions_exclusive(pdev, bars, name) \ + pci_request_selected_regions(pdev, bars, name) +#ifndef CONFIG_NR_CPUS +#define CONFIG_NR_CPUS 1 +#endif /* CONFIG_NR_CPUS */ +#ifndef pcie_aspm_enabled +#define pcie_aspm_enabled() (1) +#endif /* pcie_aspm_enabled */ + +#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ + +#ifndef PCI_EXP_LNKSTA_CLS +#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ +#endif +#ifndef PCI_EXP_LNKSTA_NLW +#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ +#endif + +#ifndef pci_clear_master +void _kc_pci_clear_master(struct pci_dev *dev); +#define pci_clear_master(dev) _kc_pci_clear_master(dev) +#endif + +#ifndef PCI_EXP_LNKCTL_ASPMC +#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ +#endif + +#ifndef PCI_EXP_LNKCAP_MLW +#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */ +#endif + +#else /* < 2.6.29 */ +#ifndef 
HAVE_NET_DEVICE_OPS +#define HAVE_NET_DEVICE_OPS +#endif +#ifdef CONFIG_DCB +#define HAVE_PFC_MODE_ENABLE +#endif /* CONFIG_DCB */ +#endif /* < 2.6.29 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) ) +#define NO_PTP_SUPPORT +#define skb_rx_queue_recorded(a) false +#define skb_get_rx_queue(a) 0 +#define skb_record_rx_queue(a, b) do {} while (0) +#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues) +#undef CONFIG_FCOE +#undef CONFIG_FCOE_MODULE +#ifndef CONFIG_PCI_IOV +#undef pci_enable_sriov +#define pci_enable_sriov(a, b) -ENOTSUPP +#undef pci_disable_sriov +#define pci_disable_sriov(a) do {} while (0) +#endif /* CONFIG_PCI_IOV */ +#ifndef pr_cont +#define pr_cont(fmt, ...) \ + printk(KERN_CONT fmt, ##__VA_ARGS__) +#endif /* pr_cont */ +static inline void _kc_synchronize_irq(unsigned int a) +{ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) + synchronize_irq(); +#else /* < 2.5.28 */ + synchronize_irq(a); +#endif /* < 2.5.28 */ +} +#undef synchronize_irq +#define synchronize_irq(a) _kc_synchronize_irq(a) + +#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ + +#ifdef nr_cpus_node +#undef nr_cpus_node +#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node)) +#endif + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5)) +#define HAVE_PCI_DEV_IS_VIRTFN_BIT +#endif /* RHEL >= 5.5 */ + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5))) +static inline bool pci_is_root_bus(struct pci_bus *pbus) +{ + return !(pbus->parent); +} +#endif + +#else /* < 2.6.30 */ +#define HAVE_ASPM_QUIRKS +#define HAVE_PCI_DEV_IS_VIRTFN_BIT +#endif /* < 2.6.30 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ) +#define ETH_P_1588 0x88F7 +#define ETH_P_FIP 0x8914 +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc_count) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(uclist, dev) \ + for (uclist = dev->uc_list; uclist; uclist = uclist->next) +#endif +#ifndef PORT_OTHER +#define PORT_OTHER 0xff +#endif +#ifndef MDIO_PHY_ID_PRTAD +#define MDIO_PHY_ID_PRTAD 0x03e0 +#endif +#ifndef MDIO_PHY_ID_DEVAD +#define MDIO_PHY_ID_DEVAD 0x001f +#endif +#ifndef skb_dst +#define skb_dst(s) ((s)->dst) +#endif + +#ifndef SUPPORTED_1000baseKX_Full +#define SUPPORTED_1000baseKX_Full BIT(17) +#endif +#ifndef SUPPORTED_10000baseKX4_Full +#define SUPPORTED_10000baseKX4_Full BIT(18) +#endif +#ifndef SUPPORTED_10000baseKR_Full +#define SUPPORTED_10000baseKR_Full BIT(19) +#endif + +#ifndef ADVERTISED_1000baseKX_Full +#define ADVERTISED_1000baseKX_Full BIT(17) +#endif +#ifndef ADVERTISED_10000baseKX4_Full +#define ADVERTISED_10000baseKX4_Full BIT(18) +#endif +#ifndef ADVERTISED_10000baseKR_Full +#define ADVERTISED_10000baseKR_Full BIT(19) +#endif + +static inline unsigned long dev_trans_start(struct net_device *dev) +{ + return dev->trans_start; +} +#else /* < 2.6.31 */ +#ifndef HAVE_NETDEV_STORAGE_ADDRESS +#define HAVE_NETDEV_STORAGE_ADDRESS +#endif +#ifndef HAVE_NETDEV_HW_ADDR +#define HAVE_NETDEV_HW_ADDR +#endif +#ifndef HAVE_TRANS_START_IN_QUEUE +#define HAVE_TRANS_START_IN_QUEUE +#endif +#ifndef HAVE_INCLUDE_LINUX_MDIO_H +#define HAVE_INCLUDE_LINUX_MDIO_H +#endif +#include +#endif /* < 2.6.31 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) ) +#undef 
netdev_tx_t +#define netdev_tx_t int +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef NETIF_F_FCOE_MTU +#define NETIF_F_FCOE_MTU BIT(26) +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline int _kc_pm_runtime_get_sync() +{ + return 1; +} +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync() +#else /* 2.6.0 => 2.6.32 */ +static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev) +{ + return 1; +} +#ifndef pm_runtime_get_sync +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev) +#endif +#endif /* 2.6.0 => 2.6.32 */ +#ifndef pm_runtime_put +#define pm_runtime_put(dev) do {} while (0) +#endif +#ifndef pm_runtime_put_sync +#define pm_runtime_put_sync(dev) do {} while (0) +#endif +#ifndef pm_runtime_resume +#define pm_runtime_resume(dev) do {} while (0) +#endif +#ifndef pm_schedule_suspend +#define pm_schedule_suspend(dev, t) do {} while (0) +#endif +#ifndef pm_runtime_set_suspended +#define pm_runtime_set_suspended(dev) do {} while (0) +#endif +#ifndef pm_runtime_disable +#define pm_runtime_disable(dev) do {} while (0) +#endif +#ifndef pm_runtime_put_noidle +#define pm_runtime_put_noidle(dev) do {} while (0) +#endif +#ifndef pm_runtime_set_active +#define pm_runtime_set_active(dev) do {} while (0) +#endif +#ifndef pm_runtime_enable +#define pm_runtime_enable(dev) do {} while (0) +#endif +#ifndef pm_runtime_get_noresume +#define pm_runtime_get_noresume(dev) do {} while (0) +#endif +#else /* < 2.6.32 */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_NET_DEVICE_EXTENDED +#endif /* RHEL >= 6.2 && RHEL < 7.0 */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_NET_DEVICE_OPS_EXT +#define HAVE_NDO_SET_FEATURES +#endif /* RHEL >= 6.6 && RHEL < 7.0 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE +#define HAVE_NETDEV_OPS_FCOE_ENABLE +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_OPS_GETAPP +#define HAVE_DCBNL_OPS_GETAPP +#endif +#endif /* CONFIG_DCB */ +#include +/* IOV bad DMA target work arounds require at least this kernel rev support */ +#define HAVE_PCIE_TYPE +#endif /* < 2.6.32 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) +#ifndef pci_pcie_cap +#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP) +#endif +#ifndef IPV4_FLOW +#define IPV4_FLOW 0x10 +#endif /* IPV4_FLOW */ +#ifndef IPV6_FLOW +#define IPV6_FLOW 0x11 +#endif /* IPV6_FLOW */ +/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */ +#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) ) +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* RHEL6 or SLES11 SP1 */ +#ifndef __percpu +#define __percpu +#endif /* __percpu */ + +#ifndef PORT_DA +#define PORT_DA PORT_OTHER +#endif /* PORT_DA */ +#ifndef PORT_NONE +#define PORT_NONE PORT_OTHER +#endif + +#if ((RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 3)) && \ + (RHEL_RELEASE_CODE < 
RHEL_RELEASE_VERSION(7, 0)))) +#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE) +#undef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME +#undef DEFINE_DMA_UNMAP_LEN +#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME +#undef dma_unmap_addr +#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#undef dma_unmap_addr_set +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#undef dma_unmap_len +#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#undef dma_unmap_len_set +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) +#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */ +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 8)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 0))) || \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 1)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)))))) +static inline bool pci_is_pcie(struct pci_dev *dev) +{ + return !!pci_pcie_cap(dev); +} +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 2)))) +#define sk_tx_queue_get(_sk) (-1) +#define sk_tx_queue_set(_sk, _tx_queue) do {} while (0) +#endif /* !(RHEL >= 6.2) */ + +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0))) +#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_ETHTOOL_SET_PHYS_ID +#define HAVE_ETHTOOL_GET_TS_INFO +#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 5)) +#define HAVE_ETHTOOL_GSRSSH +#define HAVE_RHEL6_SRIOV_CONFIGURE +#define HAVE_RXFH_NONCONST +#endif /* RHEL > 6.5 */ +#endif /* RHEL >= 6.4 && RHEL < 7.0 */ + +#else /* < 2.6.33 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* < 2.6.33 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34)) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 0)) +#ifndef pci_num_vf +#define pci_num_vf(pdev) _kc_pci_num_vf(pdev) +int _kc_pci_num_vf(struct pci_dev *dev); +#endif +#endif /* RHEL_RELEASE_CODE */ + +#ifndef dev_is_pci +#define dev_is_pci(d) ((d)->bus == &pci_bus_type) +#endif + +#ifndef ETH_FLAG_NTUPLE +#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE +#endif + +#ifndef netdev_mc_count +#define netdev_mc_count(dev) ((dev)->mc_count) +#endif +#ifndef netdev_mc_empty +#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) +#endif +#ifndef netdev_for_each_mc_addr +#define netdev_for_each_mc_addr(mclist, dev) \ + for (mclist = dev->mc_list; mclist; mclist = mclist->next) +#endif +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc.count) +#endif +#ifndef netdev_uc_empty +#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(ha, dev) \ + list_for_each_entry(ha, &dev->uc.list, list) +#endif +#ifndef dma_set_coherent_mask +#define dma_set_coherent_mask(dev, mask) \ + pci_set_consistent_dma_mask(to_pci_dev(dev), (mask)) +#endif +#ifndef pci_dev_run_wake +#define pci_dev_run_wake(pdev) (0) +#endif + +/* netdev logging taken from include/linux/netdevice.h */ +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if 
(dev->reg_state != NETREG_REGISTERED) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#undef netdev_printk +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + printk(level "%s: " format, pci_name(pdev), ##args); \ +} while (0) +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21)) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + struct device *dev = pci_dev_to_dev(pdev); \ + dev_printk(level, dev, "%s: " format, \ + netdev_name(netdev), ##args); \ +} while (0) +#else /* 2.6.21 => 2.6.34 */ +#define netdev_printk(level, netdev, format, args...) \ + dev_printk(level, (netdev)->dev.parent, \ + "%s: " format, \ + netdev_name(netdev), ##args) +#endif /* <2.6.0 <2.6.21 <2.6.34 */ +#undef netdev_emerg +#define netdev_emerg(dev, format, args...) \ + netdev_printk(KERN_EMERG, dev, format, ##args) +#undef netdev_alert +#define netdev_alert(dev, format, args...) \ + netdev_printk(KERN_ALERT, dev, format, ##args) +#undef netdev_crit +#define netdev_crit(dev, format, args...) \ + netdev_printk(KERN_CRIT, dev, format, ##args) +#undef netdev_err +#define netdev_err(dev, format, args...) \ + netdev_printk(KERN_ERR, dev, format, ##args) +#undef netdev_warn +#define netdev_warn(dev, format, args...) \ + netdev_printk(KERN_WARNING, dev, format, ##args) +#undef netdev_notice +#define netdev_notice(dev, format, args...) \ + netdev_printk(KERN_NOTICE, dev, format, ##args) +#undef netdev_info +#define netdev_info(dev, format, args...) \ + netdev_printk(KERN_INFO, dev, format, ##args) +#undef netdev_dbg +#if defined(DEBUG) +#define netdev_dbg(__dev, format, args...) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args) +#elif defined(CONFIG_DYNAMIC_DEBUG) +#define netdev_dbg(__dev, format, args...) \ +do { \ + dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ + netdev_name(__dev), ##args); \ +} while (0) +#else /* DEBUG */ +#define netdev_dbg(__dev, format, args...) \ +({ \ + if (0) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args); \ + 0; \ +}) +#endif /* DEBUG */ + +#undef netif_printk +#define netif_printk(priv, type, level, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_printk(level, (dev), fmt, ##args); \ +} while (0) + +#undef netif_emerg +#define netif_emerg(priv, type, dev, fmt, args...) \ + netif_level(emerg, priv, type, dev, fmt, ##args) +#undef netif_alert +#define netif_alert(priv, type, dev, fmt, args...) \ + netif_level(alert, priv, type, dev, fmt, ##args) +#undef netif_crit +#define netif_crit(priv, type, dev, fmt, args...) \ + netif_level(crit, priv, type, dev, fmt, ##args) +#undef netif_err +#define netif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, fmt, ##args) +#undef netif_warn +#define netif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, fmt, ##args) +#undef netif_notice +#define netif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, fmt, ##args) +#undef netif_info +#define netif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, fmt, ##args) +#undef netif_dbg +#define netif_dbg(priv, type, dev, fmt, args...) 
\ + netif_level(dbg, priv, type, dev, fmt, ##args) + +#ifdef SET_SYSTEM_SLEEP_PM_OPS +#define HAVE_SYSTEM_SLEEP_PM_OPS +#endif + +#ifndef for_each_set_bit +#define for_each_set_bit(bit, addr, size) \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit */ + +#ifndef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN +#define dma_unmap_addr pci_unmap_addr +#define dma_unmap_addr_set pci_unmap_addr_set +#define dma_unmap_len pci_unmap_len +#define dma_unmap_len_set pci_unmap_len_set +#endif /* DEFINE_DMA_UNMAP_ADDR */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 3)) +#ifdef TGB_HWMON +#ifdef CONFIG_DEBUG_LOCK_ALLOC +#define sysfs_attr_init(attr) \ + do { \ + static struct lock_class_key __key; \ + (attr)->key = &__key; \ + } while (0) +#else +#define sysfs_attr_init(attr) do {} while (0) +#endif /* CONFIG_DEBUG_LOCK_ALLOC */ +#endif /* TGB_HWMON */ +#endif /* RHEL_RELEASE_CODE */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline bool _kc_pm_runtime_suspended() +{ + return false; +} +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended() +#else /* 2.6.0 => 2.6.34 */ +static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev) +{ + return false; +} +#ifndef pm_runtime_suspended +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev) +#endif +#endif /* 2.6.0 => 2.6.34 */ + +#ifndef pci_bus_speed +/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */ +enum _kc_pci_bus_speed { + _KC_PCIE_SPEED_2_5GT = 0x14, + _KC_PCIE_SPEED_5_0GT = 0x15, + _KC_PCIE_SPEED_8_0GT = 0x16, + _KC_PCI_SPEED_UNKNOWN = 0xff, +}; +#define pci_bus_speed _kc_pci_bus_speed +#define PCIE_SPEED_2_5GT _KC_PCIE_SPEED_2_5GT +#define PCIE_SPEED_5_0GT _KC_PCIE_SPEED_5_0GT +#define PCIE_SPEED_8_0GT _KC_PCIE_SPEED_8_0GT +#define PCI_SPEED_UNKNOWN _KC_PCI_SPEED_UNKNOWN +#endif /* pci_bus_speed */ + +#else /* < 2.6.34 */ +#define HAVE_SYSTEM_SLEEP_PM_OPS +#ifndef HAVE_SET_RX_MODE +#define HAVE_SET_RX_MODE +#endif + +#endif /* < 2.6.34 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)) +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count); +#define simple_write_to_buffer _kc_simple_write_to_buffer + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4))) +static inline struct pci_dev *pci_physfn(struct pci_dev *dev) +{ +#ifdef HAVE_PCI_DEV_IS_VIRTFN_BIT +#ifdef CONFIG_PCI_IOV + if (dev->is_virtfn) + dev = dev->physfn; +#endif /* CONFIG_PCI_IOV */ +#endif /* HAVE_PCI_DEV_IS_VIRTFN_BIT */ + return dev; +} +#endif /* ! 
RHEL >= 6.4 */ + + +#ifndef PCI_EXP_LNKSTA_NLW_SHIFT +#define PCI_EXP_LNKSTA_NLW_SHIFT 4 +#endif + +#ifndef numa_node_id +#define numa_node_id() 0 +#endif +#ifndef numa_mem_id +#define numa_mem_id numa_node_id +#endif +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 0))) +#ifdef HAVE_TX_MQ +#include +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int); +#else /* CONFIG_NETDEVICES_MULTI_QUEUE */ +static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + dev->egress_subqueue_count = txq; + return 0; +} +#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */ +#else /* HAVE_TX_MQ */ +static inline int _kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev, + unsigned int __always_unused txq) +{ + return 0; +} +#endif /* HAVE_TX_MQ */ +#define netif_set_real_num_tx_queues(dev, txq) \ + _kc_netif_set_real_num_tx_queues(dev, txq) +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ +#ifndef ETH_FLAG_RXHASH +#define ETH_FLAG_RXHASH (1<<28) +#endif /* ETH_FLAG_RXHASH */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 0)) +#define HAVE_IRQ_AFFINITY_HINT +#endif +struct device_node; +#else /* < 2.6.35 */ +#define HAVE_STRUCT_DEVICE_OF_NODE +#define HAVE_PM_QOS_REQUEST_LIST +#define HAVE_IRQ_AFFINITY_HINT +#include +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) +int _kc_ethtool_op_set_flags(struct net_device *, u32, u32); +#define ethtool_op_set_flags _kc_ethtool_op_set_flags +u32 _kc_ethtool_op_get_flags(struct net_device *); +#define ethtool_op_get_flags _kc_ethtool_op_get_flags + +enum { + WQ_UNBOUND = 0, + WQ_RESCUER = 0, +}; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#ifdef NET_IP_ALIGN +#undef NET_IP_ALIGN +#endif +#define NET_IP_ALIGN 0 +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + +#ifdef NET_SKB_PAD +#undef NET_SKB_PAD +#endif + +#if (L1_CACHE_BYTES > 32) +#define NET_SKB_PAD L1_CACHE_BYTES +#else +#define NET_SKB_PAD 32 +#endif + +static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev, + unsigned int length) +{ + struct sk_buff *skb; + + skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC); + if (skb) { +#if (NET_IP_ALIGN + NET_SKB_PAD) + skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); +#endif + skb->dev = dev; + } + return skb; +} + +#ifdef netdev_alloc_skb_ip_align +#undef netdev_alloc_skb_ip_align +#endif +#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l) + +#undef netif_level +#define netif_level(level, priv, type, dev, fmt, args...) 
\ +do { \ + if (netif_msg_##type(priv)) \ + netdev_##level(dev, fmt, ##args); \ +} while (0) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 3))) +#undef usleep_range +#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) +#endif + +#define u64_stats_update_begin(a) do { } while (0) +#define u64_stats_update_end(a) do { } while (0) +#define u64_stats_fetch_retry_bh(a, b) (0) +#define u64_stats_fetch_begin_bh(a) (0) + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 1)) +#define HAVE_8021P_SUPPORT +#endif + +/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11, 2, 0))) +static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb) +{ + return; +} +#endif + +#else /* < 2.6.36 */ + +#define msleep(x) do { if (x > 20) \ + msleep(x); \ + else \ + usleep_range(1000 * x, 2000 * x); \ + } while (0) + +#define HAVE_PM_QOS_REQUEST_ACTIVE +#define HAVE_8021P_SUPPORT +#define HAVE_NDO_GET_STATS64 +#endif /* < 2.6.36 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)) +#define HAVE_NON_CONST_PCI_DRIVER_NAME +#ifndef netif_set_real_num_tx_queues +static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + netif_set_real_num_tx_queues(dev, txq); + return 0; +} +#define netif_set_real_num_tx_queues(dev, txq) \ + _kc_netif_set_real_num_tx_queues(dev, txq) +#endif +#ifndef netif_set_real_num_rx_queues +static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev, + unsigned int __always_unused rxq) +{ + return 0; +} +#define netif_set_real_num_rx_queues(dev, rxq) \ + __kc_netif_set_real_num_rx_queues((dev), (rxq)) +#endif +#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR +#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) +#endif +#ifndef VLAN_N_VID +#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN +#endif /* VLAN_N_VID */ +#ifndef ETH_FLAG_TXVLAN +#define ETH_FLAG_TXVLAN BIT(7) +#endif /* ETH_FLAG_TXVLAN */ +#ifndef ETH_FLAG_RXVLAN +#define ETH_FLAG_RXVLAN BIT(8) +#endif /* ETH_FLAG_RXVLAN */ + +#define WQ_MEM_RECLAIM WQ_RESCUER + +static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb) +{ + WARN_ON(skb->ip_summed != CHECKSUM_NONE); +} +#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb) + +static inline void *_kc_vzalloc_node(unsigned long size, int node) +{ + void *addr = vmalloc_node(size, node); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node) + +static inline void *_kc_vzalloc(unsigned long size) +{ + void *addr = vmalloc(size); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc(_size) _kc_vzalloc(_size) + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 7)) || \ + (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6, 0))) +static inline __be16 vlan_get_protocol(const struct sk_buff *skb) +{ + if (vlan_tx_tag_present(skb) || + skb->protocol != cpu_to_be16(ETH_P_8021Q)) + return skb->protocol; + + if (skb_headlen(skb) < sizeof(struct vlan_ethhdr)) + return 0; + + return ((struct vlan_ethhdr *)skb->data)->h_vlan_encapsulated_proto; +} +#endif /* !RHEL5.7+ || RHEL6.0 */ + +#ifdef HAVE_HW_TIME_STAMP +#define SKBTX_HW_TSTAMP BIT(0) +#define SKBTX_IN_PROGRESS BIT(2) +#define SKB_SHARED_TX_IS_UNION +#endif + +#ifndef device_wakeup_enable +#define device_wakeup_enable(dev) 
device_set_wakeup_enable(dev, true) +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 18)) +#ifndef HAVE_VLAN_RX_REGISTER +#define HAVE_VLAN_RX_REGISTER +#endif +#endif /* > 2.4.18 */ +#endif /* < 2.6.37 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38)) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22)) +#define skb_checksum_start_offset(skb) skb_transport_offset(skb) +#else /* 2.6.22 -> 2.6.37 */ +static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb) +{ + return skb->csum_start - skb_headroom(skb); +} +#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb) +#endif /* 2.6.22 -> 2.6.37 */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_MAX_TCS +#define IEEE_8021QAZ_MAX_TCS 8 +#endif +#ifndef DCB_CAP_DCBX_HOST +#define DCB_CAP_DCBX_HOST 0x01 +#endif +#ifndef DCB_CAP_DCBX_LLD_MANAGED +#define DCB_CAP_DCBX_LLD_MANAGED 0x02 +#endif +#ifndef DCB_CAP_DCBX_VER_CEE +#define DCB_CAP_DCBX_VER_CEE 0x04 +#endif +#ifndef DCB_CAP_DCBX_VER_IEEE +#define DCB_CAP_DCBX_VER_IEEE 0x08 +#endif +#ifndef DCB_CAP_DCBX_STATIC +#define DCB_CAP_DCBX_STATIC 0x10 +#endif +#endif /* CONFIG_DCB */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 2)) +#define CONFIG_XPS +#endif /* RHEL_RELEASE_VERSION(6,2) */ +#endif /* < 2.6.38 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)) +#ifndef TC_BITMASK +#define TC_BITMASK 15 +#endif +#ifndef NETIF_F_RXCSUM +#define NETIF_F_RXCSUM BIT(29) +#endif +#ifndef skb_queue_reverse_walk_safe +#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ + for (skb = (queue)->prev, tmp = skb->prev; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->prev) +#endif +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef FCOE_MTU +#define FCOE_MTU 2158 +#endif +#endif +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 +#endif +#endif +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4))) +#define kstrtoul(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtouint(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtou32(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) +u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16); +#define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q)) +u8 _kc_netdev_get_num_tc(struct net_device *dev); +#define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev) +int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc); +#define netdev_set_num_tc(dev, tc) _kc_netdev_set_num_tc((dev), (tc)) +#define netdev_reset_tc(dev) _kc_netdev_set_num_tc((dev), 0) +#define netdev_set_tc_queue(dev, tc, cnt, off) do {} while (0) +u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up); +#define netdev_get_prio_tc_map(dev, up) _kc_netdev_get_prio_tc_map(dev, up) +#define netdev_set_prio_tc_map(dev, up, tc) do {} while (0) +#else /* RHEL6.1 or greater */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif /* HAVE_MQPRIO */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#ifndef IEEE_8021QAZ_TSA_STRICT +#define IEEE_8021QAZ_TSA_STRICT 0 +#endif +#ifndef IEEE_8021QAZ_TSA_ETS +#define IEEE_8021QAZ_TSA_ETS 2 +#endif +#ifndef 
IEEE_8021QAZ_APP_SEL_ETHERTYPE +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 +#endif +#endif +#endif /* CONFIG_DCB */ +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ + +#ifndef udp_csum +#define udp_csum __kc_udp_csum +static inline __wsum __kc_udp_csum(struct sk_buff *skb) +{ + __wsum csum = csum_partial(skb_transport_header(skb), + sizeof(struct udphdr), skb->csum); + + for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) { + csum = csum_add(csum, skb->csum); + } + return csum; +} +#endif /* udp_csum */ +#else /* < 2.6.39 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif +#ifndef HAVE_SETUP_TC +#define HAVE_SETUP_TC +#endif +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#endif +#endif /* CONFIG_DCB */ +#ifndef HAVE_NDO_SET_FEATURES +#define HAVE_NDO_SET_FEATURES +#endif +#define HAVE_IRQ_AFFINITY_NOTIFY +#endif /* < 2.6.39 */ + +/*****************************************************************************/ +/* use < 2.6.40 because of a Fedora 15 kernel update where they + * updated the kernel version to 2.6.40.x and they back-ported 3.0 features + * like set_phys_id for ethtool. + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 40)) +#ifdef ETHTOOL_GRXRINGS +#ifndef FLOW_EXT +#define FLOW_EXT 0x80000000 +union _kc_ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + __u8 hdata[60]; +}; +struct _kc_ethtool_flow_ext { + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; +struct _kc_ethtool_rx_flow_spec { + __u32 flow_type; + union _kc_ethtool_flow_union h_u; + struct _kc_ethtool_flow_ext h_ext; + union _kc_ethtool_flow_union m_u; + struct _kc_ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; +#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec +#endif /* FLOW_EXT */ +#endif + +#define pci_disable_link_state_locked pci_disable_link_state + +#ifndef PCI_LTR_VALUE_MASK +#define PCI_LTR_VALUE_MASK 0x000003ff +#endif +#ifndef PCI_LTR_SCALE_MASK +#define PCI_LTR_SCALE_MASK 0x00001c00 +#endif +#ifndef PCI_LTR_SCALE_SHIFT +#define PCI_LTR_SCALE_SHIFT 10 +#endif + +#else /* < 2.6.40 */ +#define HAVE_ETHTOOL_SET_PHYS_ID +#endif /* < 2.6.40 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)) +#define USE_LEGACY_PM_SUPPORT +#ifndef kfree_rcu +#define kfree_rcu(_ptr, _rcu_head) kfree(_ptr) +#endif /* kfree_rcu */ +#ifndef kstrtol_from_user +#define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r) +static inline int _kc_kstrtol_from_user(const char __user *s, size_t count, + unsigned int base, long *res) +{ + /* sign, base 2 representation, newline, terminator */ + char buf[1 + sizeof(long) * 8 + 1 + 1]; + + count = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, s, count)) + return -EFAULT; + buf[count] = '\0'; + return strict_strtol(buf, base, res); +} +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0) || \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,7))) +/* 20000base_blah_full Supported and Advertised Registers */ +#define SUPPORTED_20000baseMLD2_Full BIT(21) +#define SUPPORTED_20000baseKR2_Full BIT(22) +#define ADVERTISED_20000baseMLD2_Full BIT(21) +#define ADVERTISED_20000baseKR2_Full BIT(22) +#endif /* RHEL_RELEASE_CODE */ +#endif /* < 
3.0.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)) +#ifndef __netdev_alloc_skb_ip_align +#define __netdev_alloc_skb_ip_align(d, l, _g) netdev_alloc_skb_ip_align(d, l) +#endif /* __netdev_alloc_skb_ip_align */ +#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app) +#define dcb_ieee_delapp(dev, app) 0 +#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority) + +/* 1000BASE-T Control register */ +#define CTL1000_AS_MASTER 0x0800 +#define CTL1000_ENABLE_MASTER 0x1000 + +/* kernels less than 3.0.0 don't have this */ +#ifndef ETH_P_8021AD +#define ETH_P_8021AD 0x88A8 +#endif + +/* Stub definition for !CONFIG_OF is introduced later */ +#ifdef CONFIG_OF +static inline struct device_node * +pci_device_to_OF_node(struct pci_dev __maybe_unused *pdev) +{ +#ifdef HAVE_STRUCT_DEVICE_OF_NODE + return pdev ? pdev->dev.of_node : NULL; +#else + return NULL; +#endif /* !HAVE_STRUCT_DEVICE_OF_NODE */ +} +#endif /* CONFIG_OF */ +#else /* < 3.1.0 */ +#ifndef HAVE_DCBNL_IEEE_DELAPP +#define HAVE_DCBNL_IEEE_DELAPP +#endif +#endif /* < 3.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) +#ifndef dma_zalloc_coherent +#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f) +static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = dma_alloc_coherent(dev, size, dma_handle, flag); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif +#ifdef ETHTOOL_GRXRINGS +#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS +#endif /* ETHTOOL_GRXRINGS */ + +#ifndef skb_frag_size +#define skb_frag_size(frag) _kc_skb_frag_size(frag) +static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag) +{ + return frag->size; +} +#endif /* skb_frag_size */ + +#ifndef skb_frag_size_sub +#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta) +static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta) +{ + frag->size -= delta; +} +#endif /* skb_frag_size_sub */ + +#ifndef skb_frag_page +#define skb_frag_page(frag) _kc_skb_frag_page(frag) +static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag) +{ + return frag->page; +} +#endif /* skb_frag_page */ + +#ifndef skb_frag_address +#define skb_frag_address(frag) _kc_skb_frag_address(frag) +static inline void *_kc_skb_frag_address(const skb_frag_t *frag) +{ + return page_address(skb_frag_page(frag)) + frag->page_offset; +} +#endif /* skb_frag_address */ + +#ifndef skb_frag_dma_map +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#include +#endif +#define skb_frag_dma_map(dev, frag, offset, size, dir) \ + _kc_skb_frag_dma_map(dev, frag, offset, size, dir) +static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev, + const skb_frag_t *frag, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + return dma_map_page(dev, skb_frag_page(frag), + frag->page_offset + offset, size, dir); +} +#endif /* skb_frag_dma_map */ + +#ifndef __skb_frag_unref +#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag) +static inline void __kc_skb_frag_unref(skb_frag_t *frag) +{ + put_page(skb_frag_page(frag)); +} +#endif /* __skb_frag_unref */ + +#ifndef SPEED_UNKNOWN +#define SPEED_UNKNOWN -1 +#endif +#ifndef DUPLEX_UNKNOWN +#define DUPLEX_UNKNOWN 0xff +#endif +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 3)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= 
SLE_VERSION(11, 3, 0))) +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#endif +#endif +#else /* < 3.2.0 */ +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_VF_SPOOFCHK_CONFIGURE +#endif +#ifndef HAVE_SKB_L4_RXHASH +#define HAVE_SKB_L4_RXHASH +#endif +#define HAVE_IOMMU_PRESENT +#define HAVE_PM_QOS_REQUEST_LIST_NEW +#endif /* < 3.2.0 */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6, 2)) +#undef ngbe_get_netdev_tc_txq +#define ngbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc]) +#endif +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)) +/* NOTE: the order of parameters to _kc_alloc_workqueue() is different than + * alloc_workqueue() to avoid compiler warning from -Wvarargs + */ +static inline struct workqueue_struct *__attribute__ ((format(printf, 3, 4))) +_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active, + const char *fmt, ...) +{ + struct workqueue_struct *wq; + va_list args, temp; + unsigned int len; + char *p; + + va_start(args, fmt); + va_copy(temp, args); + len = vsnprintf(NULL, 0, fmt, temp); + va_end(temp); + + p = kmalloc(len + 1, GFP_KERNEL); + if (!p) { + va_end(args); + return NULL; + } + + vsnprintf(p, len + 1, fmt, args); + va_end(args); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) + wq = create_workqueue(p); +#else + wq = alloc_workqueue(p, flags, max_active); +#endif + kfree(p); + + return wq; +} +#ifdef alloc_workqueue +#undef alloc_workqueue +#endif +#define alloc_workqueue(fmt, flags, max_active, args...) \ + _kc_alloc_workqueue(flags, max_active, fmt, ##args) + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 5)) +typedef u32 netdev_features_t; +#endif +#undef PCI_EXP_TYPE_RC_EC +#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ +#ifndef CONFIG_BQL +#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0) +#define netdev_completed_queue(_n, _p, _b) do {} while (0) +#define netdev_tx_sent_queue(_q, _b) do {} while (0) +#define netdev_sent_queue(_n, _b) do {} while (0) +#define netdev_tx_reset_queue(_q) do {} while (0) +#define netdev_reset_queue(_n) do {} while (0) +#endif +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0)) +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#endif /* SLE_VERSION(11,3,0) */ +#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q) +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 4, 0)) +static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start, + u8 *nexthdrp, + __be16 __always_unused *frag_offp) +{ + return ipv6_skip_exthdr(skb, start, nexthdrp); +} +#undef ipv6_skip_exthdr +#define ipv6_skip_exthdr(a, b, c, d) __kc_ipv6_skip_exthdr((a), (b), (c), (d)) +#endif /* !SLES11sp4 or greater */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0))) +static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) +{ + return index % n_rx_rings; +} +#endif + +#else /* ! 
< 3.3.0 */ +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef ETHTOOL_SRXNTUPLE +#undef ETHTOOL_SRXNTUPLE +#endif +#endif /* < 3.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) +#ifndef NETIF_F_RXFCS +#define NETIF_F_RXFCS 0 +#endif /* NETIF_F_RXFCS */ +#ifndef NETIF_F_RXALL +#define NETIF_F_RXALL 0 +#endif /* NETIF_F_RXALL */ + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0)) +#define NUMTCS_RETURNS_U8 + +int _kc_simple_open(struct inode *inode, struct file *file); +#define simple_open _kc_simple_open +#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ + +#ifndef skb_add_rx_frag +#define skb_add_rx_frag _kc_skb_add_rx_frag +void _kc_skb_add_rx_frag(struct sk_buff * skb, int i, struct page *page, + int off, int size, unsigned int truesize); +#endif +#ifdef NET_ADDR_RANDOM +#define eth_hw_addr_random(N) do { \ + eth_random_addr(N->dev_addr); \ + N->addr_assign_type |= NET_ADDR_RANDOM; \ + } while (0) +#else /* NET_ADDR_RANDOM */ +#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr) +#endif /* NET_ADDR_RANDOM */ + +#ifndef for_each_set_bit_from +#define for_each_set_bit_from(bit, addr, size) \ + for ((bit) = find_next_bit((addr), (size), (bit)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit_from */ + +#else /* < 3.4.0 */ +#include +#endif /* >= 3.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)) +#if !defined(NO_PTP_SUPPORT) && IS_ENABLED(CONFIG_PTP_1588_CLOCK) +#define HAVE_PTP_1588_CLOCK +#endif /* !NO_PTP_SUPPORT && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ +#endif /* >= 3.0.0 || RHEL_RELEASE > 6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) ) + +#ifndef SIZE_MAX +#define SIZE_MAX (~(size_t)0) +#endif + +#ifndef BITS_PER_LONG_LONG +#define BITS_PER_LONG_LONG 64 +#endif + +#ifndef ether_addr_equal +static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2) +{ + return !compare_ether_addr(addr1, addr2); +} +#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1), (_addr2)) +#endif + +/* Definitions for !CONFIG_OF_NET are introduced in 3.10 */ +#ifdef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif +#else +#include +#define HAVE_FDB_OPS +#define HAVE_ETHTOOL_GET_TS_INFO +#endif /* < 3.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) +#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */ + +#ifndef MDIO_EEE_100TX +#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ +#endif +#ifndef MDIO_EEE_1000T +#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ +#endif +#ifndef MDIO_EEE_10GT +#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ +#endif +#ifndef MDIO_EEE_1000KX +#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ +#endif +#ifndef MDIO_EEE_10GKX4 +#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ +#endif +#ifndef MDIO_EEE_10GKR +#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ +#endif + +#ifndef __GFP_MEMALLOC +#define 
__GFP_MEMALLOC 0
+#endif
+
+#ifndef eth_broadcast_addr
+#define eth_broadcast_addr _kc_eth_broadcast_addr
+static inline void _kc_eth_broadcast_addr(u8 *addr)
+{
+	memset(addr, 0xff, ETH_ALEN);
+}
+#endif
+
+#ifndef eth_random_addr
+#define eth_random_addr _kc_eth_random_addr
+static inline void _kc_eth_random_addr(u8 *addr)
+{
+	get_random_bytes(addr, ETH_ALEN);
+	addr[0] &= 0xfe; /* clear multicast */
+	addr[0] |= 0x02; /* set local assignment */
+}
+#endif /* eth_random_addr */
+
+#ifndef DMA_ATTR_SKIP_CPU_SYNC
+#define DMA_ATTR_SKIP_CPU_SYNC 0
+#endif
+#else /* < 3.6.0 */
+#define HAVE_STRUCT_PAGE_PFMEMALLOC
+#endif /* < 3.6.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
+#include
+#ifndef ADVERTISED_40000baseKR4_Full
+/* these defines were all added in one commit, so it should be safe
+ * to trigger activation on one define
+ */
+#define SUPPORTED_40000baseKR4_Full BIT(23)
+#define SUPPORTED_40000baseCR4_Full BIT(24)
+#define SUPPORTED_40000baseSR4_Full BIT(25)
+#define SUPPORTED_40000baseLR4_Full BIT(26)
+#define ADVERTISED_40000baseKR4_Full BIT(23)
+#define ADVERTISED_40000baseCR4_Full BIT(24)
+#define ADVERTISED_40000baseSR4_Full BIT(25)
+#define ADVERTISED_40000baseLR4_Full BIT(26)
+#endif
+
+#ifndef mmd_eee_cap_to_ethtool_sup_t
+/**
+ * mmd_eee_cap_to_ethtool_sup_t
+ * @eee_cap: value of the MMD EEE Capability register
+ *
+ * A small helper function that translates MMD EEE Capability (3.20) bits
+ * to ethtool supported settings.
+ */
+static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
+{
+	u32 supported = 0;
+
+	if (eee_cap & MDIO_EEE_100TX)
+		supported |= SUPPORTED_100baseT_Full;
+	if (eee_cap & MDIO_EEE_1000T)
+		supported |= SUPPORTED_1000baseT_Full;
+	if (eee_cap & MDIO_EEE_10GT)
+		supported |= SUPPORTED_10000baseT_Full;
+	if (eee_cap & MDIO_EEE_1000KX)
+		supported |= SUPPORTED_1000baseKX_Full;
+	if (eee_cap & MDIO_EEE_10GKX4)
+		supported |= SUPPORTED_10000baseKX4_Full;
+	if (eee_cap & MDIO_EEE_10GKR)
+		supported |= SUPPORTED_10000baseKR_Full;
+
+	return supported;
+}
+#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \
+	__kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap)
+#endif /* mmd_eee_cap_to_ethtool_sup_t */
+
+#ifndef mmd_eee_adv_to_ethtool_adv_t
+/**
+ * mmd_eee_adv_to_ethtool_adv_t
+ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
+ *
+ * A small helper function that translates the MMD EEE Advertisement (7.60)
+ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
+ * settings.
+ */ +static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv) +{ + u32 adv = 0; + + if (eee_adv & MDIO_EEE_100TX) + adv |= ADVERTISED_100baseT_Full; + if (eee_adv & MDIO_EEE_1000T) + adv |= ADVERTISED_1000baseT_Full; + if (eee_adv & MDIO_EEE_10GT) + adv |= ADVERTISED_10000baseT_Full; + if (eee_adv & MDIO_EEE_1000KX) + adv |= ADVERTISED_1000baseKX_Full; + if (eee_adv & MDIO_EEE_10GKX4) + adv |= ADVERTISED_10000baseKX4_Full; + if (eee_adv & MDIO_EEE_10GKR) + adv |= ADVERTISED_10000baseKR_Full; + + return adv; +} + +#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \ + __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv) +#endif /* mmd_eee_adv_to_ethtool_adv_t */ + +#ifndef ethtool_adv_to_mmd_eee_adv_t +/** + * ethtool_adv_to_mmd_eee_adv_t + * @adv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement settings + * to EEE advertisements for the MMD EEE Advertisement (7.60) and + * MMD EEE Link Partner Ability (7.61) registers. + */ +static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv) +{ + u16 reg = 0; + + if (adv & ADVERTISED_100baseT_Full) + reg |= MDIO_EEE_100TX; + if (adv & ADVERTISED_1000baseT_Full) + reg |= MDIO_EEE_1000T; + if (adv & ADVERTISED_10000baseT_Full) + reg |= MDIO_EEE_10GT; + if (adv & ADVERTISED_1000baseKX_Full) + reg |= MDIO_EEE_1000KX; + if (adv & ADVERTISED_10000baseKX4_Full) + reg |= MDIO_EEE_10GKX4; + if (adv & ADVERTISED_10000baseKR_Full) + reg |= MDIO_EEE_10GKR; + + return reg; +} +#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv) +#endif /* ethtool_adv_to_mmd_eee_adv_t */ + +#ifndef pci_pcie_type +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) +static inline u8 pci_pcie_type(struct pci_dev *pdev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); + BUG_ON(!pos); + pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); + return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; +} +#else /* < 2.6.24 */ +#define pci_pcie_type(x) (x)->pcie_type +#endif /* < 2.6.24 */ +#endif /* pci_pcie_type */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4))) && \ + (!(SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0))) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) +#define ptp_clock_register(caps, args...) 
ptp_clock_register(caps) +#endif + +#ifndef pcie_capability_read_word +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); +#define pcie_capability_read_word(d, p, v) __kc_pcie_capability_read_word(d, p, v) +#endif /* pcie_capability_read_word */ + +#ifndef pcie_capability_read_dword +int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); +#define pcie_capability_read_dword(d,p,v) __kc_pcie_capability_read_dword(d,p,v) +#endif + +#ifndef pcie_capability_write_word +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); +#define pcie_capability_write_word(d, p, v) __kc_pcie_capability_write_word(d, p, v) +#endif /* pcie_capability_write_word */ + +#ifndef pcie_capability_clear_and_set_word +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set); +#define pcie_capability_clear_and_set_word(d, p, c, s) \ + __kc_pcie_capability_clear_and_set_word(d, p, c, s) +#endif /* pcie_capability_clear_and_set_word */ + +#ifndef pcie_capability_clear_word +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear); +#define pcie_capability_clear_word(d, p, c) \ + __kc_pcie_capability_clear_word(d, p, c) +#endif /* pcie_capability_clear_word */ + +#ifndef PCI_EXP_LNKSTA2 +#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0)) +#define USE_CONST_DEV_UC_CHAR +#define HAVE_NDO_FDB_ADD_NLATTR +#endif + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 8)) +#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi) +#endif /* !RHEL6.8+ */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 6)) +#include +#else + +#define DEFINE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] __read_mostly = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DECLARE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] + +#define HASH_SIZE(name) (ARRAY_SIZE(name)) +#define HASH_BITS(name) ilog2(HASH_SIZE(name)) + +/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ +#define hash_min(val, bits) \ + (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits)) + +static inline void __hash_init(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + INIT_HLIST_HEAD(&ht[i]); +} + +#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) + +#define hash_add(hashtable, node, key) \ + hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) + +static inline bool hash_hashed(struct hlist_node *node) +{ + return !hlist_unhashed(node); +} + +static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + if (!hlist_empty(&ht[i])) + return false; + + return true; +} + +#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) + +static inline void hash_del(struct hlist_node *node) +{ + hlist_del_init(node); +} +#endif /* RHEL >= 6.6 */ + +/* We don't have @flags support prior to 3.7, so we'll simply ignore the flags + * parameter on these older kernels. + */ +#define __setup_timer(_timer, _fn, _data, _flags) \ + setup_timer((_timer), (_fn), (_data)) \ + +#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) ) ) && \ + ( ! 
( SLE_VERSION_CODE >= SLE_VERSION(12,0,0) ) ) + +#ifndef mod_delayed_work +/** + * __mod_delayed_work - modify delay or queue delayed work + * @wq: workqueue to use + * @dwork: delayed work to queue + * @delay: number of jiffies to wait before queueing + * + * Return: %true if @dwork was pending and was rescheduled; + * %false if it wasn't pending + * + * Note: the dwork parameter was declared as a void* + * to avoid comptibility problems with early 2.6 kernels + * where struct delayed_work is not declared. Unlike the original + * implementation flags are not preserved and it shouldn't be + * used in the interrupt context. + */ +static inline bool __mod_delayed_work(struct workqueue_struct *wq, + void *dwork, + unsigned long delay) +{ + bool ret = cancel_delayed_work(dwork); + queue_delayed_work(wq, dwork, delay); + return ret; +} +#define mod_delayed_work(wq, dwork, delay) __mod_delayed_work(wq, dwork, delay) +#endif /* mod_delayed_work */ + +#endif /* !(RHEL >= 6.7) && !(SLE >= 12.0) */ +#else /* >= 3.7.0 */ +#include +#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS +#define USE_CONST_DEV_UC_CHAR +#define HAVE_NDO_FDB_ADD_NLATTR +#endif /* >= 3.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 5)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 4, 0))) +#ifndef pci_sriov_set_totalvfs +static inline int __kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev, u16 __always_unused numvfs) +{ + return 0; +} +#define pci_sriov_set_totalvfs(a, b) __kc_pci_sriov_set_totalvfs((a), (b)) +#endif +#endif /* !(RHEL_RELEASE_CODE >= 6.5 && SLE_VERSION_CODE >= 11.4) */ +#ifndef PCI_EXP_LNKCTL_ASPM_L0S +#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */ +#endif +#ifndef PCI_EXP_LNKCTL_ASPM_L1 +#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */ +#endif +#define HAVE_CONFIG_HOTPLUG +/* Reserved Ethernet Addresses per IEEE 802.1Q */ +static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { + 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; + +#ifndef is_link_local_ether_addr +static inline bool __kc_is_link_local_ether_addr(const u8 *addr) +{ + __be16 *a = (__be16 *)addr; + static const __be16 *b = (const __be16 *)eth_reserved_addr_base; + static const __be16 m = cpu_to_be16(0xfff0); + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; +} +#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr) +#endif /* is_link_local_ether_addr */ + +#ifndef FLOW_MAC_EXT +#define FLOW_MAC_EXT 0x40000000 +#endif /* FLOW_MAC_EXT */ + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 4, 0)) +#define HAVE_SRIOV_CONFIGURE +#endif + +#ifndef PCI_EXP_LNKCAP_SLS_2_5GB +#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */ +#endif + +#ifndef PCI_EXP_LNKCAP_SLS_5_0GB +#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */ +#endif + +#undef PCI_EXP_LNKCAP2_SLS_2_5GB +#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */ + +#undef PCI_EXP_LNKCAP2_SLS_5_0GB +#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */ + +#undef PCI_EXP_LNKCAP2_SLS_8_0GB +#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */ + +#else /* >= 3.8.0 */ +#ifndef __devinit +#define __devinit +#endif + +#ifndef __devinitdata +#define __devinitdata +#endif + +#ifndef __devinitconst +#define __devinitconst +#endif + +#ifndef __devexit 
+#define __devexit +#endif + +#ifndef __devexit_p +#define __devexit_p +#endif + +#ifndef HAVE_ENCAP_CSUM_OFFLOAD +#define HAVE_ENCAP_CSUM_OFFLOAD +#endif + +#ifndef HAVE_GRE_ENCAP_OFFLOAD +#define HAVE_GRE_ENCAP_OFFLOAD +#endif + +#ifndef HAVE_SRIOV_CONFIGURE +#define HAVE_SRIOV_CONFIGURE +#endif + +#define HAVE_BRIDGE_ATTRIBS +#ifndef BRIDGE_MODE_VEB +#define BRIDGE_MODE_VEB 0 /* Default loopback mode */ +#endif /* BRIDGE_MODE_VEB */ +#ifndef BRIDGE_MODE_VEPA +#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */ +#endif /* BRIDGE_MODE_VEPA */ +#endif /* >= 3.8.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) + +#undef BUILD_BUG_ON +#ifdef __CHECKER__ +#define BUILD_BUG_ON(condition) (0) +#else /* __CHECKER__ */ +#ifndef __compiletime_warning +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_warning(message) __attribute__((warning(message))) +#else /* __GNUC__ */ +#define __compiletime_warning(message) +#endif /* __GNUC__ */ +#endif /* __compiletime_warning */ +#ifndef __compiletime_error +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_error(message) __attribute__((error(message))) +#define __compiletime_error_fallback(condition) do { } while (0) +#else /* __GNUC__ */ +#define __compiletime_error(message) +#define __compiletime_error_fallback(condition) \ + do { ((void)sizeof(char[1 - 2 * condition])); } while (0) +#endif /* __GNUC__ */ +#else /* __compiletime_error */ +#define __compiletime_error_fallback(condition) do { } while (0) +#endif /* __compiletime_error */ +#define __compiletime_assert(condition, msg, prefix, suffix) \ + do { \ + bool __cond = !(condition); \ + extern void prefix ## suffix(void) __compiletime_error(msg); \ + if (__cond) \ + prefix ## suffix(); \ + __compiletime_error_fallback(__cond); \ + } while (0) + +#define _compiletime_assert(condition, msg, prefix, suffix) \ + __compiletime_assert(condition, msg, prefix, suffix) +#define compiletime_assert(condition, msg) \ + _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) +#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) +#ifndef __OPTIMIZE__ +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +#else /* __OPTIMIZE__ */ +#define BUILD_BUG_ON(condition) \ + BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) +#endif /* __OPTIMIZE__ */ +#endif /* __CHECKER__ */ + +#undef hlist_entry +#define hlist_entry(ptr, type, member) container_of(ptr, type, member) + +#undef hlist_entry_safe +#define hlist_entry_safe(ptr, type, member) \ + ({ typeof(ptr) ____ptr = (ptr); \ + ____ptr ? 
hlist_entry(____ptr, type, member) : NULL; \ + }) + +#undef hlist_for_each_entry +#define hlist_for_each_entry(pos, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \ + pos && ({ n = pos->member.next; 1; }); \ + pos = hlist_entry_safe(n, typeof(*pos), member)) + +#undef hlist_for_each_entry_continue +#define hlist_for_each_entry_continue(pos, member) \ + for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_from +#define hlist_for_each_entry_from(pos, member) \ + for (; pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hash_for_each +#define hash_for_each(name, bkt, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry(obj, &name[bkt], member) + +#undef hash_for_each_safe +#define hash_for_each_safe(name, bkt, tmp, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) + +#undef hash_for_each_possible +#define hash_for_each_possible(name, obj, member, key) \ + hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) + +#undef hash_for_each_possible_safe +#define hash_for_each_possible_safe(name, obj, tmp, member, key) \ + hlist_for_each_entry_safe(obj, tmp,\ + &name[hash_min(key, HASH_BITS(name))], member) + +#ifdef CONFIG_XPS +int __kc_netif_set_xps_queue(struct net_device *, const struct cpumask *, u16); +#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx)) +#else /* CONFIG_XPS */ +#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0) +#endif /* CONFIG_XPS */ + +#ifdef HAVE_NETDEV_SELECT_QUEUE +#define _kc_hashrnd 0xd631614b /* not so random hash salt */ +u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); +#define __netdev_pick_tx __kc_netdev_pick_tx +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#else +#define HAVE_BRIDGE_FILTER +#define HAVE_FDB_DEL_NLATTR +#endif /* < 3.9.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) +#ifndef NAPI_POLL_WEIGHT +#define NAPI_POLL_WEIGHT 64 +#endif +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev *dev); +#else +static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev) +{ + return 0; +} +#endif +#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev) + +#ifndef list_first_entry_or_null +#define list_first_entry_or_null(ptr, type, member) \ + (!list_empty(ptr) ? 
list_first_entry(ptr, type, member) : NULL) +#endif + +#ifndef VLAN_TX_COOKIE_MAGIC +static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb, + u16 vlan_tci) +{ +#ifdef VLAN_TAG_PRESENT + vlan_tci |= VLAN_TAG_PRESENT; +#endif + skb->vlan_tci = vlan_tci; + return skb; +} +#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \ + __kc__vlan_hwaccel_put_tag(skb, vlan_tci) +#endif + +#ifdef HAVE_FDB_OPS +#if defined(HAVE_NDO_FDB_ADD_NLATTR) +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 flags); +#elif defined(USE_CONST_DEV_UC_CHAR) +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr, u16 flags); +#else +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 flags); +#endif /* HAVE_NDO_FDB_ADD_NLATTR */ +#if defined(HAVE_FDB_DEL_NLATTR) +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr); +#elif defined(USE_CONST_DEV_UC_CHAR) +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr); +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr); +#endif /* HAVE_FDB_DEL_NLATTR */ +#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add +#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del +#endif /* HAVE_FDB_OPS */ + +#ifndef PCI_DEVID +#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) +#endif + +/* The definitions for these functions when CONFIG_OF_NET is defined are + * pulled in from . For kernels older than 3.5 we already have + * backports for when CONFIG_OF_NET is true. These are separated and + * duplicated in order to cover all cases so that all kernels get either the + * real definitions (when CONFIG_OF_NET is defined) or the stub definitions + * (when CONFIG_OF_NET is not defined, or the kernel is too old to have real + * definitions). 
+ */ +#ifndef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif + +#else /* >= 3.10.0 */ +#define HAVE_ENCAP_TSO_OFFLOAD +#define USE_DEFAULT_FDB_DEL_DUMP +#define HAVE_SKB_INNER_NETWORK_HEADER +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0))) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) +#define HAVE_RHEL7_PCI_DRIVER_RH +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2)) +#define HAVE_RHEL7_PCI_RESET_NOTIFY +#endif /* RHEL >= 7.2 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3)) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 5)) +#define HAVE_GENEVE_RX_OFFLOAD +#endif /* RHEL >=7.3 && RHEL < 7.5 */ +#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC +#if defined(__aarch64__) +#else +#ifndef CONFIG_PPC64 +#define HAVE_RHEL7_NET_DEVICE_OPS_EXT +#endif/*CONFIG_PPC64LE*/ +#endif/*aarch*/ +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) +#define HAVE_UDP_ENC_TUNNEL +#endif /* !HAVE_UDP_ENC_TUNNEL && CONFIG_GENEVE */ +#endif /* RHEL >= 7.3 */ + +/* new hooks added to net_device_ops_extended in RHEL7.4 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) +#if defined(__aarch64__) +#else +#ifndef CONFIG_PPC64 +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL +#define HAVE_UDP_ENC_RX_OFFLOAD +#endif /*CONFIG_PPC64LE*/ +#endif/*aarch*/ +#endif /* RHEL >= 7.4 */ +#else /* RHEL >= 8.0 */ +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#define NO_NETDEV_BPF_PROG_ATTACHED +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#endif /* RHEL >= 8.0 */ +#endif /* RHEL >= 7.0 */ +#endif /* >= 3.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)) +#define netdev_notifier_info_to_dev(ptr) ptr +#ifndef time_in_range64 +#define time_in_range64(a, b, c) \ + (time_after_eq64(a, b) && \ + time_before_eq64(a, c)) +#endif /* time_in_range64 */ +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))) +#define HAVE_NDO_SET_VF_LINK_STATE +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif +#else /* >= 3.11.0 */ +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_SKB_INNER_PROTOCOL +#define HAVE_MPLS_FEATURES +#endif /* >= 3.11.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) +int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width); +#ifndef pcie_get_minimum_link +#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w) +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7)) +int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev); +#define pci_wait_for_pending_transaction _kc_pci_wait_for_pending_transaction +#endif /* = 3.12.0 */ +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 0, 0)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) +#define HAVE_VXLAN_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* < 4.8.0 */ +#define HAVE_NDO_GET_PHYS_PORT_ID +#define 
HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK +#endif /* >= 3.12.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) +#define dma_set_mask_and_coherent(_p, _m) __kc_dma_set_mask_and_coherent(_p, _m) +int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask); +#ifndef u64_stats_init +#define u64_stats_init(a) do { } while(0) +#endif +#undef BIT_ULL +#define BIT_ULL(n) (1ULL << (n)) + +#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0))) +static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) +{ + dev = pci_physfn(dev); + if (pci_is_root_bus(dev->bus)) + return NULL; + + return dev->bus->self; +} +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 1, 0)) +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT +#endif +#ifndef list_next_entry +#define list_next_entry(pos, member) \ + list_entry((pos)->member.next, typeof(*(pos)), member) +#endif +#ifndef list_prev_entry +#define list_prev_entry(pos, member) \ + list_entry((pos)->member.prev, typeof(*(pos)), member) +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 20)) +#define devm_kcalloc(dev, cnt, size, flags) \ + devm_kzalloc(dev, cnt * size, flags) +#endif /* > 2.6.20 */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +#define list_last_entry(ptr, type, member) list_entry((ptr)->prev, type, member) +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) +bool _kc_pci_device_is_present(struct pci_dev *pdev); +#define pci_device_is_present _kc_pci_device_is_present +#endif /* = 3.13.0 */ +#define HAVE_VXLAN_CHECKS +#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3, 13, 0, 24)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#elif (defined(UTS_RELEASE) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#else +#define HAVE_NDO_SELECT_QUEUE_ACCEL +#endif +#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS +#endif + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) + +#ifndef U16_MAX +#define U16_MAX ((u16)~0U) +#endif + +#ifndef U32_MAX +#define U32_MAX ((u32)~0U) +#endif + +#ifndef U64_MAX +#define U64_MAX ((u64)~0ULL) +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +#define dev_consume_skb_any(x) dev_kfree_skb_any(x) +#define dev_consume_skb_irq(x) dev_kfree_skb_irq(x) +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 0)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 0, 0))) + +/* it isn't expected that this would be a #define unless we made it so */ +#ifndef skb_set_hash + +#define PKT_HASH_TYPE_NONE 0 +#define PKT_HASH_TYPE_L2 1 +#define PKT_HASH_TYPE_L3 2 +#define PKT_HASH_TYPE_L4 3 + +enum _kc_pkt_hash_types { + _KC_PKT_HASH_TYPE_NONE = PKT_HASH_TYPE_NONE, + _KC_PKT_HASH_TYPE_L2 = PKT_HASH_TYPE_L2, + _KC_PKT_HASH_TYPE_L3 = PKT_HASH_TYPE_L3, + _KC_PKT_HASH_TYPE_L4 = PKT_HASH_TYPE_L4, +}; +#define pkt_hash_types _kc_pkt_hash_types + +#define skb_set_hash __kc_skb_set_hash +static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb, + u32 __maybe_unused hash, + int __maybe_unused type) +{ +#ifdef HAVE_SKB_L4_RXHASH + skb->l4_rxhash = (type == PKT_HASH_TYPE_L4); +#endif +#ifdef NETIF_F_RXHASH + skb->rxhash = hash; +#endif +} +#endif /* 
!skb_set_hash */ + +#else /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */ + +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,0)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE <= SLE_VERSION(12,1,0))) +/* GPLv2 code taken from 5.10-rc2 kernel source include/linux/pci.h, Copyright + * original authors. + */ +#if 0 +static inline int pci_enable_msix_exact(struct pci_dev *dev, + struct msix_entry *entries, int nvec) +{ + int rc = pci_enable_msix_range(dev, entries, nvec, nvec); + if (rc < 0) + return rc; + return 0; +} +#endif +#endif /* <=EL7.0 || <=SLES 12.1 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#ifndef HAVE_VXLAN_RX_OFFLOAD +#define HAVE_VXLAN_RX_OFFLOAD +#endif /* HAVE_VXLAN_RX_OFFLOAD */ +#endif + +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif + +#ifndef HAVE_VXLAN_CHECKS +#define HAVE_VXLAN_CHECKS +#endif /* HAVE_VXLAN_CHECKS */ +#endif /* !(RHEL_RELEASE_CODE >= 7.0 && SLE_VERSION_CODE >= 12.0) */ + +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 0, 0))) +#define HAVE_NDO_DFWD_OPS +#endif + +#ifndef pci_enable_msix_range +int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec); +#define pci_enable_msix_range __kc_pci_enable_msix_range +#endif + +#ifndef ether_addr_copy +#define ether_addr_copy __kc_ether_addr_copy +static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + *(u32 *)dst = *(const u32 *)src; + *(u16 *)(dst + 4) = *(const u16 *)(src + 4); +#else + u16 *a = (u16 *)dst; + const u16 *b = (const u16 *)src; + + a[0] = b[0]; + a[1] = b[1]; + a[2] = b[2]; +#endif +} +#endif /* ether_addr_copy */ +int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, + int target, unsigned short *fragoff, int *flags); +#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e)) + +#ifndef OPTIMIZE_HIDE_VAR +#ifdef __GNUC__ +#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var)) +#else +#include +#define OPTIMIZE_HIDE_VAR(var) barrier() +#endif +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,4,0))) +static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) +{ +#ifdef NETIF_F_RXHASH + return skb->rxhash; +#else + return 0; +#endif /* NETIF_F_RXHASH */ +} +#endif /* !RHEL > 5.9 && !SLES >= 10.4 */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) +#define request_firmware_direct request_firmware +#endif /* !RHEL || RHEL < 7.5 */ + +#else /* >= 3.14.0 */ + +/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */ +#ifndef HAVE_NDO_DFWD_OPS +#define HAVE_NDO_DFWD_OPS +#endif +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif /* 3.14.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +#define HAVE_SKBUFF_RXHASH +#endif /* >= 2.6.35 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) && \ + !(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,30))) +#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh +#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh +#endif + +char *_kc_devm_kstrdup(struct device *dev, 
const char *s, gfp_t gfp); +#define devm_kstrdup(dev, s, gfp) _kc_devm_kstrdup(dev, s, gfp) + +#else +#define HAVE_NET_GET_RANDOM_ONCE +#define HAVE_PTP_1588_CLOCK_PINS +#define HAVE_NETDEV_PORT +#endif /* 3.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)) +#ifndef smp_mb__before_atomic +#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() smp_mb() +#endif +#ifndef __dev_uc_sync +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)); +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)); +#endif +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)); +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)); +#endif +#endif /* HAVE_SET_RX_MODE */ + +static inline int __kc_dev_uc_sync(struct net_device __maybe_unused *dev, + int __maybe_unused (*sync)(struct net_device *, const unsigned char *), + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef NETDEV_HW_ADDR_T_UNICAST + return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync); +#elif defined(HAVE_SET_RX_MODE) + return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count, + dev, sync, unsync); +#else + return 0; +#endif +} +#define __dev_uc_sync __kc_dev_uc_sync + +static inline void __kc_dev_uc_unsync(struct net_device __maybe_unused *dev, + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST + __kc_hw_addr_unsync_dev(&dev->uc, dev, unsync); +#else /* NETDEV_HW_ADDR_T_MULTICAST */ + __kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync); +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#endif /* HAVE_SET_RX_MODE */ +} +#define __dev_uc_unsync __kc_dev_uc_unsync + +static inline int __kc_dev_mc_sync(struct net_device __maybe_unused *dev, + int __maybe_unused (*sync)(struct net_device *, const unsigned char *), + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef NETDEV_HW_ADDR_T_MULTICAST + return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync); +#elif defined(HAVE_SET_RX_MODE) + return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count, + dev, sync, unsync); +#else + return 0; +#endif + +} +#define __dev_mc_sync __kc_dev_mc_sync + +static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev, + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_MULTICAST + __kc_hw_addr_unsync_dev(&dev->mc, dev, unsync); +#else /* NETDEV_HW_ADDR_T_MULTICAST */ + __kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync); +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +} +#define __dev_mc_unsync __kc_dev_mc_unsync +#endif /* __dev_uc_sync */ + +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 1)) +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#endif + 
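+/* Usage sketch for the __dev_uc_sync()/__dev_mc_sync() wrappers defined above
+ * (illustrative only; the wx_sync_addr()/wx_unsync_addr() handler names are
+ * assumptions, not part of this header). A driver's ndo_set_rx_mode handler
+ * can call the same helpers on every kernel, e.g.:
+ *
+ *	static int wx_sync_addr(struct net_device *dev, const unsigned char *addr)
+ *	{
+ *		return 0;	// program 'addr' into a hardware filter slot here
+ *	}
+ *
+ *	__dev_uc_sync(netdev, wx_sync_addr, wx_unsync_addr);
+ *	__dev_mc_sync(netdev, wx_sync_addr, wx_unsync_addr);
+ *
+ * On kernels >= 3.16 these resolve to the in-tree helpers; on older kernels
+ * they fall back to the __kc_* address-list routines provided above.
+ */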
+#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM
+/* if someone backports this, hopefully they backport as a #define.
+ * declare it as zero on older kernels so that if it gets OR'd in
+ * it won't affect anything, therefore preventing core driver changes
+ */
+#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0
+#define SKB_GSO_UDP_TUNNEL_CSUM 0
+#endif
+void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len,
+			gfp_t gfp);
+#define devm_kmemdup __kc_devm_kmemdup
+
+#else
+#if ( ( LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0) ) && \
+      ! ( SLE_VERSION_CODE && ( SLE_VERSION_CODE >= SLE_VERSION(12,4,0)) ) )
+#define HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY
+#endif /* >= 3.16.0 && < 4.13.0 && !(SLES >= 12sp4) */
+#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+#endif /* 3.16.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 8) && \
+      RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)) && \
+    !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2))
+#ifndef timespec64
+#define timespec64 timespec
+static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
+{
+	return ts;
+}
+static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
+{
+	return ts64;
+}
+#define timespec64_equal timespec_equal
+#define timespec64_compare timespec_compare
+#define set_normalized_timespec64 set_normalized_timespec
+#define timespec64_add_safe timespec_add_safe
+#define timespec64_add timespec_add
+#define timespec64_sub timespec_sub
+#define timespec64_valid timespec_valid
+#define timespec64_valid_strict timespec_valid_strict
+#define timespec64_to_ns timespec_to_ns
+#define ns_to_timespec64 ns_to_timespec
+#define ktime_to_timespec64 ktime_to_timespec
+#define ktime_get_ts64 ktime_get_ts
+#define ktime_get_real_ts64 ktime_get_real_ts
+#define timespec64_add_ns timespec_add_ns
+#endif /* timespec64 */
+#endif /* !(RHEL6.8= RHEL_RELEASE_VERSION(6,8) && \
+	    RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))
+static inline void ktime_get_real_ts64(struct timespec64 *ts)
+{
+	*ts = ktime_to_timespec64(ktime_get_real());
+}
+
+static inline void ktime_get_ts64(struct timespec64 *ts)
+{
+	*ts = ktime_to_timespec64(ktime_get());
+}
+#endif
+
+#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define hlist_add_behind(_a, _b) hlist_add_after(_b, _a)
+#endif
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))
+#endif /* RHEL_RELEASE_CODE < RHEL7.5 */
+
+#if RHEL_RELEASE_CODE && \
+    RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,3) && \
+    RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,3)
+static inline u64 ktime_get_ns(void)
+{
+	return ktime_to_ns(ktime_get());
+}
+
+static inline u64 ktime_get_real_ns(void)
+{
+	return ktime_to_ns(ktime_get_real());
+}
+
+static inline u64 ktime_get_boot_ns(void)
+{
+	return ktime_to_ns(ktime_get_boottime());
+}
+#endif /* RHEL < 7.3 */
+
+#else
+#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT
+#include
+#define HAVE_RHASHTABLE
+#endif /* 3.17.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
+#ifndef NO_PTP_SUPPORT
+#include
+struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb);
+void __kc_skb_complete_tx_timestamp(struct sk_buff *skb,
+				    struct skb_shared_hwtstamps *hwtstamps);
+#define skb_clone_sk __kc_skb_clone_sk
+#define skb_complete_tx_timestamp __kc_skb_complete_tx_timestamp
+#endif
+#if (!(RHEL_RELEASE_CODE && 
(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +u32 __kc_eth_get_headlen(const struct net_device *dev, unsigned char *data, + unsigned int max_len); +#else +unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len); +#endif /* !RHEL >= 8.2 */ + +#define eth_get_headlen __kc_eth_get_headlen +#ifndef ETH_P_XDSA +#define ETH_P_XDSA 0x00F8 +#endif +/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 1)) +#define HAVE_SKBUFF_CSUM_LEVEL +#endif /* >= RH 7.1 */ + +/* RHEL 7.3 backported xmit_more */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3)) +#define HAVE_SKB_XMIT_MORE +#endif /* >= RH 7.3 */ + +#undef GENMASK +#define GENMASK(h, l) \ + (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) +#undef GENMASK_ULL +#define GENMASK_ULL(h, l) \ + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + +#else /* 3.18.0 */ +#define HAVE_SKBUFF_CSUM_LEVEL +#define HAVE_SKB_XMIT_MORE +#define HAVE_SKB_INNER_PROTOCOL_TYPE +#endif /* 3.18.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 4)) +#else +#define HAVE_NDO_FEATURES_CHECK +#endif /* 3.18.4 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 13)) +#ifndef WRITE_ONCE +#define WRITE_ONCE(x, val) ({ ACCESS_ONCE(x) = (val); }) +#endif +#endif /* 3.18.13 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) +/* netdev_phys_port_id renamed to netdev_phys_item_id */ +#define netdev_phys_item_id netdev_phys_port_id + +static inline void _kc_napi_complete_done(struct napi_struct *napi, + int __always_unused work_done) { + napi_complete(napi); +} +/* don't use our backport if the distro kernels already have it */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +#define napi_complete_done _kc_napi_complete_done +#endif + +int _kc_bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, int nmaskbits); +#define bitmap_print_to_pagebuf _kc_bitmap_print_to_pagebuf + +#ifndef NETDEV_RSS_KEY_LEN +#define NETDEV_RSS_KEY_LEN (13 * 4) +#endif +#if (!(RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))))) +#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len) +#endif /* RHEL_RELEASE_CODE */ +void __kc_netdev_rss_key_fill(void *buffer, size_t len); +#define SPEED_20000 20000 +#define SPEED_40000 40000 +#ifndef dma_rmb +#define dma_rmb() rmb() +#endif +#ifndef dev_alloc_pages +#ifndef NUMA_NO_NODE +#define NUMA_NO_NODE -1 +#endif +#define dev_alloc_pages(_order) alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC), (_order)) +#endif +#ifndef dev_alloc_page +#define dev_alloc_page() dev_alloc_pages(0) +#endif +#if !defined(eth_skb_pad) && !defined(skb_put_padto) +/** + * __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. 
Returns zero on + * success. The skb is freed on error. + */ +static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + + if (unlikely(size < len)) { + len -= size; + if (skb_pad(skb, len)) + return -ENOMEM; + __skb_put(skb, len); + } + return 0; +} +#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len) + +static inline int __kc_eth_skb_pad(struct sk_buff *skb) +{ + return __kc_skb_put_padto(skb, ETH_ZLEN); +} +#define eth_skb_pad(skb) __kc_eth_skb_pad(skb) +#endif /* eth_skb_pad && skb_put_padto */ + +#ifndef SKB_ALLOC_NAPI +/* RHEL 7.2 backported napi_alloc_skb and friends */ +static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length) +{ + return netdev_alloc_skb_ip_align(napi->dev, length); +} +#define napi_alloc_skb(napi, len) __kc_napi_alloc_skb(napi, len) +#define __napi_alloc_skb(napi, len, mask) __kc_napi_alloc_skb(napi, len) +#endif /* SKB_ALLOC_NAPI */ +#define HAVE_CONFIG_PM_RUNTIME +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0))) +#define HAVE_RXFH_HASHFUNC +#endif /* 6.7 < RHEL < 7.0 */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 1)) +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* RHEL > 7.1 */ +#ifndef napi_schedule_irqoff +#define napi_schedule_irqoff napi_schedule +#endif +#ifndef READ_ONCE +#define READ_ONCE(_x) ACCESS_ONCE(_x) +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) +#define HAVE_NDO_FDB_ADD_VID +#endif +#ifndef ETH_MODULE_SFF_8636 +#define ETH_MODULE_SFF_8636 0x3 +#endif +#ifndef ETH_MODULE_SFF_8636_LEN +#define ETH_MODULE_SFF_8636_LEN 256 +#endif +#ifndef ETH_MODULE_SFF_8436 +#define ETH_MODULE_SFF_8436 0x4 +#endif +#ifndef ETH_MODULE_SFF_8436_LEN +#define ETH_MODULE_SFF_8436_LEN 256 +#endif +#ifndef writel_relaxed +#define writel_relaxed writel +#endif +#else /* 3.19.0 */ +#define HAVE_NDO_FDB_ADD_VID +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* 3.19.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 20, 0)) +/* vlan_tx_xx functions got renamed to skb_vlan */ +#ifndef skb_vlan_tag_get +#define skb_vlan_tag_get vlan_tx_tag_get +#endif +#ifndef skb_vlan_tag_present +#define skb_vlan_tag_present vlan_tx_tag_present +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 1)) +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif +#else +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif /* 3.20.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)) +/* Definition for CONFIG_OF was introduced earlier */ +#if !defined(CONFIG_OF) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) +static inline struct device_node * +pci_device_to_OF_node(const struct pci_dev __always_unused *pdev) { return NULL; } +#else /* !CONFIG_OF && RHEL < 7.3 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* !CONFIG_OF && RHEL < 7.3 */ +#else /* < 4.0 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* < 4.0 */ + +/*****************************************************************************/ +#if 
(LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)) +#ifndef NO_PTP_SUPPORT +#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#include +#else +#include +#endif +static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta) +{ + tc->nsec += delta; +} + +static inline struct net_device * +of_find_net_device_by_node(struct device_node __always_unused *np) +{ + return NULL; +} + +#define timecounter_adjtime __kc_timecounter_adjtime +#endif +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 2, 0)))) +#define HAVE_NDO_SET_VF_RSS_QUERY_EN +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) +#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +#define HAVE_RHEL7_EXTENDED_NDO_SET_TX_MAXRATE +#define HAVE_NDO_SET_TX_MAXRATE +#endif +#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + (SLE_VERSION_CODE > SLE_VERSION(12,1,0))) +unsigned int _kc_cpumask_local_spread(unsigned int i, int node); +#define cpumask_local_spread _kc_cpumask_local_spread +#endif +#ifdef HAVE_RHASHTABLE +#define rhashtable_loopup_fast(ht, key, params) \ + do { \ + (void)params; \ + rhashtable_lookup((ht), (key)); \ + } while (0) + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) +#define rhashtable_insert_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_insert((ht), (obj), GFP_KERNEL); \ + } while (0) + +#define rhashtable_remove_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_remove((ht), (obj), GFP_KERNEL); \ + } while (0) + +#else /* >= 3,19,0 */ +#define rhashtable_insert_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_insert((ht), (obj)); \ + } while (0) + +#define rhashtable_remove_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_remove((ht), (obj)); \ + } while (0) + +#endif /* 3,19,0 */ +#endif /* HAVE_RHASHTABLE */ +#else /* >= 4,1,0 */ +#define HAVE_NDO_GET_PHYS_PORT_NAME +#define HAVE_PTP_CLOCK_INFO_GETTIME64 +#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +#define HAVE_PASSTHRU_FEATURES_CHECK +#define HAVE_NDO_SET_VF_RSS_QUERY_EN +#define HAVE_NDO_SET_TX_MAXRATE +#endif /* 4,1,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 9)) +#if (!(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) && \ + !((SLE_VERSION_CODE == SLE_VERSION(11, 3, 0)) && \ + (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(0, 47, 71))) && \ + !((SLE_VERSION_CODE == SLE_VERSION(11, 4, 0)) && \ + (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(65, 0, 0))) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12, 1, 0))) +static inline bool page_is_pfmemalloc(struct page __maybe_unused *page) +{ +#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC + return page->pfmemalloc; +#else + return false; +#endif +} +#endif /* !RHEL7.2+ && !SLES11sp3(3.0.101-0.47.71+ update) && !SLES11sp4(3.0.101-65+ update) & !SLES12sp1+ */ +#else +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#endif /* 4.1.9 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12, 1, 0))) +#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFULL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000ULL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32 +static inline __u64 
ethtool_get_flow_spec_ring(__u64 ring_cookie) +{ + return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie; +}; + +static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) +{ + return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >> + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; +}; +#endif /* ! RHEL >= 7.2 && ! SLES >= 12.1 */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) +#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) +#if (!((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +static inline bool pci_ari_enabled(struct pci_bus *bus) +{ + return bus->self && bus->self->ari_enabled; +} +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) +#define HAVE_VF_STATS +#endif /* (RHEL7.2+) */ +#endif /* !(RHEL6.8+ || RHEL7.2+) */ +#else +static inline bool pci_ari_enabled(struct pci_bus *bus) +{ + return false; +} +#endif /* 2.6.27 */ +#else +#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT +#define HAVE_VF_STATS +#endif /* 4.2.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +/** + * _kc_flow_dissector_key_ipv4_addrs: + * @src: source ip address + * @dst: destination ip address + */ +struct _kc_flow_dissector_key_ipv4_addrs { + __be32 src; + __be32 dst; +}; + +/** + * _kc_flow_dissector_key_ipv6_addrs: + * @src: source ip address + * @dst: destination ip address + */ +struct _kc_flow_dissector_key_ipv6_addrs { + struct in6_addr src; + struct in6_addr dst; +}; + +/** + * _kc_flow_dissector_key_addrs: + * @v4addrs: IPv4 addresses + * @v6addrs: IPv6 addresses + */ +struct _kc_flow_dissector_key_addrs { + union { + struct _kc_flow_dissector_key_ipv4_addrs v4addrs; + struct _kc_flow_dissector_key_ipv6_addrs v6addrs; + }; +}; + +/** + * _kc_flow_dissector_key_tp_ports: + * @ports: port numbers of Transport header + * src: source port number + * dst: destination port number + */ +struct _kc_flow_dissector_key_ports { + union { + __be32 ports; + struct { + __be16 src; + __be16 dst; + }; + }; +}; + +/** + * _kc_flow_dissector_key_basic: + * @n_proto: Network header protocol (eg. IPv4/IPv6) + * @ip_proto: Transport header protocol (eg. TCP/UDP) + * @padding: padding for alignment + */ +struct _kc_flow_dissector_key_basic { + __be16 n_proto; + u8 ip_proto; + u8 padding; +}; + +struct _kc_flow_keys { + struct _kc_flow_dissector_key_basic basic; + struct _kc_flow_dissector_key_ports ports; + struct _kc_flow_dissector_key_addrs addrs; +}; + +/* These are all the include files for kernels inside this #ifdef block that + * have any reference to the in kernel definition of struct flow_keys. The + * reason for putting them here is to make 100% sure that these files do not get + * included after re-defining flow_keys to _kc_flow_keys. This is done to + * prevent any possible ABI issues that this structure re-definition could case. 
+ */ +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)) || \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) || \ + SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) +#include +#endif /* (>= 3.3.0 && < 4.2.0) || >= RHEL 6.7 || >= SLE 11.4 */ +#if (LINUX_VERSION_CODE == KERNEL_VERSION(4,2,0)) +#include +#endif /* 4.2.0 */ +#include +#include +#include +#include + +#define flow_keys _kc_flow_keys +bool +_kc_skb_flow_dissect_flow_keys(const struct sk_buff *skb, + struct flow_keys *flow, + unsigned int __always_unused flags); +#define skb_flow_dissect_flow_keys _kc_skb_flow_dissect_flow_keys +#endif /* ! >= RHEL 7.4 && ! >= SLES 12.2 */ + +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +#include +#endif /* >= RHEL7.3 || >= SLE12sp2 */ +#else /* >= 4.3.0 */ +#include +#endif /* 4.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3)) +#define HAVE_NDO_SET_VF_TRUST +#endif /* (RHEL_RELEASE >= 7.3) */ +#ifndef CONFIG_64BIT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) +#include /* 32-bit readq/writeq */ +#else /* 3.3.0 => 4.3.x */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)) +#include +#endif /* 2.6.26 => 3.3.0 */ +#ifndef readq +static inline __u64 readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl(p); + high = readl(p + 1); + + return low + ((u64)high << 32); +} +#define readq readq +#endif + +#ifndef writeq +static inline void writeq(__u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#define writeq writeq +#endif +#endif /* < 3.3.0 */ +#endif /* !CONFIG_64BIT */ +#else /* < 4.4.0 */ +#define HAVE_NDO_SET_VF_TRUST + +#ifndef CONFIG_64BIT +#include /* 32-bit readq/writeq */ +#endif /* !CONFIG_64BIT */ +#endif /* 4.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) +/* protect against a likely backport */ +#ifndef NETIF_F_CSUM_MASK +#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM +#endif /* NETIF_F_CSUM_MASK */ +#ifndef NETIF_F_SCTP_CRC +#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM +#endif /* NETIF_F_SCTP_CRC */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3))) +#define eth_platform_get_mac_address _kc_eth_platform_get_mac_address +int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, + u8 *mac_addr __maybe_unused); +#endif /* !(RHEL_RELEASE >= 7.3) */ +#else /* 4.5.0 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) +#define HAVE_GENEVE_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* < 4.8.0 */ +#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD +#define HAVE_NETDEV_UPPER_INFO +#endif /* 4.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 3)) +static inline unsigned char *skb_checksum_start(const struct sk_buff *skb) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)) + return skb->head + skb->csum_start; +#else /* < 2.6.22 */ + return skb_transport_header(skb); +#endif +} +#endif + +#if !(UBUNTU_VERSION_CODE && \ + UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4, 4, 
0, 21)) && \ + !(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0))) +static inline void napi_consume_skb(struct sk_buff *skb, + int __always_unused budget) +{ + dev_consume_skb_any(skb); +} + +#endif /* UBUNTU 4,4,0,21, RHEL 7.2, SLES12 SP3 */ + +#if !((LINUX_VERSION_CODE == KERNEL_VERSION(4, 4, 131)) && \ + defined(NGBE_SUPPORT_KYLIN)) +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0))) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) +static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) +{ + *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); +} +#endif +#endif +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) +static inline void page_ref_inc(struct page *page) +{ + get_page(page); +} +#else +#define HAVE_PAGE_COUNT_BULK_UPDATE +#endif +#ifndef IPV4_USER_FLOW +#define IPV4_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */ +#endif + +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_TC_SETUP_CLSFLOWER +#define HAVE_TC_FLOWER_ENC +#endif + +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +#define HAVE_TC_SETUP_CLSU32 +#endif + +#if (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)) +#define HAVE_TC_SETUP_CLSFLOWER +#endif + +#else /* >= 4.6.0 */ +#define HAVE_PAGE_COUNT_BULK_UPDATE +#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC +#define HAVE_PTP_CROSSTIMESTAMP +#define HAVE_TC_SETUP_CLSFLOWER +#define HAVE_TC_SETUP_CLSU32 +#endif /* 4.6.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)) +#if ((SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))) +#define HAVE_NETIF_TRANS_UPDATE +#endif /* SLES12sp3+ || RHEL7.4+ */ +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ||\ + (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_ETHTOOL_25G_BITS +#define HAVE_ETHTOOL_50G_BITS +#define HAVE_ETHTOOL_100G_BITS +#endif /* RHEL7.3+ || SLES12sp3+ */ +#else /* 4.7.0 */ +#define HAVE_NETIF_TRANS_UPDATE +#define HAVE_ETHTOOL_25G_BITS +#define HAVE_ETHTOOL_50G_BITS +#define HAVE_ETHTOOL_100G_BITS +#define HAVE_TCF_MIRRED_REDIRECT +#endif /* 4.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4,8,0)) +#if ((SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))) +#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +#endif /* SLES12sp3+ || RHEL7.4+ */ +#else /* 4.8.0 */ +#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +#endif /* 4.8.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) +enum udp_parsable_tunnel_type { + UDP_TUNNEL_TYPE_VXLAN, + UDP_TUNNEL_TYPE_GENEVE, +}; +struct udp_tunnel_info { + unsigned short type; + sa_family_t sa_family; + __be16 port; +}; +#endif + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)) +#define HAVE_TCF_EXTS_TO_LIST +#endif + +#if !((LINUX_VERSION_CODE == KERNEL_VERSION(4, 4, 131)) && \ + defined(NGBE_SUPPORT_KYLIN)) +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= 
SLE_VERSION(12,3,0))) &&\ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +static inline int +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +pci_request_io_regions(struct pci_dev *pdev, char *name) +#else +pci_request_io_regions(struct pci_dev *pdev, const char *name) +#endif +{ + return pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO), name); +} + +static inline void +pci_release_io_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO)); +} + +static inline int +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +pci_request_mem_regions(struct pci_dev *pdev, char *name) +#else +pci_request_mem_regions(struct pci_dev *pdev, const char *name) +#endif +{ + return pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM), name); +} + +static inline void +pci_release_mem_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +} +#endif /* !SLE_VERSION(12,3,0) */ +#endif /* !NGBE_SUPPORT_KYLIN */ +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) ||\ + (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_ETHTOOL_NEW_50G_BITS +#endif /* RHEL7.4+ || SLES12sp3+ */ +#else +#define HAVE_UDP_ENC_RX_OFFLOAD +#define HAVE_TCF_EXTS_TO_LIST +#define HAVE_ETHTOOL_NEW_50G_BITS +#endif /* 4.8.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)) +#ifdef HAVE_TC_SETUP_CLSFLOWER +#if (!(RHEL_RELEASE_CODE) && !(SLE_VERSION_CODE) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0)))) +#define HAVE_TC_FLOWER_VLAN_IN_TAGS +#endif /* !RHEL_RELEASE_CODE && !SLE_VERSION_CODE || = RHEL_RELEASE_VERSION(7,4)) +#define HAVE_ETHTOOL_NEW_1G_BITS +#define HAVE_ETHTOOL_NEW_10G_BITS +#endif /* RHEL7.4+ */ +#if (!(SLE_VERSION_CODE) && !(RHEL_RELEASE_CODE)) || \ + SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0)) || \ + RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)) +#define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a) +#endif /* !SLE_VERSION_CODE && !RHEL_RELEASE_CODE || (SLES <= 12.3.0) || (RHEL <= 7.5) */ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,4)) +static inline void bitmap_from_u64(unsigned long *dst, u64 mask) +{ + dst[0] = mask & ULONG_MAX; + + if (sizeof(mask) > sizeof(unsigned long)) + dst[1] = mask >> 32; +} +#endif /* = RHEL_RELEASE_VERSION(7,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) && \ + !(UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,13,0,16))) +#if !(defined(NGBE_SUPPORT_KYLIN)) + +static inline bool eth_type_vlan(__be16 ethertype) +{ + switch (ethertype) { + case htons(ETH_P_8021Q): +#ifdef ETH_P_8021AD + case htons(ETH_P_8021AD): +#endif + return true; + default: + return false; + } +} +#endif +#endif /* Linux < 4.9 || RHEL < 7.4 || SLES < 12.3 || Ubuntu < 4.3.0-16 */ +#else /* >=4.9 */ +#define HAVE_FLOW_DISSECTOR_KEY_VLAN_PRIO +#define HAVE_ETHTOOL_NEW_1G_BITS +#define HAVE_ETHTOOL_NEW_10G_BITS +#endif /* KERNEL_VERSION(4.9.0) */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) +/* SLES 12.3 and RHEL 7.5 backported this interface */ +#if (!SLE_VERSION_CODE && !RHEL_RELEASE_CODE) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +static inline bool _kc_napi_complete_done2(struct 
napi_struct *napi, + int __always_unused work_done) +{ + /* it was really hard to get napi_complete_done to be safe to call + * recursively without running into our own kcompat, so just use + * napi_complete + */ + napi_complete(napi); + + /* true means that the stack is telling the driver to go-ahead and + * re-enable interrupts + */ + return true; +} + +#ifdef napi_complete_done +#undef napi_complete_done +#endif +#define napi_complete_done _kc_napi_complete_done2 +#endif /* sles and rhel exclusion for < 4.10 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_DEV_WALK_API +#define HAVE_ETHTOOL_NEW_2500MB_BITS +#define HAVE_ETHTOOL_5G_BITS +#endif /* RHEL7.4+ */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE == SLE_VERSION(12,3,0))) +#define HAVE_STRUCT_DMA_ATTRS +#endif /* (SLES == 12.3.0) */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_NETDEVICE_MIN_MAX_MTU +#endif /* (SLES >= 12.3.0) */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_STRUCT_DMA_ATTRS +#define HAVE_RHEL7_EXTENDED_MIN_MAX_MTU +#define HAVE_NETDEVICE_MIN_MAX_MTU +#define CENTOS_MTU_PORT_UPDATE +#endif +#if (!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \ + !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) +#ifndef dma_map_page_attrs +#define dma_map_page_attrs __kc_dma_map_page_attrs +static inline dma_addr_t __kc_dma_map_page_attrs(struct device *dev, + struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + return dma_map_page(dev, page, offset, size, dir); +} +#endif + +#ifndef dma_unmap_page_attrs +#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs +static inline void __kc_dma_unmap_page_attrs(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + dma_unmap_page(dev, addr, size, dir); +} +#endif + +static inline void __page_frag_cache_drain(struct page *page, + unsigned int count) +{ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + if (!page_ref_sub_and_test(page, count)) + return; + + init_page_count(page); +#else + BUG_ON(count > 1); + if (!count) + return; +#endif + __free_pages(page, compound_order(page)); +} +#endif /* !SLE_VERSION(12,3,0) && !RHEL_VERSION(7,5) */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#endif + +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(15,0,0))) ||\ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,4)))) +#define page_frag_free __free_page_frag +#endif +#ifndef ETH_MIN_MTU +#define ETH_MIN_MTU 68 +#endif /* ETH_MIN_MTU */ +#else /* >= 4.10 */ +#define HAVE_TC_FLOWER_ENC +#define HAVE_NETDEVICE_MIN_MAX_MTU +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#define HAVE_NETDEV_TC_RESETS_XPS +#define HAVE_XPS_QOS_SUPPORT +#define HAVE_DEV_WALK_API +#define HAVE_ETHTOOL_NEW_2500MB_BITS +#define HAVE_ETHTOOL_5G_BITS +/* kernel 4.10 onwards, as part of busy_poll rewrite, new state were added + * which is part of NAPI:state. 
If NAPI:state=NAPI_STATE_IN_BUSY_POLL, + * it means napi_poll is invoked in busy_poll context + */ +#define HAVE_NAPI_STATE_IN_BUSY_POLL +#define HAVE_TCF_MIRRED_EGRESS_REDIRECT +#endif /* 4.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)) +#ifdef CONFIG_NET_RX_BUSY_POLL +#define HAVE_NDO_BUSY_POLL +#endif /* CONFIG_NET_RX_BUSY_POLL */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) +#define HAVE_VOID_NDO_GET_STATS64 +#endif /* (SLES >= 12.3.0) && (RHEL >= 7.5) */ + +static inline void _kc_dev_kfree_skb_irq(struct sk_buff *skb) +{ + if (!skb) + return; + dev_kfree_skb_irq(skb); +} + +#undef dev_kfree_skb_irq +#define dev_kfree_skb_irq _kc_dev_kfree_skb_irq + +static inline void _kc_dev_consume_skb_irq(struct sk_buff *skb) +{ + if (!skb) + return; + dev_consume_skb_irq(skb); +} + +#undef dev_consume_skb_irq +#define dev_consume_skb_irq _kc_dev_consume_skb_irq + +static inline void _kc_dev_kfree_skb_any(struct sk_buff *skb) +{ + if (!skb) + return; + dev_kfree_skb_any(skb); +} + +#undef dev_kfree_skb_any +#define dev_kfree_skb_any _kc_dev_kfree_skb_any + +static inline void _kc_dev_consume_skb_any(struct sk_buff *skb) +{ + if (!skb) + return; + dev_consume_skb_any(skb); +} + +#undef dev_consume_skb_any +#define dev_consume_skb_any _kc_dev_consume_skb_any + +#else /* > 4.11 */ +#define HAVE_VOID_NDO_GET_STATS64 +#define HAVE_VM_OPS_FAULT_NO_VMA +#endif /* 4.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) +/* The RHEL 7.7+ NL_SET_ERR_MSG_MOD triggers unused parameter warnings */ +#undef NL_SET_ERR_MSG_MOD +#endif +#ifndef NL_SET_ERR_MSG_MOD +#define NL_SET_ERR_MSG_MOD(extack, msg) \ + do { \ + uninitialized_var(extack); \ + pr_err(KBUILD_MODNAME ": " msg); \ + } while (0) +#endif /* !NL_SET_ERR_MSG_MOD */ +#else /* >= 4.12 */ +#define HAVE_NAPI_BUSY_LOOP +#endif /* 4.12 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0)) +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_TCF_EXTS_HAS_ACTION +#endif +#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#endif /* SLES >= 12sp4 */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#if !(defined(NGBE_SUPPORT_KYLIN)) + +#define UUID_SIZE 16 +typedef struct { + __u8 b[UUID_SIZE]; +} uuid_t; +#define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ +((uuid_t) \ +{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \ + ((b) >> 8) & 0xff, (b) & 0xff, \ + ((c) >> 8) & 0xff, (c) & 0xff, \ + (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) + +static inline bool uuid_equal(const uuid_t *u1, const uuid_t *u2) +{ + return memcmp(u1, u2, sizeof(uuid_t)) == 0; +} +#endif +#else +#define HAVE_METADATA_PORT_INFO +#endif /* !(RHEL >= 7.5) && !(SLES >= 12.4) */ +#else /* > 4.13 */ +#define HAVE_METADATA_PORT_INFO +#define HAVE_HWTSTAMP_FILTER_NTP_ALL +#define 
HAVE_NDO_SETUP_TC_CHAIN_INDEX +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#define HAVE_PTP_CLOCK_DO_AUX_WORK +#endif /* 4.13.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef ethtool_link_ksettings_del_link_mode +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode) \ + __clear_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) +#endif +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE == SLE_VERSION(12,3,0))) +#define HAVE_NOT_SUPPORTED_1000baseX_Full +#endif + +#endif /* ETHTOOL_GLINKSETTINGS */ + +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC +#endif + +#define TIMER_DATA_TYPE unsigned long +#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) + +#define timer_setup(timer, callback, flags) \ + __setup_timer((timer), (TIMER_FUNC_TYPE)(callback), \ + (TIMER_DATA_TYPE)(timer), (flags)) + +#define from_timer(var, callback_timer, timer_fieldname) \ + container_of(callback_timer, typeof(*var), timer_fieldname) + +#ifndef xdp_do_flush_map +#define xdp_do_flush_map() do {} while (0) +#endif +struct _kc_xdp_buff { + void *data; + void *data_end; + void *data_hard_start; +}; +#define xdp_buff _kc_xdp_buff +struct _kc_bpf_prog { +}; +#define bpf_prog _kc_bpf_prog +#ifndef DIV_ROUND_DOWN_ULL +#define DIV_ROUND_DOWN_ULL(ll, d) \ + ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; }) +#endif /* DIV_ROUND_DOWN_ULL */ +#else /* > 4.14 */ +#define HAVE_XDP_SUPPORT +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_TCF_EXTS_HAS_ACTION +#endif /* 4.14.0 */ + +/*****************************************************************************/ +#ifndef ETHTOOL_GLINKSETTINGS + +#define __ETHTOOL_LINK_MODE_MASK_NBITS 32 +#define ETHTOOL_LINK_MASK_SIZE BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS) + +/** + * struct ethtool_link_ksettings + * @link_modes: supported and advertising, single item arrays + * @link_modes.supported: bitmask of supported link speeds + * @link_modes.advertising: bitmask of currently advertised speeds + * @base: base link details + * @base.speed: current link speed + * @base.port: current port type + * @base.duplex: current duplex mode + * @base.autoneg: current autonegotiation settings + * + * This struct and the following macros provide a way to support the old + * ethtool get/set_settings API on older kernels, but in the style of the new + * GLINKSETTINGS API. In this way, the same code can be used to support both + * APIs as seemlessly as possible. + * + * It should be noted the old API only has support up to the first 32 bits. 
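+ * + * As a small usage sketch: a driver can call + * ethtool_link_ksettings_zero_link_mode(ks, supported) and then + * ethtool_link_ksettings_add_link_mode(ks, supported, 1000baseT_Full); on + * kernels without ETHTOOL_GLINKSETTINGS this simply sets the + * SUPPORTED_1000baseT_Full bit in a 32-bit mask rather than a full + * link-mode bitmap.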
+ */ +struct ethtool_link_ksettings { + struct { + u32 speed; + u8 port; + u8 duplex; + u8 autoneg; + } base; + struct { + unsigned long supported[ETHTOOL_LINK_MASK_SIZE]; + unsigned long advertising[ETHTOOL_LINK_MASK_SIZE]; + } link_modes; +}; + +#define ETHTOOL_LINK_NAME_advertising(mode) ADVERTISED_ ## mode +#define ETHTOOL_LINK_NAME_supported(mode) SUPPORTED_ ## mode +#define ETHTOOL_LINK_NAME(name) ETHTOOL_LINK_NAME_ ## name +#define ETHTOOL_LINK_CONVERT(name, mode) ETHTOOL_LINK_NAME(name)(mode) + +/** + * ethtool_link_ksettings_zero_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + */ +#define ethtool_link_ksettings_zero_link_mode(ptr, name)\ + (*((ptr)->link_modes.name) = 0x0) + +/** + * ethtool_link_ksettings_add_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to add + */ +#define ethtool_link_ksettings_add_link_mode(ptr, name, mode)\ + (*((ptr)->link_modes.name) |= (typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode)) + +/** + * ethtool_link_ksettings_del_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to delete + */ +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode)\ + (*((ptr)->link_modes.name) &= ~(typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode)) + +/** + * ethtool_link_ksettings_test_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to add + */ +#define ethtool_link_ksettings_test_link_mode(ptr, name, mode)\ + (!!(*((ptr)->link_modes.name) & ETHTOOL_LINK_CONVERT(name, mode))) + +/** + * _kc_ethtool_ksettings_to_cmd - Convert ethtool_link_ksettings to ethtool_cmd + * @ks: ethtool_link_ksettings struct + * @cmd: ethtool_cmd struct + * + * Convert an ethtool_link_ksettings structure into the older ethtool_cmd + * structure. We provide this in ngbe_kcompat.h so that drivers can easily + * implement the older .{get|set}_settings as wrappers around the new api. + * Hence, we keep it prefixed with _kc_ to make it clear this isn't actually + * a real function in the kernel. 
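+ * + * A minimal wrapper sketch: fill a struct ethtool_link_ksettings in the + * driver's new-style handler, then have the old .get_settings callback call + * _kc_ethtool_ksettings_to_cmd(&ks, ecmd) to copy the result into the legacy + * struct ethtool_cmd.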
+ */ +static inline void +_kc_ethtool_ksettings_to_cmd(struct ethtool_link_ksettings *ks, + struct ethtool_cmd *cmd) +{ + cmd->supported = (u32)ks->link_modes.supported[0]; + cmd->advertising = (u32)ks->link_modes.advertising[0]; + ethtool_cmd_speed_set(cmd, ks->base.speed); + cmd->duplex = ks->base.duplex; + cmd->autoneg = ks->base.autoneg; + cmd->port = ks->base.port; +} + +#endif /* !ETHTOOL_GLINKSETTINGS */ + +/*****************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)))) +#define phy_speed_to_str _kc_phy_speed_to_str +const char *_kc_phy_speed_to_str(int speed); +#else /* (LINUX >= 4.14.0) || (SLES > 12.3.0) || (RHEL > 7.5) */ +#include +#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0)))) +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_TCF_BLOCK +#define TC_SETUP_MQPRIO 0 +#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO +#else /* RHEL >= 7.6 || SLES >= 15.1 */ +#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO +#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */ +void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, + struct ethtool_link_ksettings *src); +#define ethtool_intersect_link_masks _kc_ethtool_intersect_link_masks +#else /* >= 4.15 */ +#define HAVE_NDO_BPF +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_TCF_BLOCK +#endif /* 4.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,4,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +/* The return value of the strscpy() and strlcpy() functions is different. + * This could be potentially hazard for the future. + * To avoid this the void result is forced. + * So it is not possible use this function with the return value. + * Return value is required in kernel 4.3 through 4.15 + */ +#define strscpy(...) (void)(strlcpy(__VA_ARGS__)) +#endif /* !RHEL >= 7.7 && !SLES12sp4+ && !SLES15sp1+ */ + +#define pci_printk(level, pdev, fmt, arg...) \ + dev_printk(level, &(pdev)->dev, fmt, ##arg) +#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg) +#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg) +#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg) +#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg) +#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg) +#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg) +#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg) +#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg) + +#ifndef array_index_nospec +static inline unsigned long _kc_array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + /* + * Always calculate and emit the mask even if the compiler + * thinks the mask is not needed. 
The compiler does not take + * into account the value of @index under speculation. + */ + OPTIMIZER_HIDE_VAR(index); + return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1); +} + +#define array_index_nospec(index, size) \ +({ \ + typeof(index) _i = (index); \ + typeof(size) _s = (size); \ + unsigned long _mask = _kc_array_index_mask_nospec(_i, _s); \ + \ + BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ + BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ + \ + (typeof(_i)) (_i & _mask); \ +}) +#endif /* array_index_nospec */ +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0)))) +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#include +#if 0 +static inline bool +tc_cls_can_offload_and_chain0(const struct net_device *dev, + struct tc_cls_common_offload *common) +{ + if (!tc_can_offload(dev)) + return false; + if (common->chain_index) + return false; + + return true; +} +#endif +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */ +#ifndef sizeof_field +#define sizeof_field(TYPE, MEMBER) (sizeof((((TYPE *)0)->MEMBER))) +#endif /* sizeof_field */ +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,5,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0) || \ + SLE_VERSION_CODE >= SLE_VERSION(15,1,0)) +/* + * Copy bitmap and clear tail bits in last word. + */ +static inline void +bitmap_copy_clear_tail(unsigned long *dst, const unsigned long *src, unsigned int nbits) +{ + bitmap_copy(dst, src, nbits); + if (nbits % BITS_PER_LONG) + dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits); +} + +/* + * On 32-bit systems bitmaps are represented as u32 arrays internally, and + * therefore conversion is not needed when copying data from/to arrays of u32. 
+ */ +#if BITS_PER_LONG == 64 +void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits); +#else +#define bitmap_from_arr32(bitmap, buf, nbits) \ + bitmap_copy_clear_tail((unsigned long *) (bitmap), \ + (const unsigned long *) (buf), (nbits)) +#endif /* BITS_PER_LONG == 64 */ +#endif /* !(RHEL >= 8.0) && !(SLES >= 12.5 && SLES < 15.0 || SLES >= 15.1) */ +#else /* >= 4.16 */ +#include +#define HAVE_XDP_BUFF_RXQ +#define HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#define HAVE_TC_FLOWER_OFFLOAD_COMMON_EXTACK +#define HAVE_TCF_MIRRED_DEV +#define HAVE_VF_STATS_DROPPED +#endif /* 4.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0)) +#include +#include +#define PCIE_SPEED_16_0GT 0x17 +#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */ +#define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */ +#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */ +void _kc_pcie_print_link_status(struct pci_dev *dev); +#define pcie_print_link_status _kc_pcie_print_link_status +#else /* >= 4.17.0 */ +#define HAVE_XDP_BUFF_IN_XDP_H +#endif /* 4.17.0 */ + +/*****************************************************************************/ +#if IS_ENABLED(CONFIG_NET_DEVLINK) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)) +#include +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) && \ + (SLE_VERSION_CODE < SLE_VERSION(15,1,0)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,7))) +#ifndef HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR +enum devlink_port_flavour { + DEVLINK_PORT_FLAVOUR_PHYSICAL, + DEVLINK_PORT_FLAVOUR_CPU, + DEVLINK_PORT_FLAVOUR_DSA, + DEVLINK_PORT_FLAVOUR_PCI_PF, + DEVLINK_PORT_FLAVOUR_PCI_VF, +}; +#endif +#endif /* <4.18.0 && <SLES15sp1 && <RHEL7.7 */ +#endif /* CONFIG_NET_DEVLINK */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) +#ifdef NETIF_F_HW_L2FW_DOFFLOAD +#include <linux/if_macvlan.h> +#ifndef macvlan_supports_dest_filter +#define macvlan_supports_dest_filter _kc_macvlan_supports_dest_filter +static inline bool _kc_macvlan_supports_dest_filter(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->mode == MACVLAN_MODE_PRIVATE || + macvlan->mode == MACVLAN_MODE_VEPA || + macvlan->mode == MACVLAN_MODE_BRIDGE; +} +#endif + +#if (!SLE_VERSION_CODE || (SLE_VERSION_CODE < SLE_VERSION(15,1,0))) +#ifndef macvlan_accel_priv +#define macvlan_accel_priv _kc_macvlan_accel_priv +#endif + +#ifndef macvlan_release_l2fw_offload +#define macvlan_release_l2fw_offload _kc_macvlan_release_l2fw_offload +#endif +#endif /* !SLES || SLES < 15.1 */ +#endif /* NETIF_F_HW_L2FW_DOFFLOAD */ +#else +#include +#include +#define HAVE_XDP_FRAME_STRUCT +#define HAVE_XDP_SOCK +#define HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS +#define NO_NDO_XDP_FLUSH +#define HAVE_AF_XDP_SUPPORT + +#endif /* 4.18.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) && \ + (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(8,2))) +#define HAVE_DEVLINK_REGIONS +#endif /* RHEL >= 8.0 && RHEL <= 8.2 */ +#define bitmap_alloc(nbits, flags) \ + kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long), flags) +#define bitmap_zalloc(nbits, flags) bitmap_alloc(nbits, ((flags) | __GFP_ZERO)) +#define bitmap_free(bitmap) kfree(bitmap) +#ifdef ETHTOOL_GLINKSETTINGS +#define ethtool_ks_clear(ptr, name) \ + ethtool_link_ksettings_zero_link_mode(ptr, name) +#define ethtool_ks_add_mode(ptr, name, mode) \ + ethtool_link_ksettings_add_link_mode(ptr, name, mode) +#define 
ethtool_ks_del_mode(ptr, name, mode) \ + ethtool_link_ksettings_del_link_mode(ptr, name, mode) +#define ethtool_ks_test(ptr, name, mode) \ + ethtool_link_ksettings_test_link_mode(ptr, name, mode) +#endif /* ETHTOOL_GLINKSETTINGS */ +#define HAVE_NETPOLL_CONTROLLER +#define REQUIRE_PCI_CLEANUP_AER_ERROR_STATUS +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_TCF_MIRRED_DEV +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#endif +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) ||\ + (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +#undef HAVE_TCF_EXTS_TO_LIST +#endif /* RHEL8.0+ */ + +static inline void __kc_metadata_dst_free(void *md_dst) +{ + kfree(md_dst); +} + +#define metadata_dst_free(md_dst) __kc_metadata_dst_free(md_dst) +#else /* >= 4.19.0 */ +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#define NO_NETDEV_BPF_PROG_ATTACHED +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_NETDEV_SB_DEV +#undef HAVE_TCF_EXTS_TO_LIST +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +#define HAVE_TCF_VLAN_TPID +#define HAVE_RHASHTABLE_TYPES +#define HAVE_DEVLINK_REGIONS +#define HAVE_DEVLINK_PARAMS +#endif /* 4.19.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)) +#define HAVE_XDP_UMEM_PROPS +#ifdef HAVE_AF_XDP_SUPPORT +#ifndef napi_if_scheduled_mark_missed +static inline bool __kc_napi_if_scheduled_mark_missed(struct napi_struct *n) +{ + unsigned long val, new; + + do { + val = READ_ONCE(n->state); + if (val & NAPIF_STATE_DISABLE) + return true; + + if (!(val & NAPIF_STATE_SCHED)) + return false; + + new = val | NAPIF_STATE_MISSED; + } while (cmpxchg(&n->state, val, new) != val); + + return true; +} + +#define napi_if_scheduled_mark_missed __kc_napi_if_scheduled_mark_missed +#endif /* !napi_if_scheduled_mark_missed */ +#endif /* HAVE_AF_XDP_SUPPORT */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0))) +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#endif /* RHEL >= 8.0 */ +#if ((SLE_VERSION_CODE >= SLE_VERSION(12,5,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#endif /* SLE == 12sp5 || SLE >= 15sp1 */ +#else /* >= 4.20.0 */ +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#define HAVE_AF_XDP_ZC_SUPPORT +#define HAVE_VXLAN_TYPE +#define HAVE_ETF_SUPPORT /* Earliest TxTime First */ +#endif /* 4.20.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,0))) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#define NETLINK_MAX_COOKIE_LEN 20 +struct netlink_ext_ack { + const char *_msg; + const struct nlattr *bad_attr; + u8 cookie[NETLINK_MAX_COOKIE_LEN]; + u8 cookie_len; +}; + +#endif /* < 4.12 */ +#if 0 +static inline int _kc_dev_open(struct net_device *netdev, + struct netlink_ext_ack __always_unused *extack) +{ + return dev_open(netdev); +} + +#define dev_open _kc_dev_open +#endif +static inline int +_kc_dev_change_flags(struct net_device *netdev, unsigned int flags, + struct netlink_ext_ack __always_unused *extack) +{ + return dev_change_flags(netdev, flags); +} + +#define dev_change_flags _kc_dev_change_flags +#endif /* !(RHEL_RELEASE_CODE && RHEL > RHEL(8,0)) */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7) && \ + 
RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +#else /* RHEL >= 7.7 && RHEL < 8.0 || RHEL >= 8.1 */ +#ifndef HAVE_DEFINE_PTP_SYSTEM +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; +}; + +static inline void +ptp_read_system_prets(struct ptp_system_timestamp __always_unused *sts) +{ + ; +} + +static inline void +ptp_read_system_postts(struct ptp_system_timestamp __always_unused *sts) +{ + ; +} +#endif /* HAVE_DEFINE_PTP_SYSTEM */ +#endif /* !(RHEL >= 7.7 && RHEL != 8.0) */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#endif /* RHEL 8.1 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)) +#define HAVE_TC_INDIR_BLOCK +#endif /* RHEL 8.2 */ +#else /* >= 5.0.0 */ +#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_DMA_ALLOC_COHERENT_ZEROES_MEM +#define HAVE_GENEVE_TYPE +#define HAVE_TC_INDIR_BLOCK +#endif /* 5.0.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_DEVLINK_INFO_GET +#define HAVE_DEVLINK_FLASH_UPDATE +#else /* RHEL < 8.1 */ +#ifdef HAVE_TC_SETUP_CLSFLOWER +#include + +#ifndef HAVE_DEFINE_FLOW_CORRELATION +struct flow_match { + struct flow_dissector *dissector; + void *mask; + void *key; +}; + +struct flow_match_basic { + struct flow_dissector_key_basic *key, *mask; +}; + +struct flow_match_control { + struct flow_dissector_key_control *key, *mask; +}; + +struct flow_match_eth_addrs { + struct flow_dissector_key_eth_addrs *key, *mask; +}; + +#ifdef HAVE_TC_FLOWER_ENC +struct flow_match_enc_keyid { + struct flow_dissector_key_keyid *key, *mask; +}; +#endif + +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +struct flow_match_vlan { + struct flow_dissector_key_vlan *key, *mask; +}; +#endif + +struct flow_match_ipv4_addrs { + struct flow_dissector_key_ipv4_addrs *key, *mask; +}; + +struct flow_match_ipv6_addrs { + struct flow_dissector_key_ipv6_addrs *key, *mask; +}; + +struct flow_match_ports { + struct flow_dissector_key_ports *key, *mask; +}; + +struct flow_rule { + struct flow_match match; +#if 0 + /* In 5.1+ kernels, action is a member of struct flow_rule but is + * not compatible with how we kcompat tc_cls_flower_offload_flow_rule + * below. By not declaring it here, any driver that attempts to use + * action as an element of struct flow_rule will fail to compile + * instead of silently trying to access memory that shouldn't be. 
+ */ + struct flow_action action; +#endif +}; + +void ngbe_flow_rule_match_basic(const struct flow_rule *rule, + struct flow_match_basic *out); +void ngbe_flow_rule_match_control(const struct flow_rule *rule, + struct flow_match_control *out); +void ngbe_flow_rule_match_eth_addrs(const struct flow_rule *rule, + struct flow_match_eth_addrs *out); +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +void ngbe_flow_rule_match_vlan(const struct flow_rule *rule, + struct flow_match_vlan *out); +#endif +void ngbe_flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out); +void ngbe_flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out); +void ngbe_flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out); +#ifdef HAVE_TC_FLOWER_ENC +void ngbe_flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out); +void ngbe_flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out); +void ngbe_flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out); +void ngbe_flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out); +void ngbe_flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out); +#endif + +static inline struct flow_rule * +tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd) +{ + return (struct flow_rule *)&tc_flow_cmd->dissector; +} + +static inline bool flow_rule_match_key(const struct flow_rule *rule, + enum flow_dissector_key_id key) +{ + return dissector_uses_key(rule->match.dissector, key); +} +#endif /* HAVE_DEFINE_FLOW_CORRELATION */ +#endif /* HAVE_TC_SETUP_CLSFLOWER */ + +#endif /* RHEL < 8.1 */ + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define devlink_params_publish(devlink) do { } while (0) +#define devlink_params_unpublish(devlink) do { } while (0) +#endif + +#else /* >= 5.1.0 */ +#define HAVE_NDO_FDB_ADD_EXTACK +#define NO_XDP_QUERY_XSK_UMEM +#define HAVE_AF_XDP_NETDEV_UMEM +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_TC_FLOWER_ENC_IP +#define HAVE_DEVLINK_INFO_GET +#define HAVE_DEVLINK_FLASH_UPDATE +#define HAVE_DEVLINK_PORT_PARAMS +#endif /* 5.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)) +#if (defined HAVE_SKB_XMIT_MORE) && \ +(!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#define netdev_xmit_more() (skb->xmit_more) +#else +#define netdev_xmit_more() (0) +#endif + +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#ifndef eth_get_headlen +#ifndef HAVE_ETH_EXTENDED_HEADLEN +static inline u32 +__kc_eth_get_headlen(const struct net_device __always_unused *dev, void *data, + unsigned int len) +{ + return eth_get_headlen(data, len); +} + +#define eth_get_headlen(dev, data, len) __kc_eth_get_headlen(dev, data, len) +#endif /* HAVE_ETH_EXTENDED_HEADLEN */ +#endif /* !eth_get_headlen */ +#endif /* !RHEL >= 8.2 */ + +#ifndef mmiowb +#ifdef CONFIG_IA64 +#define mmiowb() asm volatile ("mf.a" ::: "memory") +#else +#define mmiowb() +#endif +#endif /* mmiowb */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,2)) +#if IS_ENABLED(CONFIG_NET_DEVLINK) && !defined(devlink_port_attrs_set) +#if 0 +static inline void +_kc_devlink_port_attrs_set(struct devlink_port *devlink_port, + struct _kc_devlink_port_attrs *attrs) +{ 
+ devlink_port_attrs_set(devlink_port, attrs->flavour, + attrs->phys.port_number, attrs->split, + attrs->phys.split_subport_number); +} + +#define devlink_port_attrs_set _kc_devlink_port_attrs_set +#endif +#endif /* CONFIG_NET_DEVLINK && !devlink_port_attrs_set */ +#endif /* <RHEL8.2 */ +#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,1)) +#define HAVE_NDO_GET_DEVLINK_PORT +#endif /* RHEL > 8.1 */ + +#else /* >= 5.2.0 */ +#define HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED +#define SPIN_UNLOCK_IMPLIES_MMIOWB +#define HAVE_NDO_GET_DEVLINK_PORT +#endif /* 5.2.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))) +#define flow_block_offload tc_block_offload +#define flow_block_command tc_block_command +#define flow_cls_offload tc_cls_flower_offload +#define flow_block_binder_type tcf_block_binder_type +#define flow_cls_common_offload tc_cls_common_offload +#define flow_cls_offload_flow_rule tc_cls_flower_offload_flow_rule +#define FLOW_CLS_REPLACE TC_CLSFLOWER_REPLACE +#define FLOW_CLS_DESTROY TC_CLSFLOWER_DESTROY +#define FLOW_CLS_STATS TC_CLSFLOWER_STATS +#define FLOW_CLS_TMPLT_CREATE TC_CLSFLOWER_TMPLT_CREATE +#define FLOW_CLS_TMPLT_DESTROY TC_CLSFLOWER_TMPLT_DESTROY +#define FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS \ + TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS +#define FLOW_BLOCK_BIND TC_BLOCK_BIND +#define FLOW_BLOCK_UNBIND TC_BLOCK_UNBIND + +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#include + +#if 0 +int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head *driver_list, + tc_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + bool ingress_only); +#endif +#define flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \ + ingress_only) \ + _kc_flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \ + ingress_only) +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#else /* RHEL >= 8.2 */ +#define HAVE_FLOW_BLOCK_API +#define HAVE_DEVLINK_PORT_ATTR_PCI_VF +#endif /* RHEL >= 8.2 */ + +#ifndef ETH_P_LLDP +#define ETH_P_LLDP 0x88CC +#endif /* !ETH_P_LLDP */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,2)) +#if IS_ENABLED(CONFIG_NET_DEVLINK) +static inline void +devlink_flash_update_begin_notify(struct devlink __always_unused *devlink) +{ +} + +static inline void +devlink_flash_update_end_notify(struct devlink __always_unused *devlink) +{ +} + +static inline void +devlink_flash_update_status_notify(struct devlink __always_unused *devlink, + const char __always_unused *status_msg, + const char __always_unused *component, + unsigned long __always_unused done, + unsigned long __always_unused total) +{ +} +#endif /* CONFIG_NET_DEVLINK */ +#endif /* <RHEL8.2 */ +#else /* >= 5.3.0 */ +#define XSK_UMEM_RETURNS_XDP_DESC +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0)) +#define HAVE_XSK_UMEM_HAS_ADDRS +#endif /* SLE < 15.3 */ +#endif /* < 5.8.0*/ +#define HAVE_FLOW_BLOCK_API +#define HAVE_DEVLINK_PORT_ATTR_PCI_VF +#if IS_ENABLED(CONFIG_DIMLIB) +#define HAVE_CONFIG_DIMLIB +#endif +#endif /* 5.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(15,2,0))) + +#ifndef HAVE_DEFINE_SKB_FRAG_OFF +static inline unsigned int skb_frag_off(const skb_frag_t *frag) +{ + return frag->page_offset; +} +#endif + +#ifndef HAVE_DEFINE_SKB_FRAG_ADD 
+static inline void skb_frag_off_add(skb_frag_t *frag, int delta) +{ + frag->page_offset += delta; +} +#endif + +#define __flow_indr_block_cb_register __tc_indr_block_cb_register +#define __flow_indr_block_cb_unregister __tc_indr_block_cb_unregister +#endif /* !(RHEL >= 8.2) && !(SLES >= 15sp2) */ +#if (SLE_VERSION_CODE >= SLE_VERSION(15,2,0)) +#define HAVE_NDO_XSK_WAKEUP +#endif /* SLES15sp2 */ +#else /* >= 5.4.0 */ +#define HAVE_NDO_XSK_WAKEUP +#endif /* 5.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,5,0)) +static inline unsigned long _kc_bitmap_get_value8(const unsigned long *map, + unsigned long start) +{ + const size_t index = BIT_WORD(start); + const unsigned long offset = start % BITS_PER_LONG; + + return (map[index] >> offset) & 0xFF; +} +#define bitmap_get_value8 _kc_bitmap_get_value8 + +static inline void _kc_bitmap_set_value8(unsigned long *map, + unsigned long value, + unsigned long start) +{ + const size_t index = BIT_WORD(start); + const unsigned long offset = start % BITS_PER_LONG; + + map[index] &= ~(0xFFUL << offset); + map[index] |= value << offset; +} +#define bitmap_set_value8 _kc_bitmap_set_value8 + +#endif /* 5.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)) +#ifdef HAVE_AF_XDP_SUPPORT +#define xsk_umem_release_addr xsk_umem_discard_addr +#define xsk_umem_release_addr_rq xsk_umem_discard_addr_rq +#endif /* HAVE_AF_XDP_SUPPORT */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3)) || \ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0))) +#define HAVE_TX_TIMEOUT_TXQUEUE +#endif +#else /* >= 5.6.0 */ +#define HAVE_TX_TIMEOUT_TXQUEUE +#endif /* 5.6.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)) +u64 _kc_pci_get_dsn(struct pci_dev *dev); +#define pci_get_dsn(dev) _kc_pci_get_dsn(dev) +#if !(SLE_VERSION_CODE > SLE_VERSION(15,2,0)) && \ + !((LINUX_VERSION_CODE == KERNEL_VERSION(5,3,18)) && \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(14,0,0))) && \ + !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) +#define pci_aer_clear_nonfatal_status pci_cleanup_aer_uncorrect_error_status +#endif + +#define cpu_latency_qos_update_request pm_qos_update_request +#define cpu_latency_qos_add_request(arg1, arg2) pm_qos_add_request(arg1, PM_QOS_CPU_DMA_LATENCY, arg2) +#define cpu_latency_qos_remove_request pm_qos_remove_request + +#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID +#define DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID "fw.bundle_id" +#endif +#else /* >= 5.7.0 */ +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT +#define HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT +#endif /* 5.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0)) +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#endif /* (RHEL < 8.4) || (SLE < 15.3) */ +#define flex_array_size(p, member, count) \ + array_size(count, sizeof(*(p)->member) + __must_be_array((p)->member)) +#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0))) +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#ifndef xsk_umem_get_rx_frame_size +static inline u32 _xsk_umem_get_rx_frame_size(struct xdp_umem 
*umem) +{ + return umem->chunk_size_nohr - XDP_PACKET_HEADROOM; +} + +#define xsk_umem_get_rx_frame_size _xsk_umem_get_rx_frame_size +#endif /* xsk_umem_get_rx_frame_size */ +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#else /* SLE >= 15.3 */ +#define HAVE_XDP_BUFF_FRAME_SZ +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#endif /* SLE >= 15.3 */ +#else /* >= 5.8.0 */ +#define HAVE_TC_FLOW_INDIR_DEV +#define HAVE_TC_FLOW_INDIR_BLOCK_CLEANUP +#define HAVE_XDP_BUFF_FRAME_SZ +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#endif /* 5.8.0 */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) +#define HAVE_TC_FLOW_INDIR_DEV +#endif +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,3,0))) +#define HAVE_TC_FLOW_INDIR_DEV +#endif /* SLE_VERSION_CODE && SLE_VERSION_CODE >= SLES15SP3 */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,4,0))) +#define HAVE_ETHTOOL_COALESCE_EXTACK +#endif /* SLE_VERSION_CODE && SLE_VERSION_CODE >= SLES15SP4 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)) +#if IS_ENABLED(CONFIG_NET_DEVLINK) && !defined(devlink_port_attrs_set) +#if 0 +static inline void +_kc_devlink_port_attrs_set(struct devlink_port *devlink_port, + struct _kc_devlink_port_attrs *attrs) +{ + devlink_port_attrs_set(devlink_port, attrs->flavour, + attrs->phys.port_number, attrs->split, + attrs->phys.split_subport_number, + attrs->switch_id.id, attrs->switch_id.id_len); +} + +#define devlink_port_attrs_set _kc_devlink_port_attrs_set +#endif +#endif /* CONFIG_NET_DEVLINK && !devlink_port_attrs_set */ +#define HAVE_XDP_QUERY_PROG +#else /* >= 5.9.0 */ +#define HAVE_FLOW_INDIR_BLOCK_QDISC +#define HAVE_UDP_TUNNEL_NIC_INFO +#endif /* 5.9.0 */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,3))) +#define HAVE_FLOW_INDIR_BLOCK_QDISC +#endif +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,3,0))) +#define HAVE_FLOW_INDIR_BLOCK_QDISC +#endif /* SLE_VERSION_CODE && SLE_VERSION_CODE >= SLES15SP3 */ +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)) +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0)) +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#else /* SLE >= 15.3 */ +#if 0 +struct devlink_flash_update_params { + const char *file_name; + const char *component; + u32 overwrite_mask; +}; +#endif +#ifndef DEVLINK_FLASH_OVERWRITE_SETTINGS +#define DEVLINK_FLASH_OVERWRITE_SETTINGS BIT(0) +#endif + +#ifndef DEVLINK_FLASH_OVERWRITE_IDENTIFIERS +#define DEVLINK_FLASH_OVERWRITE_IDENTIFIERS BIT(1) +#endif +#endif /* !(SLE >= 15.3) */ + +#if (!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,3,0)))) +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xsk_get_pool_from_qid xdp_get_umem_from_qid +#define xsk_pool_get_rx_frame_size xsk_umem_get_rx_frame_size +#define xsk_pool_set_rxq_info xsk_buff_set_rxq_info +#define xsk_pool_dma_unmap xsk_buff_dma_unmap +#define xsk_pool_dma_map xsk_buff_dma_map +#define xsk_tx_peek_desc xsk_umem_consume_tx +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL +#include +static inline void +_kc_xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, + void __always_unused *pool) +{ + xsk_buff_dma_sync_for_cpu(xdp); +} + +#define xsk_buff_dma_sync_for_cpu(xdp, pool) \ + 
_kc_xsk_buff_dma_sync_for_cpu(xdp, pool) +#endif /* HAVE_MEM_TYPE_XSK_BUFF_POOL */ +#else /* SLE >= 15.3 */ +#define HAVE_NETDEV_BPF_XSK_POOL +#endif /* SLE >= 15.3 */ +#else /* >= 5.10.0 */ +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#define HAVE_NETDEV_BPF_XSK_POOL +#endif /* 5.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)) +#define HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY +#else /* >= 5.11.0 */ +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW +#undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#endif /* 5.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,15,0)) +#define NEED_DEVLINK_ALLOC_SETS_DEV +#define HAVE_DEVLINK_REGISTER_SETS_DEV +#else /* >= 5.15.0 */ +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_NDO_ETH_IOCTL +#define HAVE_DEVICE_IN_MDEV_PARENT_OPS +#endif /* 5.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,16,0)) +#else /* >= 5.16.0 */ +#undef HAVE_PASID_SUPPORT +#define HAVE_DEVLINK_SET_FEATURES +#define HAVE_DEVLINK_NOTIFY_REGISTER +#endif /* 5.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,17,0)) +#define NEED_ETH_HW_ADDR_SET +#define NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#else /* >=5.17.0*/ +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif /* 5.17.0 */ + +#ifdef NEED_ETH_HW_ADDR_SET +#ifndef ETH_HW_ADDR_SET +void _kc_eth_hw_addr_set_ngbe(struct net_device *dev, const void *addr); +#ifndef eth_hw_addr_set +#define eth_hw_addr_set(dev, addr) \ + _kc_eth_hw_addr_set_ngbe(dev, addr) +#endif /* eth_hw_addr_set */ +#endif /* ETH_HW_ADDR_SET */ +#endif /* NEED_ETH_HW_ADDR_SET */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6,1,0)) +#else /* >=6.1.0*/ +#define HAVE_NOT_NAPI_WEIGHT +#endif /* 6.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6,2,0)) +#else /* >=6.2.0*/ +#define HAVE_NOT_PTT_ADJFREQ +#define u64_stats_fetch_begin_irq u64_stats_fetch_begin +#define u64_stats_fetch_retry_irq u64_stats_fetch_retry +#endif /* 6.2.0 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6))) +#undef HAVE_XDP_BUFF_RXQ +#undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#endif + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4)) +#undef HAVE_XDP_QUERY_PROG +#endif /* 8.4 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,5))) +#undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#endif + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,6)) +#else /* >= 8.6 */ +#define HAVE_ETHTOOL_COALESCE_EXTACK +#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,6)) +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif /* > 8.6 */ +#endif /* < 8.6 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8,8)) +#define HAVE_NOT_NAPI_WEIGHT +#endif /* 
== 8.8 */ + +/*****************************************************************************/ +#if RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,6) +#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,0) +#undef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#endif +#endif +#if RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(9,0) +#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,3) +#undef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#endif +#endif + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,0)) +#else /* >= 9.0 */ +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(9,0)) +#undef HAVE_ETHTOOL_COALESCE_EXTACK +#undef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif /* = 9.0*/ +#define HAVE_XDP_BUFF_RXQ +#endif /* 9.0 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,2)) +#else /* >= 9.2 */ +#define HAVE_NOT_NAPI_WEIGHT +#endif /* < 9.2 */ + +/*****************************************************************************/ +#if SLE_VERSION_CODE >= SLE_VERSION(15,5,0) +#undef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#endif + + +#ifdef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#ifdef HAVE_XDP_BUFF_IN_XDP_H +#include +#else +#include +#endif /* HAVE_XDP_BUFF_IN_XDP_H */ +static inline int +_kc_xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, struct net_device *dev, + u32 queue_index, unsigned int __always_unused napi_id) +{ + return xdp_rxq_info_reg(xdp_rxq, dev, queue_index); +} + +#define xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id) \ + _kc_xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id) +#endif /* HAVE_XDP_RXQ_INFO_REG_3_PARAMS */ + +#ifndef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif + +#ifndef HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_COALESCE_EXTACK +#endif + +#endif /* _KCOMPAT_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_lib.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_lib.c new file mode 100644 index 0000000000000000000000000000000000000000..a164c733ab6b0c6276b20f97d7c8479fe9ce2186 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_lib.c @@ -0,0 +1,806 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + + +#include "ngbe.h" +#include "ngbe_sriov.h" + +/** + * ngbe_cache_ring_vmdq - Descriptor ring to register mapping for VMDq + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for VMDq to the assigned rings. It + * will also try to cache the proper offsets if RSS/FCoE/SRIOV are enabled along + * with VMDq. 
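+ * A worked example (purely illustrative, assuming vmdq->offset is 8): Rx and + * Tx ring 3 would be cached at register index 11, since the code below simply + * assigns reg_idx = vmdq->offset + i for every ring in the pool.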
+ * + **/ +static bool ngbe_cache_ring_vmdq(struct ngbe_adapter *adapter) +{ + struct ngbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + //struct ngbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS]; + int i; + u16 reg_idx; + + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & NGBE_FLAG_VMDQ_ENABLED)) + return false; + + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset; + + for (i = 0; i < adapter->num_rx_queues; i++) { + + /* If we are greater than indices move to next pool */ + adapter->rx_ring[i]->reg_idx = reg_idx + i; + } + + reg_idx = vmdq->offset; + for (i = 0; i < adapter->num_tx_queues; i++) { + + /* If we are greater than indices move to next pool */ + adapter->tx_ring[i]->reg_idx = reg_idx + i; + } + + return true; +} + +/** + * ngbe_cache_ring_rss - Descriptor ring to register mapping for RSS + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for RSS, ATR, FCoE, and SR-IOV. + * + **/ +static bool ngbe_cache_ring_rss(struct ngbe_adapter *adapter) +{ + u16 i, reg_i; + + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + + for (i = 0, reg_i = 0; i < adapter->num_tx_queues; i++, reg_i++) + adapter->tx_ring[i]->reg_idx = reg_i; + + for (i = 0; i < adapter->num_xdp_queues; i++, reg_i++) + adapter->xdp_ring[i]->reg_idx = reg_i; + + return true; +} + +/** + * ngbe_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + * + * Note, the order the various feature calls is important. It must start with + * the "most" features enabled at the same time, then trickle down to the + * least amount of features turned on at once. + **/ +static void ngbe_cache_ring_register(struct ngbe_adapter *adapter) +{ + if (ngbe_cache_ring_vmdq(adapter)) + return; + + ngbe_cache_ring_rss(adapter); +} + +#define NGBE_RSS_64Q_MASK 0x3F +#define NGBE_RSS_16Q_MASK 0xF +#define NGBE_RSS_8Q_MASK 0x7 +#define NGBE_RSS_4Q_MASK 0x3 +#define NGBE_RSS_2Q_MASK 0x1 +#define NGBE_RSS_DISABLED_MASK 0x0 + + +/** + * ngbe_set_vmdq_queues: Allocate queues for VMDq devices + * @adapter: board private structure to initialize + * + * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues + * and VM pools where appropriate. If RSS is available, then also try and + * enable RSS and map accordingly. 
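+ * Note that in the code below RSS is collapsed to a single queue per pool + * whenever VMDq is active (rss_i is forced to 1), so the resulting queue + * count is simply one Rx/Tx ring per pool.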
+ * + **/ +static bool ngbe_set_vmdq_queues(struct ngbe_adapter *adapter) +{ + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; + u16 vmdq_m = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; + u16 rss_m = NGBE_RSS_DISABLED_MASK; + + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & NGBE_FLAG_VMDQ_ENABLED)) + return false; + + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + + /* double check we are limited to maximum pools */ + vmdq_i = min_t(u16, NGBE_MAX_VMDQ_INDICES, vmdq_i); + + /* when VMDQ on, disable RSS */ + rss_i = 1; + + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* limit RSS based on user input and save for later use */ + adapter->ring_feature[RING_F_RSS].indices = rss_i; + adapter->ring_feature[RING_F_RSS].mask = rss_m; + + adapter->queues_per_pool = rss_i; + adapter->num_rx_queues = vmdq_i * rss_i; + +#ifdef HAVE_TX_MQ + adapter->num_tx_queues = vmdq_i * rss_i; +#else + adapter->num_tx_queues = vmdq_i; +#endif /* HAVE_TX_MQ */ + adapter->num_xdp_queues = 0; + + return true; +} + +#ifdef HAVE_XDP_SUPPORT +static int ngbe_xdp_queues(struct ngbe_adapter *adapter) +{ + int queues = min_t(int, NGBE_MAX_XDP_QS, nr_cpu_ids); + + return adapter->xdp_prog ? queues : 0; +} +#endif +/** + * ngbe_set_rss_queues: Allocate queues for RSS + * @adapter: board private structure to initialize + * + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. + * + **/ +static bool ngbe_set_rss_queues(struct ngbe_adapter *adapter) +{ + struct ngbe_ring_feature *f; + u16 rss_i; + + /* set mask for 16 queue limit of RSS */ + f = &adapter->ring_feature[RING_F_RSS]; + rss_i = f->limit; + + f->indices = rss_i; + f->mask = NGBE_RSS_8Q_MASK; + + adapter->num_rx_queues = rss_i; +#ifdef HAVE_TX_MQ + adapter->num_tx_queues = rss_i; +#endif +#ifdef HAVE_XDP_SUPPORT + if (adapter->xdp_prog) { + adapter->num_xdp_queues = min_t(int, ngbe_xdp_queues(adapter), rss_i); + } +#endif + return true; +} + +/* + * ngbe_set_num_queues: Allocate queues for device, feature dependent + * @adapter: board private structure to initialize + * + * This is the top level queue allocation routine. The order here is very + * important, starting with the "most" number of features turned on at once, + * and ending with the smallest set of features. This way large combinations + * can be allocated if they're turned on, and smaller combinations are the + * fallthrough conditions. + * + **/ +static void ngbe_set_num_queues(struct ngbe_adapter *adapter) +{ + /* Start with base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_xdp_queues = 0; + adapter->queues_per_pool = 1; + + if (ngbe_set_vmdq_queues(adapter)) + return; + + ngbe_set_rss_queues(adapter); + +} + +/** + * ngbe_acquire_msix_vectors - acquire MSI-X vectors + * @adapter: board private structure + * + * Attempts to acquire a suitable range of MSI-X vector interrupts. Will + * return a negative error code if unable to acquire MSI-X vectors for any + * reason. 
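+ * As a rough sketch (assuming 8 online CPUs and 8 Rx/Tx queue pairs), the + * request works out to min(8, 8) + NON_Q_VECTORS vectors, further capped by + * hw->mac.max_msix_vectors before pci_enable_msix_range() is attempted.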
+ */ +static int ngbe_acquire_msix_vectors(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i, vectors, vector_threshold; + + if (!(adapter->flags & NGBE_FLAG_MSIX_CAPABLE)) + return -EOPNOTSUPP; + + /* We start by asking for one vector per queue pair */ + vectors = max(adapter->num_rx_queues, adapter->num_tx_queues); + vectors = max(vectors, adapter->num_xdp_queues); + + /* It is easy to be greedy for MSI-X vectors. However, it really + * doesn't do much good if we have a lot more vectors than CPUs. We'll + * be somewhat conservative and only ask for (roughly) the same number + * of vectors as there are CPUs. + */ + vectors = min_t(int, vectors, num_online_cpus()); + + /* Some vectors are necessary for non-queue interrupts */ + vectors += NON_Q_VECTORS; + + /* Hardware can only support a maximum of hw.mac->max_msix_vectors. + * With features such as RSS and VMDq, we can easily surpass the + * number of Rx and Tx descriptor queues supported by our device. + * Thus, we cap the maximum in the rare cases where the CPU count also + * exceeds our vector limit + */ + vectors = min_t(int, vectors, hw->mac.max_msix_vectors); + + /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0] + * handler, and (2) an Other (Link Status Change, etc.) handler. + */ + vector_threshold = MIN_MSIX_COUNT; + + /* we need to alloc (7vfs+1pf+1misc) or (8vfs+1misc) msix entries */ + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) { + vectors += adapter->ring_feature[RING_F_VMDQ].offset; + } + + adapter->msix_entries = kcalloc(vectors, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!adapter->msix_entries) + return -ENOMEM; + + for (i = 0; i < vectors; i++) + adapter->msix_entries[i].entry = i; + + vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, + vector_threshold, vectors); + if (vectors < 0) { + /* A negative count of allocated vectors indicates an error in + * acquiring within the specified range of MSI-X vectors */ + e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n", + vectors); + + adapter->flags &= ~NGBE_FLAG_MSIX_ENABLED; + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + + return vectors; + } + + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) { + if (vectors < 9) { + adapter->flags2 &= ~NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP; + e_dev_warn("Remain available irqs < 9. Disable MISC IRQ REMAP.\n"); + } + else + vectors -= adapter->ring_feature[RING_F_VMDQ].offset; + } + + /* we successfully allocated some number of vectors within our + * requested range. + */ + adapter->flags |= NGBE_FLAG_MSIX_ENABLED; + + /* Adjust for only the vectors we'll use, which is minimum + * of max_q_vectors, or the number of vectors we were allocated. + */ + vectors -= NON_Q_VECTORS; + adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors); + + return 0; +} + +static void ngbe_add_ring(struct ngbe_ring *ring, + struct ngbe_ring_container *head) +{ + ring->next = head->ring; + head->ring = ring; + head->count++; +} + +/** + * ngbe_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. 
If allocation fails we return -ENOMEM. + **/ +static int ngbe_alloc_q_vector(struct ngbe_adapter *adapter, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int xdp_count, unsigned int xdp_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct ngbe_q_vector *q_vector; + struct ngbe_ring *ring; + int node = -1; +#ifdef HAVE_IRQ_AFFINITY_HINT + int cpu = -1; + u8 tcs = netdev_get_num_tc(adapter->netdev); +#endif + + int ring_count, size; + + /* note this will allocate space for the ring structure as well! */ + ring_count = txr_count + rxr_count + xdp_count; + size = sizeof(struct ngbe_q_vector) + + (sizeof(struct ngbe_ring) * ring_count); + +#ifdef HAVE_IRQ_AFFINITY_HINT + /* customize cpu for Flow Director mapping */ + if ((tcs <= 1) && !(adapter->flags & NGBE_FLAG_VMDQ_ENABLED)) { + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + if (rss_i > 1 && adapter->atr_sample_rate) { + if (cpu_online(v_idx)) { + cpu = v_idx; + node = cpu_to_node(cpu); + } + } + } + +#endif + /* allocate q_vector and rings */ + q_vector = kzalloc_node(size, GFP_KERNEL, node); + if (!q_vector) + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* setup affinity mask and node */ +#ifdef HAVE_IRQ_AFFINITY_HINT + if (cpu != -1) + cpumask_set_cpu(cpu, &q_vector->affinity_mask); +#endif + q_vector->numa_node = node; + + /* initialize CPU for DCA */ + q_vector->cpu = -1; + +#ifndef NGBE_NO_LRO + /* initialize LRO */ + __skb_queue_head_init(&q_vector->lrolist.active); + +#endif + /* initialize NAPI */ +#ifdef HAVE_NOT_NAPI_WEIGHT + netif_napi_add(adapter->netdev, &q_vector->napi, + ngbe_poll); +#else + netif_napi_add(adapter->netdev, &q_vector->napi, + ngbe_poll, 64); +#endif +#ifndef HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD +#ifdef HAVE_NDO_BUSY_POLL + napi_hash_add(&q_vector->napi); +#endif +#endif /*HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD*/ + +#ifdef HAVE_NDO_BUSY_POLL + /* initialize busy poll */ + atomic_set(&q_vector->state, NGBE_QV_STATE_DISABLE); + +#endif + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + q_vector->v_idx = v_idx; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + q_vector->rx.work_limit = adapter->rx_work_limit; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* intialize ITR */ + if (txr_count && !rxr_count) { + /* tx only vector */ + if (adapter->tx_itr_setting == 1) + q_vector->itr = NGBE_7K_ITR; + else + q_vector->itr = adapter->tx_itr_setting; + } else { + /* rx or rx/tx vector */ + if (adapter->rx_itr_setting == 1) + q_vector->itr = NGBE_7K_ITR; + else + q_vector->itr = adapter->rx_itr_setting; + } + + while (txr_count) { + /* assign generic ring traits */ + ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + ngbe_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + if (adapter->num_vmdqs > 1) + ring->queue_index = + txr_idx % adapter->queues_per_pool; + else + ring->queue_index = txr_idx; + clear_ring_xdp(ring); + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* update count and index */ + txr_count--; + txr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + while (xdp_count) { + /* assign generic ring traits */ + ring->dev = 
pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + ngbe_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = xdp_idx; + set_ring_xdp(ring); + + + /* assign ring to adapter */ + adapter->xdp_ring[xdp_idx] = ring; + + /* update count and index */ + xdp_count--; + xdp_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + while (rxr_count) { + /* assign generic ring traits */ + ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + ngbe_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + if (adapter->num_vmdqs > 1) + ring->queue_index = + rxr_idx % adapter->queues_per_pool; + else + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + + /* update count and index */ + rxr_count--; + rxr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + return 0; +} + +/** + * ngbe_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void ngbe_free_q_vector(struct ngbe_adapter *adapter, int v_idx) +{ + struct ngbe_q_vector *q_vector = adapter->q_vector[v_idx]; + struct ngbe_ring *ring; + + ngbe_for_each_ring(ring, q_vector->tx) { + if (ring_is_xdp(ring)) + adapter->xdp_ring[ring->queue_index] = NULL; + else + adapter->tx_ring[ring->queue_index] = NULL; + } +#ifdef HAVE_XDP_SUPPORT + if (static_key_enabled((struct static_key *)&ngbe_xdp_locking_key)) + static_branch_dec(&ngbe_xdp_locking_key); +#endif + + ngbe_for_each_ring(ring, q_vector->rx) + adapter->rx_ring[ring->queue_index] = NULL; + + adapter->q_vector[v_idx] = NULL; +#ifdef HAVE_NDO_BUSY_POLL + napi_hash_del(&q_vector->napi); +#endif + netif_napi_del(&q_vector->napi); +#ifndef NGBE_NO_LRO + __skb_queue_purge(&q_vector->lrolist.active); +#endif + kfree_rcu(q_vector, rcu); +} + +/** + * ngbe_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. 
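+ * Rings are interleaved across the vectors: within a q_vector the ring
+ * index advances by the number of q_vectors, so with 4 vectors and
+ * 8 Rx/8 Tx queues, vector 0 owns Tx/Rx rings {0, 4}, vector 1 owns
+ * {1, 5}, and so on.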
+ **/ +static int ngbe_alloc_q_vectors(struct ngbe_adapter *adapter) +{ + unsigned int q_vectors = adapter->num_q_vectors; + unsigned int rxr_remaining = adapter->num_rx_queues; + unsigned int txr_remaining = adapter->num_tx_queues; + unsigned int xdp_remaining = adapter->num_xdp_queues; + unsigned int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) { + for (; rxr_remaining; v_idx++) { + err = ngbe_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 0, 0, 1, rxr_idx); + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx); + err = ngbe_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, + xqpv, xdp_idx, + rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + xdp_remaining -= xqpv; + rxr_idx++; + txr_idx++; + xdp_idx++; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_xdp_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + ngbe_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * ngbe_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void ngbe_free_q_vectors(struct ngbe_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_xdp_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + ngbe_free_q_vector(adapter, v_idx); +} + +void ngbe_reset_interrupt_capability(struct ngbe_adapter *adapter) +{ + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) { + adapter->flags &= ~NGBE_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & NGBE_FLAG_MSI_ENABLED) { + adapter->flags &= ~NGBE_FLAG_MSI_ENABLED; + pci_disable_msi(adapter->pdev); + } +} + +/** + * ngbe_set_interrupt_capability - set MSI-X or MSI if supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +void ngbe_set_interrupt_capability(struct ngbe_adapter *adapter) +{ + int err; + + /* We will try to get MSI-X interrupts first */ + if (!ngbe_acquire_msix_vectors(adapter)) + return; + + /* At this point, we do not have MSI-X capabilities. We need to + * reconfigure or disable various features which require MSI-X + * capability. 
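+ * The fallback path below drops to a single queue and vector: VMDq,
+ * SR-IOV and RSS are disabled, the queue count is recalculated, and we
+ * try MSI before finally settling for legacy INTx.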
+ */
+	/* Disable VMDq support */
+	e_dev_warn("Disabling VMDq support\n");
+	adapter->flags &= ~NGBE_FLAG_VMDQ_ENABLED;
+
+#ifdef CONFIG_PCI_IOV
+	/* Disable SR-IOV support */
+	e_dev_warn("Disabling SR-IOV support\n");
+	ngbe_disable_sriov(adapter);
+	if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP)
+		adapter->flags2 &= ~NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP;
+#endif /* CONFIG_PCI_IOV */
+
+	/* Disable RSS */
+	e_dev_warn("Disabling RSS support\n");
+	adapter->ring_feature[RING_F_RSS].limit = 1;
+
+	/* recalculate number of queues now that many features have been
+	 * changed or disabled.
+	 */
+	ngbe_set_num_queues(adapter);
+	adapter->num_q_vectors = 1;
+
+	if (!(adapter->flags & NGBE_FLAG_MSI_CAPABLE))
+		return;
+
+	err = pci_enable_msi(adapter->pdev);
+	if (err)
+		e_dev_warn("Failed to allocate MSI interrupt, falling back to "
+			   "legacy. Error: %d\n",
+			   err);
+	else
+		adapter->flags |= NGBE_FLAG_MSI_ENABLED;
+}
+
+/**
+ * ngbe_init_interrupt_scheme - Determine proper interrupt scheme
+ * @adapter: board private structure to initialize
+ *
+ * We determine which interrupt scheme to use based on...
+ * - Kernel support (MSI, MSI-X)
+ *   - which can be user-defined (via MODULE_PARAM)
+ * - Hardware queue count (num_*_queues)
+ *   - defined by miscellaneous hardware support/features (RSS, etc.)
+ **/
+int ngbe_init_interrupt_scheme(struct ngbe_adapter *adapter)
+{
+	int err;
+
+	/* if assigned vfs >= 7, the PF queue irqs remain at seq 0 and the
+	 * misc irq moves from seq 1 to seq 8; this needs extra processing.
+	 */
+	if (adapter->num_vfs >= NGBE_MAX_VF_FUNCTIONS - 1) {
+		adapter->flags2 |= NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP;
+		adapter->irq_remap_offset = adapter->num_vfs;
+	}
+
+	/* Number of supported queues */
+	ngbe_set_num_queues(adapter);
+
+	/* Set interrupt mode */
+	ngbe_set_interrupt_capability(adapter);
+
+	/* Allocate memory for queues */
+	err = ngbe_alloc_q_vectors(adapter);
+	if (err) {
+		e_err(probe, "Unable to allocate memory for queue vectors\n");
+		ngbe_reset_interrupt_capability(adapter);
+		return err;
+	}
+
+	ngbe_cache_ring_register(adapter);
+
+	set_bit(__NGBE_DOWN, &adapter->state);
+
+	return 0;
+}
+
+/**
+ * ngbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @adapter: board private structure to clear interrupt scheme on
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions.
+ **/
+void ngbe_clear_interrupt_scheme(struct ngbe_adapter *adapter)
+{
+	ngbe_free_q_vectors(adapter);
+	ngbe_reset_interrupt_capability(adapter);
+
+	/* clear the misc IRQ remap flag */
+	if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) {
+		adapter->flags2 &= ~NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP;
+	}
+}
+
+void ngbe_tx_ctxtdesc(struct ngbe_ring *tx_ring, u32 vlan_macip_lens,
+		      u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
+{
+	struct ngbe_tx_context_desc *context_desc;
+	u16 i = tx_ring->next_to_use;
+
+	context_desc = NGBE_TX_CTXTDESC(tx_ring, i);
+
+	i++;
+	tx_ring->next_to_use = (i < tx_ring->count) ?
i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= NGBE_TXD_DTYP_CTXT; + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); +} + diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index 91f0f23c176a09406a9490637eb0ce06837f158a..f7b3f079ea88424c912c86f678d54d65fdeb70c2 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -1,496 +1,9371 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ #include #include #include #include +#include +#include #include -#include -#include -#include -#include - -#include "../libwx/wx_type.h" -#include "../libwx/wx_hw.h" -#include "../libwx/wx_lib.h" -#include "ngbe_type.h" -#include "ngbe_mdio.h" +#include +#include +#include +#include +#include +#ifdef NETIF_F_TSO +#include +#ifdef NETIF_F_TSO6 +#include +#endif +#endif +#include +#ifdef SIOCETHTOOL +#include +#endif + +#include +#include "ngbe.h" + +#ifdef HAVE_XDP_SUPPORT +#include +#include +#include +#endif + +#ifdef HAVE_VXLAN_CHECKS +#include +#endif /* HAVE_VXLAN_CHECKS */ + +#include "ngbe_sriov.h" #include "ngbe_hw.h" -#include "ngbe_ethtool.h" +#include "ngbe_phy.h" +#include "ngbe_pcierr.h" + +char ngbe_driver_name[32] = NGBE_NAME; +static const char ngbe_driver_string[] = + "WangXun Gigabit PCI Express Network Driver"; +#define DRV_HW_PERF + +#define FPGA + +#define DRIVERIOV -char ngbe_driver_name[] = "ngbe"; +#define BYPASS_TAG + +#define RELEASE_TAG + +#if defined(NGBE_SUPPORT_KYLIN) +#define DRV_VERSION __stringify(1.2.5.3klos) +#elif defined(CONFIG_EULER_KERNEL) +#define DRV_VERSION __stringify(1.2.5.3elos) +#elif defined(CONFIG_UOS_KERNEL) +#define DRV_VERSION __stringify(1.2.5.3uos) +#else +#define DRV_VERSION __stringify(1.2.5.3) +#endif +const char ngbe_driver_version[32] = DRV_VERSION; +static const char ngbe_copyright[] = + "Copyright (c) 2018 -2019 Beijing WangXun Technology Co., Ltd"; +static const char ngbe_overheat_msg[] = + "Network adapter has been stopped because it has over heated. 
" + "If the problem persists, restart the computer, or " + "power off the system and replace the adapter"; +static const char ngbe_underheat_msg[] = + "Network adapter has been started again since the temperature " + "has been back to normal state"; /* ngbe_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ static const struct pci_device_id ngbe_pci_tbl[] = { - { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W), 0}, - { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A2), 0}, - { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A2S), 0}, - { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A4), 0}, - { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A4S), 0}, - { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL2), 0}, - { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S), 0}, - { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL4), 0}, - { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S), 0}, - { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860LC), 0}, - { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A1), 0}, - { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A1L), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_TEST), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860A2), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860A2S), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860A4), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860A4S), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860AL2), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860AL2S), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860AL4), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860AL4S), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860AL_W), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860NCSI), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860A1L), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860A1), 0}, + { PCI_VDEVICE(TRUSTNETIC, 0x10c), 0}, /* required last entry */ { .device = 0 } }; +MODULE_DEVICE_TABLE(pci, ngbe_pci_tbl); -/** - * ngbe_init_type_code - Initialize the shared code - * @wx: pointer to hardware structure - **/ -static void ngbe_init_type_code(struct wx *wx) + +MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, "); +MODULE_DESCRIPTION("WangXun(R) Gigabit PCI Express Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +#ifdef HAVE_XDP_SUPPORT +DEFINE_STATIC_KEY_FALSE(ngbe_xdp_locking_key); +EXPORT_SYMBOL(ngbe_xdp_locking_key); +#endif +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 + +static struct workqueue_struct *ngbe_wq; + +static bool ngbe_check_cfg_remove(struct ngbe_hw *hw, struct pci_dev *pdev); +static void ngbe_clean_rx_ring(struct ngbe_ring *rx_ring); +static void ngbe_clean_tx_ring(struct ngbe_ring *tx_ring); +static u32 ngbe_tx_cmd_type(u32 tx_flags); + +extern ngbe_dptype ngbe_ptype_lookup[256]; + +static inline ngbe_dptype ngbe_decode_ptype(const u8 ptype) { - int wol_mask = 0, ncsi_mask = 0; - u16 type_mask = 0, val; + return ngbe_ptype_lookup[ptype]; +} - wx->mac.type = wx_mac_em; - type_mask = (u16)(wx->subsystem_device_id & NGBE_OEM_MASK); - ncsi_mask = wx->subsystem_device_id & NGBE_NCSI_MASK; - wol_mask = wx->subsystem_device_id & NGBE_WOL_MASK; +static inline ngbe_dptype +decode_rx_desc_ptype(const union ngbe_rx_desc *rx_desc) +{ + return ngbe_decode_ptype(NGBE_RXD_PKTTYPE(rx_desc)); +} - val = rd32(wx, WX_CFG_PORT_ST); - wx->mac_type = (val & BIT(7)) >> 7 ? 
- em_mac_type_rgmii : - em_mac_type_mdi; +void ngbe_print_tx_hang_status(struct ngbe_adapter *adapter) +{ + int pos; + u32 value; + struct pci_dev *pdev = adapter->pdev; + u16 devctl2; - wx->wol_hw_supported = (wol_mask == NGBE_WOL_SUP) ? 1 : 0; - wx->ncsi_enabled = (ncsi_mask == NGBE_NCSI_MASK || - type_mask == NGBE_SUBID_OCP_CARD) ? 1 : 0; + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); + if (!pos) + return; + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_STATUS, &value); + e_info(probe, "AER Uncorrectable Error Status: 0x%08x\n", value); + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &value); + e_info(probe, "AER Uncorrectable Error Mask: 0x%08x\n", value); + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &value); + e_info(probe, "AER Uncorrectable Error Severity: 0x%08x\n", value); + pci_read_config_dword(pdev, pos + PCI_ERR_COR_STATUS, &value); + e_info(probe, "AER Correctable Error Status: 0x%08x\n", value); + pci_read_config_dword(pdev, pos + PCI_ERR_COR_MASK, &value); + e_info(probe, "AER Correctable Error Mask: 0x%08x\n", value); + pci_read_config_dword(pdev, pos + PCI_ERR_CAP, &value); + e_info(probe, "AER Capabilities and Control Register: 0x%08x\n", value); + + pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &devctl2); + e_info(probe, "Device Control2 Register: 0x%04x\n", devctl2); + + e_info(probe, "Tx flow control Status[TDB_TFCS 0xCE00]: 0x%x\n", + rd32(&adapter->hw, NGBE_TDB_TFCS)); + + e_info(probe, "Tx Desc Fatal Error[TDM_DESC_FATAL 0x80D0]: 0x%x\n", + rd32(&adapter->hw, NGBE_TDM_DESC_FATAL)); + return; +} + +static void ngbe_check_minimum_link(struct ngbe_adapter *adapter, + int expected_gts) +{ + struct ngbe_hw *hw = &adapter->hw; + struct pci_dev *pdev; + + /* Some devices are not connected over PCIe and thus do not negotiate + * speed. These devices do not have valid bus info, and thus any report + * we generate may not be correct. + */ + if (hw->bus.type == ngbe_bus_type_internal) + return; + + pdev = adapter->pdev; + + pcie_print_link_status(pdev); - switch (type_mask) { - case NGBE_SUBID_LY_YT8521S_SFP: - case NGBE_SUBID_LY_M88E1512_SFP: - case NGBE_SUBID_YT8521S_SFP_GPIO: - case NGBE_SUBID_INTERNAL_YT8521S_SFP_GPIO: - wx->gpio_ctrl = 1; - break; - default: - wx->gpio_ctrl = 0; - break; - } } /** - * ngbe_init_rss_key - Initialize wx RSS key - * @wx: device handle + * ngbe_enumerate_functions - Get the number of ports this device has + * @adapter: adapter structure * - * Allocates and initializes the RSS key if it is not allocated. + * This function enumerates the phsyical functions co-located on a single slot, + * in order to determine how many ports a device has. This is most useful in + * determining the required GT/s of PCIe bandwidth necessary for optimal + * performance. **/ -static inline int ngbe_init_rss_key(struct wx *wx) +static inline int ngbe_enumerate_functions(struct ngbe_adapter *adapter) { - u32 *rss_key; + struct pci_dev *entry, *pdev = adapter->pdev; + int physfns = 0; - if (!wx->rss_key) { - rss_key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL); - if (unlikely(!rss_key)) - return -ENOMEM; + list_for_each_entry(entry, &pdev->bus->devices, bus_list) { +#ifdef CONFIG_PCI_IOV + /* don't count virtual functions */ + if (entry->is_virtfn) + continue; +#endif - netdev_rss_key_fill(rss_key, WX_RSS_KEY_SIZE); - wx->rss_key = rss_key; + /* When the devices on the bus don't all match our device ID, + * we can't reliably determine the correct number of + * functions. 
This can occur if a function has been direct + * attached to a virtual machine using VT-d, for example. In + * this case, simply return -1 to indicate this. + */ + if ((entry->vendor != pdev->vendor) || + (entry->device != pdev->device)) + return -1; + + physfns++; } - return 0; + return physfns; } -/** - * ngbe_sw_init - Initialize general software structures - * @wx: board private structure to initialize - **/ -static int ngbe_sw_init(struct wx *wx) +static void ngbe_service_event_schedule(struct ngbe_adapter *adapter) { - struct pci_dev *pdev = wx->pdev; - u16 msix_count = 0; - int err = 0; - - wx->mac.num_rar_entries = NGBE_RAR_ENTRIES; - wx->mac.max_rx_queues = NGBE_MAX_RX_QUEUES; - wx->mac.max_tx_queues = NGBE_MAX_TX_QUEUES; - wx->mac.mcft_size = NGBE_MC_TBL_SIZE; - wx->mac.vft_size = NGBE_SP_VFT_TBL_SIZE; - wx->mac.rx_pb_size = NGBE_RX_PB_SIZE; - wx->mac.tx_pb_size = NGBE_TDB_PB_SZ; + if (!test_bit(__NGBE_DOWN, &adapter->state) && + !test_bit(__NGBE_REMOVING, &adapter->state) && + !test_and_set_bit(__NGBE_SERVICE_SCHED, &adapter->state)) + queue_work(ngbe_wq, &adapter->service_task); +} - /* PCI config space info */ - err = wx_sw_init(wx); - if (err < 0) - return err; +static void ngbe_service_event_complete(struct ngbe_adapter *adapter) +{ + BUG_ON(!test_bit(__NGBE_SERVICE_SCHED, &adapter->state)); - /* mac type, phy type , oem type */ - ngbe_init_type_code(wx); + /* flush memory to make sure state is correct before next watchdog */ + smp_mb__before_atomic(); + clear_bit(__NGBE_SERVICE_SCHED, &adapter->state); +} - /* Set common capability flags and settings */ - wx->max_q_vectors = NGBE_MAX_MSIX_VECTORS; - err = wx_get_pcie_msix_counts(wx, &msix_count, NGBE_MAX_MSIX_VECTORS); - if (err) - dev_err(&pdev->dev, "Do not support MSI-X\n"); - wx->mac.max_msix_vectors = msix_count; +static void ngbe_remove_adapter(struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = hw->back; - if (ngbe_init_rss_key(wx)) - return -ENOMEM; + if (!hw->hw_addr) + return; + hw->hw_addr = NULL; + e_dev_err("Adapter removed\n"); + if (test_bit(__NGBE_SERVICE_INITED, &adapter->state)) + ngbe_service_event_schedule(adapter); +} - /* enable itr by default in dynamic mode */ - wx->rx_itr_setting = 1; - wx->tx_itr_setting = 1; +static void ngbe_check_remove(struct ngbe_hw *hw, u32 reg) +{ + u32 value; - /* set default ring sizes */ - wx->tx_ring_count = NGBE_DEFAULT_TXD; - wx->rx_ring_count = NGBE_DEFAULT_RXD; + /* The following check not only optimizes a bit by not + * performing a read on the status register when the + * register just read was a status register read that + * returned NGBE_FAILED_READ_REG. It also blocks any + * potential recursion. 
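+ * If the port status register itself reads as all ones, the device is
+ * assumed to have been removed (e.g. surprise hot-unplug) and is torn down.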
+ */ + if (reg == NGBE_CFG_PORT_ST) { + ngbe_remove_adapter(hw); + return; + } + value = rd32(hw, NGBE_CFG_PORT_ST); + if (value == NGBE_FAILED_READ_REG) + ngbe_remove_adapter(hw); +} - /* set default work limits */ - wx->tx_work_limit = NGBE_DEFAULT_TX_WORK; - wx->rx_work_limit = NGBE_DEFAULT_RX_WORK; +static u32 ngbe_validate_register_read(struct ngbe_hw *hw, u32 reg, bool quiet) +{ + int i; + u32 value; + u8 __iomem *reg_addr; + struct ngbe_adapter *adapter = hw->back; - return 0; + reg_addr = READ_ONCE(hw->hw_addr); + if (NGBE_REMOVED(reg_addr)) + return NGBE_FAILED_READ_REG; + for (i = 0; i < NGBE_DEAD_READ_RETRIES; ++i) { + value = ngbe_rd32(reg_addr + reg); + if (value != NGBE_DEAD_READ_REG) + break; + } + if (quiet) + return value; + if (value == NGBE_DEAD_READ_REG) + e_err(drv, "%s: register %x read unchanged\n", __func__, reg); + else + e_warn(hw, "%s: register %x read recovered after %d retries\n", + __func__, reg, i + 1); + return value; } /** - * ngbe_irq_enable - Enable default interrupt generation settings - * @wx: board private structure - * @queues: enable all queues interrupts - **/ -static void ngbe_irq_enable(struct wx *wx, bool queues) + * ngbe_read_reg - Read from device register + * @hw: hw specific details + * @reg: offset of register to read + * + * Returns : value read or NGBE_FAILED_READ_REG if removed + * + * This function is used to read device registers. It checks for device + * removal by confirming any read that returns all ones by checking the + * status register value for all ones. This function avoids reading from + * the hardware if a removal was previously detected in which case it + * returns NGBE_FAILED_READ_REG (all ones). + */ +u32 ngbe_read_reg(struct ngbe_hw *hw, u32 reg, bool quiet) { - u32 mask; + u32 value; + u8 __iomem *reg_addr; - /* enable misc interrupt */ - mask = NGBE_PX_MISC_IEN_MASK; - - wr32(wx, WX_GPIO_DDR, WX_GPIO_DDR_0); - wr32(wx, WX_GPIO_INTEN, WX_GPIO_INTEN_0 | WX_GPIO_INTEN_1); - wr32(wx, WX_GPIO_INTTYPE_LEVEL, 0x0); - wr32(wx, WX_GPIO_POLARITY, wx->gpio_ctrl ? 
0 : 0x3); + reg_addr = READ_ONCE(hw->hw_addr); + if (NGBE_REMOVED(reg_addr)) + return NGBE_FAILED_READ_REG; + value = ngbe_rd32(reg_addr + reg); + if (unlikely(value == NGBE_FAILED_READ_REG)) + ngbe_check_remove(hw, reg); + if (unlikely(value == NGBE_DEAD_READ_REG)) + value = ngbe_validate_register_read(hw, reg, quiet); + return value; +} - wr32(wx, WX_PX_MISC_IEN, mask); +static void ngbe_release_hw_control(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + /* Let firmware take over control of h/w */ + wr32m(&adapter->hw, NGBE_CFG_PORT_CTL, + NGBE_CFG_PORT_CTL_DRV_LOAD, 0); + if (hw->phy.type == ngbe_phy_yt8521s_sfi) + wr32(&adapter->hw, NGBE_CFG_LED_CTL, 0x0); +} - /* mask interrupt */ - if (queues) - wx_intr_enable(wx, NGBE_INTR_ALL); - else - wx_intr_enable(wx, NGBE_INTR_MISC(wx)); +static void ngbe_get_hw_control(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + /* Let firmware know the driver has taken over */ + wr32m(&adapter->hw, NGBE_CFG_PORT_CTL, + NGBE_CFG_PORT_CTL_DRV_LOAD, NGBE_CFG_PORT_CTL_DRV_LOAD); + if (hw->phy.type == ngbe_phy_yt8521s_sfi) + wr32(&adapter->hw, NGBE_CFG_LED_CTL, BIT(18)); } /** - * ngbe_intr - msi/legacy mode Interrupt Handler - * @irq: interrupt number - * @data: pointer to a network interface device structure + * ngbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors + * @adapter: pointer to adapter struct + * @direction: 0 for Rx, 1 for Tx, -1 for other causes + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + * **/ -static irqreturn_t ngbe_intr(int __always_unused irq, void *data) +static void ngbe_set_ivar(struct ngbe_adapter *adapter, s8 direction, + u16 queue, u16 msix_vector) { - struct wx_q_vector *q_vector; - struct wx *wx = data; - struct pci_dev *pdev; - u32 eicr; + u32 ivar, index; + struct ngbe_hw *hw = &adapter->hw; - q_vector = wx->q_vector[0]; - pdev = wx->pdev; + if (direction == -1) { + /* other causes */ + msix_vector |= NGBE_PX_IVAR_ALLOC_VAL; + index = 0; + ivar = rd32(&adapter->hw, NGBE_PX_MISC_IVAR); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + /* if assigned VFs >= 7, the pf misc irq shall be remapped to 0x88. */ + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) + ivar = msix_vector; + wr32(&adapter->hw, NGBE_PX_MISC_IVAR, ivar); + } else { + /* tx or rx causes */ + msix_vector |= NGBE_PX_IVAR_ALLOC_VAL; + index = ((16 * (queue & 1)) + (8 * direction)); + ivar = rd32(hw, NGBE_PX_IVAR(queue >> 1)); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + wr32(hw, NGBE_PX_IVAR(queue >> 1), ivar); + } +} - eicr = wx_misc_isb(wx, WX_ISB_VEC0); - if (!eicr) { - /* shared interrupt alert! - * the interrupt that we masked before the EICR read. 
- */ - if (netif_running(wx->netdev)) - ngbe_irq_enable(wx, true); - return IRQ_NONE; /* Not our interrupt */ +void ngbe_unmap_and_free_tx_resource(struct ngbe_ring *ring, + struct ngbe_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); } - wx->isb_mem[WX_ISB_VEC0] = 0; - if (!(pdev->msi_enabled)) - wr32(wx, WX_PX_INTA, 1); + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* tx_buffer must be completely set up in the transmit path */ +} - wx->isb_mem[WX_ISB_MISC] = 0; - /* would disable interrupts here but it is auto disabled */ - napi_schedule_irqoff(&q_vector->napi); +static void ngbe_update_xoff_rx_lfc(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_hw_stats *hwstats = &adapter->stats; + int i; + u32 data; - if (netif_running(wx->netdev)) - ngbe_irq_enable(wx, false); + if ((hw->fc.current_mode != ngbe_fc_full) && + (hw->fc.current_mode != ngbe_fc_rx_pause)) + return; - return IRQ_HANDLED; + data = rd32(hw, NGBE_MAC_LXOFFRXC); + + hwstats->lxoffrxc += data; + + /* refill credits (no tx hang) if we received xoff */ + if (!data) + return; + + for (i = 0; i < adapter->num_tx_queues; i++) + clear_bit(__NGBE_HANG_CHECK_ARMED, + &adapter->tx_ring[i]->state); + for (i = 0; i < adapter->num_xdp_queues; i++) + clear_bit(__NGBE_HANG_CHECK_ARMED, + &adapter->xdp_ring[i]->state); } -static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data) + +static u64 ngbe_get_tx_completed(struct ngbe_ring *ring) { - struct wx *wx = data; + return ring->stats.packets; +} - /* re-enable the original interrupt state, no lsc, no queues */ - if (netif_running(wx->netdev)) - ngbe_irq_enable(wx, false); +static u64 ngbe_get_tx_pending(struct ngbe_ring *ring) +{ + struct ngbe_adapter *adapter; + struct ngbe_hw *hw; + u32 head, tail; - return IRQ_HANDLED; + if (ring->accel) + adapter = ring->accel->adapter; + else + adapter = ring->q_vector->adapter; + + hw = &adapter->hw; + head = rd32(hw, NGBE_PX_TR_RP(ring->reg_idx)); + tail = rd32(hw, NGBE_PX_TR_WP(ring->reg_idx)); + + return ((head <= tail) ? tail : tail + ring->count) - head; +} + +static inline bool ngbe_check_tx_hang(struct ngbe_ring *tx_ring) +{ + u64 tx_done = ngbe_get_tx_completed(tx_ring); + u64 tx_done_old = tx_ring->tx_stats.tx_done_old; + u64 tx_pending = ngbe_get_tx_pending(tx_ring); + + clear_check_for_tx_hang(tx_ring); + + /* + * Check for a hung queue, but be thorough. This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. The + * ARMED bit is set to indicate a potential hang. The + * bit is cleared if a pause frame is received to remove + * false hang detection due to PFC or 802.3x frames. By + * requiring this to fail twice we avoid races with + * pfc clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. 
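+ * In short: the first failed check only arms the flag; a hang is reported
+ * only when a second consecutive check sees no new completions while work
+ * is still pending.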
+ */ + if (tx_done_old == tx_done && tx_pending) { + + /* make sure it is true for two checks in a row */ + return test_and_set_bit(__NGBE_HANG_CHECK_ARMED, + &tx_ring->state); + } + /* update completed stats and continue */ + tx_ring->tx_stats.tx_done_old = tx_done; + /* reset the countdown */ + clear_bit(__NGBE_HANG_CHECK_ARMED, &tx_ring->state); + + return false; +} + +static void ngbe_tx_timeout_dorecovery(struct ngbe_adapter *adapter) +{ + /* schedule immediate reset if we believe we hung */ + if (adapter->hw.bus.lan_id == 0) + adapter->flags2 |= NGBE_FLAG2_PCIE_NEED_RECOVER; + else + wr32(&adapter->hw, NGBE_MIS_PF_SM, 1); + ngbe_service_event_schedule(adapter); } /** - * ngbe_request_msix_irqs - Initialize MSI-X interrupts - * @wx: board private structure - * - * ngbe_request_msix_irqs allocates MSI-X vectors and requests - * interrupts from the kernel. + * ngbe_tx_timeout_reset - initiate reset due to Tx timeout + * @adapter: driver private struct **/ -static int ngbe_request_msix_irqs(struct wx *wx) +static void ngbe_tx_timeout_reset(struct ngbe_adapter *adapter) { - struct net_device *netdev = wx->netdev; - int vector, err; +#if 0 + if (time_after(jiffies, (adapter->tx_timeout_last_recovery + HZ*20))) + adapter->tx_timeout_recovery_level = 0; + else if (time_before(jiffies, + (adapter->tx_timeout_last_recovery + + adapter->netdev->watchdog_timeo))) + return; /* don't do any new action before the next timeout */ - for (vector = 0; vector < wx->num_q_vectors; vector++) { - struct wx_q_vector *q_vector = wx->q_vector[vector]; - struct msix_entry *entry = &wx->msix_entries[vector]; - - if (q_vector->tx.ring && q_vector->rx.ring) - snprintf(q_vector->name, sizeof(q_vector->name) - 1, - "%s-TxRx-%d", netdev->name, entry->entry); - else - /* skip this unused q_vector */ - continue; + adapter->tx_timeout_last_recovery = jiffies; + netdev_info(adapter->netdev, "tx_timeout recovery level %d\n", + adapter->tx_timeout_recovery_level); - err = request_irq(entry->vector, wx_msix_clean_rings, 0, - q_vector->name, q_vector); - if (err) { - wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n", - q_vector->name, err); - goto free_queue_irqs; + /* Do the reset outside of interrupt context */ + if (!test_bit(__NGBE_DOWN, &adapter->state)) { + switch (adapter->tx_timeout_recovery_level) { + case 0: + adapter->flags2 |= NGBE_FLAG2_PF_RESET_REQUESTED; + break; + case 1: + adapter->flags2 |= NGBE_FLAG2_DEV_RESET_REQUESTED; + break; + case 2: + adapter->flags2 |= NGBE_FLAG2_GLOBAL_RESET_REQUESTED; + break; + default: + netdev_err(adapter->netdev, + "tx_timeout recovery unsuccessful\n"); + break; } } +#endif + if (!test_bit(__NGBE_DOWN, &adapter->state)) { + adapter->flags2 |= NGBE_FLAG2_PF_RESET_REQUESTED; + e_warn(drv, "initiating reset due to tx timeout\n"); + ngbe_service_event_schedule(adapter); + } +// adapter->tx_timeout_recovery_level++; +} - err = request_irq(wx->msix_entries[vector].vector, - ngbe_msix_other, 0, netdev->name, wx); +/** + * ngbe_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +#ifdef HAVE_TX_TIMEOUT_TXQUEUE + static void ngbe_tx_timeout(struct net_device *netdev, unsigned int txqueue) +#else + static void ngbe_tx_timeout(struct net_device *netdev) +#endif +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + int i; + u16 vid = 0; + u16 cmd = 0; + u32 reg32 = 0; + u32 head, tail; - if (err) { - wx_err(wx, "request_irq for msix_other failed: %d\n", err); - goto free_queue_irqs; + 
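+	/* dump PCI config space, interrupt masks and per-ring head/tail
+	 * state to help pick a recovery path below
+	 */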
pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &vid); + ERROR_REPORT1(NGBE_ERROR_POLLING, "pci vendor id is 0x%x\n", vid); + + pci_read_config_word(adapter->pdev, PCI_COMMAND, &cmd); + ERROR_REPORT1(NGBE_ERROR_POLLING, "pci command reg is 0x%x.\n", cmd); + + reg32 = rd32(&adapter->hw, 0x10000); + ERROR_REPORT1(NGBE_ERROR_POLLING, "reg 0x10000 value is 0x%08x\n", reg32); + + for (i = 0; i < adapter->num_tx_queues; i++) { + head = rd32(&adapter->hw, NGBE_PX_TR_RP(adapter->tx_ring[i]->reg_idx)); + tail = rd32(&adapter->hw, NGBE_PX_TR_WP(adapter->tx_ring[i]->reg_idx)); + + ERROR_REPORT1(NGBE_ERROR_POLLING, + "tx ring %d next_to_use is %d, next_to_clean is %d\n", + i, adapter->tx_ring[i]->next_to_use, adapter->tx_ring[i]->next_to_clean); + ERROR_REPORT1(NGBE_ERROR_POLLING, + "tx ring %d hw rp is 0x%x, wp is 0x%x\n", i, head, tail); } - return 0; + reg32 = rd32(&adapter->hw, NGBE_PX_IMS); + ERROR_REPORT1(NGBE_ERROR_POLLING, + "PX_IMS value is 0x%08x\n", reg32); + if (reg32) { + ERROR_REPORT1(NGBE_ERROR_POLLING, "clear interrupt mask.\n"); + wr32(&adapter->hw, NGBE_PX_ICS, reg32); + wr32(&adapter->hw, NGBE_PX_IMC, reg32); + } -free_queue_irqs: - while (vector) { - vector--; - free_irq(wx->msix_entries[vector].vector, - wx->q_vector[vector]); + if (NGBE_RECOVER_CHECK == 1) { + if (vid == NGBE_FAILED_READ_CFG_WORD) { + ngbe_tx_timeout_dorecovery(adapter); + } else { + ngbe_print_tx_hang_status(adapter); + ngbe_tx_timeout_reset(adapter); + } + } else { + ngbe_tx_timeout_dorecovery(adapter); } - wx_reset_interrupt_capability(wx); - return err; } /** - * ngbe_request_irq - initialize interrupts - * @wx: board private structure - * - * Attempts to configure interrupts using the best available - * capabilities of the hardware and kernel. + * ngbe_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: structure containing interrupt and ring information + * @tx_ring: tx ring to clean **/ -static int ngbe_request_irq(struct wx *wx) +static bool ngbe_clean_tx_irq(struct ngbe_q_vector *q_vector, + struct ngbe_ring *tx_ring) { - struct net_device *netdev = wx->netdev; - struct pci_dev *pdev = wx->pdev; - int err; + struct ngbe_adapter *adapter = q_vector->adapter; + struct ngbe_tx_buffer *tx_buffer; + union ngbe_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + unsigned int i = tx_ring->next_to_clean; + struct ngbe_hw *hw = &adapter->hw; + u16 vid = 0; - if (pdev->msix_enabled) - err = ngbe_request_msix_irqs(wx); - else if (pdev->msi_enabled) - err = request_irq(pdev->irq, ngbe_intr, 0, - netdev->name, wx); - else - err = request_irq(pdev->irq, ngbe_intr, IRQF_SHARED, - netdev->name, wx); + if (test_bit(__NGBE_DOWN, &adapter->state)) + return true; - if (err) - wx_err(wx, "request_irq failed, Error %d\n", err); + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = NGBE_TX_DESC(tx_ring, i); + i -= tx_ring->count; + do { - return err; -} + union ngbe_tx_desc *eop_desc = tx_buffer->next_to_watch; -static void ngbe_disable_device(struct wx *wx) -{ - struct net_device *netdev = wx->netdev; - u32 i; + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; - /* disable all enabled rx queues */ - for (i = 0; i < wx->num_rx_queues; i++) - /* this call also flushes the previous write */ - wx_disable_rx_queue(wx, wx->rx_ring[i]); - /* disable receives */ - wx_disable_rx(wx); - wx_napi_disable_all(wx); - netif_tx_stop_all_queues(netdev); - netif_tx_disable(netdev); - if (wx->gpio_ctrl) - 
ngbe_sfp_modules_txrx_powerctl(wx, false); - wx_irq_disable(wx); - /* disable transmits in the hardware now that interrupts are off */ - for (i = 0; i < wx->num_tx_queues; i++) { - u8 reg_idx = wx->tx_ring[i]->reg_idx; + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(NGBE_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ +#ifdef HAVE_XDP_SUPPORT + if (ring_is_xdp(tx_ring)) +#ifdef HAVE_XDP_FRAME_STRUCT + xdp_return_frame(tx_buffer->xdpf); +#else + page_frag_free(tx_buffer->data); +#endif + else +#endif + dev_consume_skb_any(tx_buffer->skb); + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); - wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH); + /* clear tx_buffer data */ + if (ring_is_xdp(tx_ring)) +#ifdef HAVE_XDP_FRAME_STRUCT + tx_buffer->xdpf = NULL; +#else + tx_buffer->data = NULL; +#endif + else + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = NGBE_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = NGBE_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + if (check_for_tx_hang(tx_ring)) { + if (!ngbe_check_tx_hang(tx_ring)) { + adapter->hang_cnt = 0; + } else + adapter->hang_cnt++; + + if ( adapter->hang_cnt >= 5 ) { + /* schedule immediate reset if we believe we hung */ + + e_err(drv, "Detected Tx Unit Hang%s\n" + " Tx Queue <%d>\n" + " TDH, TDT <%x>, <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "tx_buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " jiffies <%lx>\n", + ring_is_xdp(tx_ring) ? 
" (XDP)" : "", + tx_ring->queue_index, + rd32(hw, NGBE_PX_TR_RP(tx_ring->reg_idx)), + rd32(hw, NGBE_PX_TR_WP(tx_ring->reg_idx)), + tx_ring->next_to_use, i, + tx_ring->tx_buffer_info[i].time_stamp, jiffies); + + pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &vid); + if (vid == NGBE_FAILED_READ_CFG_WORD) { + e_info(hw, "pcie link has been lost.\n"); + } + + if (!ring_is_xdp(tx_ring)) + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + e_info(probe, + "tx hang %d detected on queue %d, resetting adapter\n", + adapter->tx_timeout_count + 1, tx_ring->queue_index); + + if (NGBE_RECOVER_CHECK == 1) { + if (vid == NGBE_FAILED_READ_CFG_WORD) { + ngbe_tx_timeout_dorecovery(adapter); + } else { + ngbe_print_tx_hang_status(adapter); + ngbe_tx_timeout_reset(adapter); + } + } else { + ngbe_tx_timeout_dorecovery(adapter); + } + + /* the adapter is about to reset, no point in enabling stuff */ + return true; + } } -} + if(ring_is_xdp(tx_ring)) + return !!budget; + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); -static void ngbe_down(struct wx *wx) -{ - phy_stop(wx->phydev); - ngbe_disable_device(wx); - wx_clean_all_tx_rings(wx); - wx_clean_all_rx_rings(wx); +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (ngbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); +#ifdef HAVE_TX_MQ + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) + && !test_bit(__NGBE_DOWN, &adapter->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } +#else + if (netif_queue_stopped(tx_ring->netdev) && + !test_bit(__NGBE_DOWN, &adapter->state)) { + netif_wake_queue(tx_ring->netdev); + ++tx_ring->tx_stats.restart_queue; + } +#endif + } + + return !!budget; } -static void ngbe_up(struct wx *wx) + +#ifdef NETIF_F_RXHASH +#define NGBE_RSS_L4_TYPES_MASK \ + ((1ul << NGBE_RXD_RSSTYPE_IPV4_TCP) | \ + (1ul << NGBE_RXD_RSSTYPE_IPV4_UDP) | \ + (1ul << NGBE_RXD_RSSTYPE_IPV4_SCTP) | \ + (1ul << NGBE_RXD_RSSTYPE_IPV6_TCP) | \ + (1ul << NGBE_RXD_RSSTYPE_IPV6_UDP) | \ + (1ul << NGBE_RXD_RSSTYPE_IPV6_SCTP)) + +static inline void ngbe_rx_hash(struct ngbe_ring *ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) { - wx_configure_vectors(wx); + u16 rss_type; - /* make sure to complete pre-operations */ - smp_mb__before_atomic(); - wx_napi_enable_all(wx); - /* enable transmits */ - netif_tx_start_all_queues(wx->netdev); + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; - /* clear any pending interrupts, may auto mask */ - rd32(wx, WX_PX_IC(0)); - rd32(wx, WX_PX_MISC_IC); - ngbe_irq_enable(wx, true); - if (wx->gpio_ctrl) - ngbe_sfp_modules_txrx_powerctl(wx, true); + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + NGBE_RXD_RSSTYPE_MASK; + + if (!rss_type) + return; - phy_start(wx->phydev); + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (NGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? + PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); } +#endif /* NETIF_F_RXHASH */ + /** - * ngbe_open - Called when a network interface is made active - * @netdev: network interface device structure - * - * Returns 0 on success, negative value on failure - * - * The open entry point is called when a network interface is made - * active by the system (IFF_UP). 
+ * ngbe_rx_checksum - indicate in skb if hw indicated a good cksum + * @ring: structure containing ring specific data + * @rx_desc: current Rx descriptor being processed + * @skb: skb currently being received and modified **/ -static int ngbe_open(struct net_device *netdev) +static inline void ngbe_rx_checksum(struct ngbe_ring *ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) { - struct wx *wx = netdev_priv(netdev); - int err; + ngbe_dptype dptype = decode_rx_desc_ptype(rx_desc); - wx_control_hw(wx, true); + skb->ip_summed = CHECKSUM_NONE; - err = wx_setup_resources(wx); - if (err) - return err; + skb_checksum_none_assert(skb); - wx_configure(wx); - - err = ngbe_request_irq(wx); - if (err) - goto err_free_resources; + /* Rx csum disabled */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; - err = ngbe_phy_connect(wx); - if (err) - goto err_free_irq; + /* if IPv4 header checksum error */ + if ((ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_IPCS) && + ngbe_test_staterr(rx_desc, NGBE_RXD_ERR_IPE)) || + (ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_OUTERIPCS) && + ngbe_test_staterr(rx_desc, NGBE_RXD_ERR_OUTERIPER))) { + ring->rx_stats.csum_err++; + return; + } - err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues); - if (err) - goto err_dis_phy; + /* L4 checksum offload flag must set for the below code to work */ + if (!ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_L4CS)) + return; - err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues); - if (err) - goto err_dis_phy; + /*likely incorrect csum if IPv6 Dest Header found */ + if (dptype.prot != NGBE_DEC_PTYPE_PROT_SCTP && NGBE_RXD_IPV6EX(rx_desc)) + return; - ngbe_up(wx); + /* if L4 checksum error */ + if (ngbe_test_staterr(rx_desc, NGBE_RXD_ERR_TCPE)) { + ring->rx_stats.csum_err++; + return; + } + /* If there is an outer header present that might contain a checksum + * we need to bump the checksum level by 1 to reflect the fact that + * we are indicating we validated the inner checksum. + */ + if (dptype.etype >= NGBE_DEC_PTYPE_ETYPE_IG) { + #ifdef HAVE_SKBUFF_CSUM_LEVEL + skb->csum_level = 1; + #endif + } - return 0; -err_dis_phy: - phy_disconnect(wx->phydev); -err_free_irq: - wx_free_irq(wx); -err_free_resources: - wx_free_resources(wx); - return err; + /* It must be a TCP or UDP or SCTP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + ring->rx_stats.csum_good_cnt++; } -/** - * ngbe_close - Disables a network interface - * @netdev: network interface device structure - * - * Returns 0, this is not allowed to fail - * - * The close entry point is called when an interface is de-activated - * by the OS. The hardware is still under the drivers control, but - * needs to be disabled. A global MAC reset is issued to stop the - * hardware, and all transmit and receive resources are freed. 
- **/ -static int ngbe_close(struct net_device *netdev) +static bool ngbe_alloc_mapped_skb(struct ngbe_ring *rx_ring, + struct ngbe_rx_buffer *bi) { - struct wx *wx = netdev_priv(netdev); + struct sk_buff *skb = bi->skb; + dma_addr_t dma = bi->dma; - ngbe_down(wx); - wx_free_irq(wx); - wx_free_resources(wx); - phy_disconnect(wx->phydev); - wx_control_hw(wx, false); + if (unlikely(dma)) + return true; - return 0; -} + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_buf_len); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } -static void ngbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake) -{ - struct wx *wx = pci_get_drvdata(pdev); - struct net_device *netdev; - u32 wufc = wx->wol; + bi->skb = skb; + + } + + dma = dma_map_single(rx_ring->dev, skb->data, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + dev_kfree_skb_any(skb); + bi->skb = NULL; + + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + bi->dma = dma; + return true; +} +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT +static bool ngbe_alloc_mapped_page(struct ngbe_ring *rx_ring, + struct ngbe_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(ngbe_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, + ngbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, ngbe_rx_pg_order(rx_ring)); + + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + bi->page_dma = dma; + bi->page = page; + bi->page_offset = rx_ring->xdp_prog ? 
XDP_PACKET_HEADROOM : 0; + + return true; +} +#endif +/** + * ngbe_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void ngbe_alloc_rx_buffers(struct ngbe_ring *rx_ring, u16 cleaned_count) +{ + union ngbe_rx_desc *rx_desc; + struct ngbe_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = NGBE_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + do { +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + if (!ngbe_alloc_mapped_skb(rx_ring, bi)) + break; + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + +#else + if (ring_is_hs_enabled(rx_ring)) { + if (!ngbe_alloc_mapped_skb(rx_ring, bi)) + break; + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + } + + if (!ngbe_alloc_mapped_page(rx_ring, bi)) + break; + rx_desc->read.pkt_addr = + cpu_to_le64(bi->page_dma + bi->page_offset); +#endif + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = NGBE_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.upper.status_error = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; +#endif + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } +} + +static inline u16 ngbe_get_hlen(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc) +{ + __le16 hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; + u16 hlen = le16_to_cpu(hdr_info) & NGBE_RXD_HDRBUFLEN_MASK; + + UNREFERENCED_PARAMETER(rx_ring); + + if (hlen > (NGBE_RX_HDR_SIZE << NGBE_RXD_HDRBUFLEN_SHIFT)) + hlen = 0; + else + hlen >>= NGBE_RXD_HDRBUFLEN_SHIFT; + + return hlen; +} + +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT +/** + * ngbe_merge_active_tail - merge active tail into lro skb + * @tail: pointer to active tail in frag_list + * + * This function merges the length and data of an active tail into the + * skb containing the frag_list. It resets the tail's pointer to the head, + * but it leaves the heads pointer to tail intact. + **/ +static inline struct sk_buff *ngbe_merge_active_tail(struct sk_buff *tail) +{ + struct sk_buff *head = NGBE_CB(tail)->head; + + if (!head) + return tail; + + head->len += tail->len; + head->data_len += tail->len; + head->truesize += tail->truesize; + + NGBE_CB(tail)->head = NULL; + + return head; +} + +/** + * ngbe_add_active_tail - adds an active tail into the skb frag_list + * @head: pointer to the start of the skb + * @tail: pointer to active tail to add to frag_list + * + * This function adds an active tail to the end of the frag list. This tail + * will still be receiving data so we cannot yet ad it's stats to the main + * skb. That is done via ngbe_merge_active_tail. 
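+ * The result is a singly linked chain of tails hanging off head's
+ * frag_list; only the newest tail still receives data, older tails have
+ * already been folded into the head by ngbe_merge_active_tail.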
+ **/ +static inline void ngbe_add_active_tail(struct sk_buff *head, + struct sk_buff *tail) +{ + struct sk_buff *old_tail = NGBE_CB(head)->tail; + + if (old_tail) { + ngbe_merge_active_tail(old_tail); + old_tail->next = tail; + } else { + skb_shinfo(head)->frag_list = tail; + } + + NGBE_CB(tail)->head = head; + NGBE_CB(head)->tail = tail; +} + +/** + * ngbe_close_active_frag_list - cleanup pointers on a frag_list skb + * @head: pointer to head of an active frag list + * + * This function will clear the frag_tail_tracker pointer on an active + * frag_list and returns true if the pointer was actually set + **/ +static inline bool ngbe_close_active_frag_list(struct sk_buff *head) +{ + struct sk_buff *tail = NGBE_CB(head)->tail; + + if (!tail) + return false; + + ngbe_merge_active_tail(tail); + + NGBE_CB(head)->tail = NULL; + + return true; +} + +#endif +#ifdef HAVE_VLAN_RX_REGISTER +/** + * ngbe_receive_skb - Send a completed packet up the stack + * @q_vector: structure containing interrupt and ring information + * @skb: packet to send up + **/ +static void ngbe_receive_skb(struct ngbe_q_vector *q_vector, + struct sk_buff *skb) +{ + u16 vlan_tag = NGBE_CB(skb)->vid; + +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) + if (vlan_tag & VLAN_VID_MASK) { + /* by placing vlgrp at start of structure we can alias it */ + struct vlan_group **vlgrp = netdev_priv(skb->dev); + if (!*vlgrp) + dev_kfree_skb_any(skb); + else if (q_vector->netpoll_rx) + vlan_hwaccel_rx(skb, *vlgrp, vlan_tag); + else + vlan_gro_receive(&q_vector->napi, + *vlgrp, vlan_tag, skb); + } else { +#endif + if (q_vector->netpoll_rx) + netif_rx(skb); + else + napi_gro_receive(&q_vector->napi, skb); +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) + } +#endif +} + +#endif /* HAVE_VLAN_RX_REGISTER */ +#ifndef NGBE_NO_LRO +/** + * ngbe_can_lro - returns true if packet is TCP/IPV4 and LRO is enabled + * @rx_ring: structure containing ring specific data + * @rx_desc: pointer to the rx descriptor + * @skb: pointer to the skb to be merged + * + **/ +static inline bool ngbe_can_lro(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct iphdr *iph = (struct iphdr *)skb->data; + ngbe_dptype dec_ptype = decode_rx_desc_ptype(rx_desc); + + /* verify hardware indicates this is IPv4/TCP */ + if (!dec_ptype.known || + NGBE_DEC_PTYPE_ETYPE_NONE != dec_ptype.etype || + NGBE_DEC_PTYPE_IP_IPV4 != dec_ptype.ip || + NGBE_DEC_PTYPE_PROT_TCP != dec_ptype.prot) + return false; + + /* .. and LRO is enabled */ + if (!(rx_ring->netdev->features & NETIF_F_LRO)) + return false; + + /* .. and we are not in promiscuous mode */ + if (rx_ring->netdev->flags & IFF_PROMISC) + return false; + + /* .. and the header is large enough for us to read IP/TCP fields */ + if (!pskb_may_pull(skb, sizeof(struct ngbe_lrohdr))) + return false; + + /* .. and there are no VLANs on packet */ + if (skb->protocol != __constant_htons(ETH_P_IP)) + return false; + + /* .. and we are version 4 with no options */ + if (*(u8 *)iph != 0x45) + return false; + + /* .. and the packet is not fragmented */ + if (iph->frag_off & htons(IP_MF | IP_OFFSET)) + return false; + + /* .. 
and that next header is TCP */ + if (iph->protocol != IPPROTO_TCP) + return false; + + return true; +} + +static inline struct ngbe_lrohdr *ngbe_lro_hdr(struct sk_buff *skb) +{ + return (struct ngbe_lrohdr *)skb->data; +} + +/** + * ngbe_lro_flush - Indicate packets to upper layer. + * + * Update IP and TCP header part of head skb if more than one + * skb's chained and indicate packets to upper layer. + **/ +static void ngbe_lro_flush(struct ngbe_q_vector *q_vector, + struct sk_buff *skb) +{ + struct ngbe_lro_list *lrolist = &q_vector->lrolist; + + __skb_unlink(skb, &lrolist->active); + + if (NGBE_CB(skb)->append_cnt) { + struct ngbe_lrohdr *lroh = ngbe_lro_hdr(skb); + +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + /* close any active lro contexts */ + ngbe_close_active_frag_list(skb); + +#endif + /* incorporate ip header and re-calculate checksum */ + lroh->iph.tot_len = ntohs(skb->len); + lroh->iph.check = 0; + + /* header length is 5 since we know no options exist */ + lroh->iph.check = ip_fast_csum((u8 *)lroh, 5); + + /* clear TCP checksum to indicate we are an LRO frame */ + lroh->th.check = 0; + + /* incorporate latest timestamp into the tcp header */ + if (NGBE_CB(skb)->tsecr) { + lroh->ts[2] = NGBE_CB(skb)->tsecr; + lroh->ts[1] = htonl(NGBE_CB(skb)->tsval); + } +#ifdef NETIF_F_GSO +#ifdef NAPI_GRO_CB + NAPI_GRO_CB(skb)->data_offset = 0; +#endif + skb_shinfo(skb)->gso_size = NGBE_CB(skb)->mss; + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; +#endif + } + +#ifdef HAVE_VLAN_RX_REGISTER + ngbe_receive_skb(q_vector, skb); +#else + napi_gro_receive(&q_vector->napi, skb); +#endif + lrolist->stats.flushed++; +} + +static void ngbe_lro_flush_all(struct ngbe_q_vector *q_vector) +{ + struct ngbe_lro_list *lrolist = &q_vector->lrolist; + struct sk_buff *skb, *tmp; + + skb_queue_reverse_walk_safe(&lrolist->active, skb, tmp) + ngbe_lro_flush(q_vector, skb); +} + +/* + * ngbe_lro_header_ok - Main LRO function. + **/ +static void ngbe_lro_header_ok(struct sk_buff *skb) +{ + struct ngbe_lrohdr *lroh = ngbe_lro_hdr(skb); + u16 opt_bytes, data_len; + +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + NGBE_CB(skb)->tail = NULL; +#endif + NGBE_CB(skb)->tsecr = 0; + NGBE_CB(skb)->append_cnt = 0; + NGBE_CB(skb)->mss = 0; + + /* ensure that the checksum is valid */ + if (skb->ip_summed != CHECKSUM_UNNECESSARY) + return; + + /* If we see CE codepoint in IP header, packet is not mergeable */ + if (INET_ECN_is_ce(ipv4_get_dsfield(&lroh->iph))) + return; + + /* ensure no bits set besides ack or psh */ + if (lroh->th.fin || lroh->th.syn || lroh->th.rst || + lroh->th.urg || lroh->th.ece || lroh->th.cwr || + !lroh->th.ack) + return; + + /* store the total packet length */ + data_len = ntohs(lroh->iph.tot_len); + + /* remove any padding from the end of the skb */ + __pskb_trim(skb, data_len); + + /* remove header length from data length */ + data_len -= sizeof(struct ngbe_lrohdr); + + /* + * check for timestamps. 
Since the only option we handle is timestamps, + * we only have to handle the simple case of aligned timestamps + */ + opt_bytes = (lroh->th.doff << 2) - sizeof(struct tcphdr); + if (opt_bytes != 0) { + if ((opt_bytes != TCPOLEN_TSTAMP_ALIGNED) || + !pskb_may_pull(skb, sizeof(struct ngbe_lrohdr) + + TCPOLEN_TSTAMP_ALIGNED) || + (lroh->ts[0] != htonl((TCPOPT_NOP << 24) | + (TCPOPT_NOP << 16) | + (TCPOPT_TIMESTAMP << 8) | + TCPOLEN_TIMESTAMP)) || + (lroh->ts[2] == 0)) { + return; + } + + NGBE_CB(skb)->tsval = ntohl(lroh->ts[1]); + NGBE_CB(skb)->tsecr = lroh->ts[2]; + + data_len -= TCPOLEN_TSTAMP_ALIGNED; + } + + /* record data_len as mss for the packet */ + NGBE_CB(skb)->mss = data_len; + NGBE_CB(skb)->next_seq = ntohl(lroh->th.seq); +} + +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT +static void ngbe_merge_frags(struct sk_buff *lro_skb, struct sk_buff *new_skb) +{ + struct skb_shared_info *sh_info; + struct skb_shared_info *new_skb_info; + unsigned int data_len; + + sh_info = skb_shinfo(lro_skb); + new_skb_info = skb_shinfo(new_skb); + + /* copy frags into the last skb */ + memcpy(sh_info->frags + sh_info->nr_frags, + new_skb_info->frags, + new_skb_info->nr_frags * sizeof(skb_frag_t)); + + /* copy size data over */ + sh_info->nr_frags += new_skb_info->nr_frags; + data_len = NGBE_CB(new_skb)->mss; + lro_skb->len += data_len; + lro_skb->data_len += data_len; + lro_skb->truesize += data_len; + + /* wipe record of data from new_skb and free it */ + new_skb_info->nr_frags = 0; + new_skb->len = new_skb->data_len = 0; + dev_kfree_skb_any(new_skb); +} + +#endif /* CONFIG_NGBE_DISABLE_PACKET_SPLIT */ +/** + * ngbe_lro_receive - if able, queue skb into lro chain + * @q_vector: structure containing interrupt and ring information + * @new_skb: pointer to current skb being checked + * + * Checks whether the given skb is eligible for LRO and, if so, chains + * it to the existing lro_skb for the same flow. If no LRO context + * exists for the flow, a new one is started.
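+ * Flows are matched on IPv4 source/destination address, TCP port pair and VLAN id.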
+ **/ +static void ngbe_lro_receive(struct ngbe_q_vector *q_vector, + struct sk_buff *new_skb) +{ + struct sk_buff *lro_skb; + struct ngbe_lro_list *lrolist = &q_vector->lrolist; + struct ngbe_lrohdr *lroh = ngbe_lro_hdr(new_skb); + __be32 saddr = lroh->iph.saddr; + __be32 daddr = lroh->iph.daddr; + __be32 tcp_ports = *(__be32 *)&lroh->th; +#ifdef HAVE_VLAN_RX_REGISTER + u16 vid = NGBE_CB(new_skb)->vid; +#else + u16 vid = new_skb->vlan_tci; +#endif + + ngbe_lro_header_ok(new_skb); + + /* + * we have a packet that might be eligible for LRO, + * so see if it matches anything we might expect + */ + skb_queue_walk(&lrolist->active, lro_skb) { + u16 data_len; + + if (*(__be32 *)&ngbe_lro_hdr(lro_skb)->th != tcp_ports || + ngbe_lro_hdr(lro_skb)->iph.saddr != saddr || + ngbe_lro_hdr(lro_skb)->iph.daddr != daddr) + continue; + +#ifdef HAVE_VLAN_RX_REGISTER + if (NGBE_CB(lro_skb)->vid != vid) +#else + if (lro_skb->vlan_tci != vid) +#endif + continue; + + /* out of order packet */ + if (NGBE_CB(lro_skb)->next_seq != + NGBE_CB(new_skb)->next_seq) { + ngbe_lro_flush(q_vector, lro_skb); + NGBE_CB(new_skb)->mss = 0; + break; + } + + /* TCP timestamp options have changed */ + if (!NGBE_CB(lro_skb)->tsecr != !NGBE_CB(new_skb)->tsecr) { + ngbe_lro_flush(q_vector, lro_skb); + break; + } + + /* make sure timestamp values are increasing */ + if (NGBE_CB(lro_skb)->tsecr && + NGBE_CB(lro_skb)->tsval > NGBE_CB(new_skb)->tsval) { + ngbe_lro_flush(q_vector, lro_skb); + NGBE_CB(new_skb)->mss = 0; + break; + } + + data_len = NGBE_CB(new_skb)->mss; + + /* Check for all of the above below + * malformed header + * no tcp data + * resultant packet would be too large + * new skb is larger than our current mss + * data would remain in header + * we would consume more frags then the sk_buff contains + * ack sequence numbers changed + * window size has changed + */ + if (data_len == 0 || + data_len > NGBE_CB(lro_skb)->mss || + data_len > NGBE_CB(lro_skb)->free || +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + data_len != new_skb->data_len || + skb_shinfo(new_skb)->nr_frags >= + (MAX_SKB_FRAGS - skb_shinfo(lro_skb)->nr_frags) || +#endif + ngbe_lro_hdr(lro_skb)->th.ack_seq != lroh->th.ack_seq || + ngbe_lro_hdr(lro_skb)->th.window != lroh->th.window) { + ngbe_lro_flush(q_vector, lro_skb); + break; + } + + /* Remove IP and TCP header */ + skb_pull(new_skb, new_skb->len - data_len); + + /* update timestamp and timestamp echo response */ + NGBE_CB(lro_skb)->tsval = NGBE_CB(new_skb)->tsval; + NGBE_CB(lro_skb)->tsecr = NGBE_CB(new_skb)->tsecr; + + /* update sequence and free space */ + NGBE_CB(lro_skb)->next_seq += data_len; + NGBE_CB(lro_skb)->free -= data_len; + + /* update append_cnt */ + NGBE_CB(lro_skb)->append_cnt++; + +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + /* if header is empty pull pages into current skb */ + ngbe_merge_frags(lro_skb, new_skb); +#else + /* chain this new skb in frag_list */ + ngbe_add_active_tail(lro_skb, new_skb); +#endif + + if ((data_len < NGBE_CB(lro_skb)->mss) || lroh->th.psh || + skb_shinfo(lro_skb)->nr_frags == MAX_SKB_FRAGS) { + ngbe_lro_hdr(lro_skb)->th.psh |= lroh->th.psh; + ngbe_lro_flush(q_vector, lro_skb); + } + + lrolist->stats.coal++; + return; + } + + if (NGBE_CB(new_skb)->mss && !lroh->th.psh) { + /* if we are at capacity flush the tail */ + if (skb_queue_len(&lrolist->active) >= NGBE_LRO_MAX) { + lro_skb = skb_peek_tail(&lrolist->active); + if (lro_skb) + ngbe_lro_flush(q_vector, lro_skb); + } + + /* update sequence and free space */ + NGBE_CB(new_skb)->next_seq += NGBE_CB(new_skb)->mss; 
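+ /* 'free' tracks how many more payload bytes this aggregate can absorb before it must be flushed */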
+ NGBE_CB(new_skb)->free = 65521 - new_skb->len; + + /* .. and insert at the front of the active list */ + __skb_queue_head(&lrolist->active, new_skb); + + lrolist->stats.coal++; + return; + } + + /* packet not handled by any of the above, pass it to the stack */ +#ifdef HAVE_VLAN_RX_REGISTER + ngbe_receive_skb(q_vector, new_skb); +#else + napi_gro_receive(&q_vector->napi, new_skb); +#endif /* HAVE_VLAN_RX_REGISTER */ +} + +#endif /* NGBE_NO_LRO */ + +static void ngbe_rx_vlan(struct ngbe_ring *ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ +#ifndef HAVE_VLAN_RX_REGISTER + u8 idx = 0; + u16 ethertype; +#endif +#if (defined NETIF_F_HW_VLAN_CTAG_RX) && (defined NETIF_F_HW_VLAN_STAG_RX) + if ((ring->netdev->features & (NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_RX)) && +#elif (defined NETIF_F_HW_VLAN_CTAG_RX) + if ((ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && +#elif (defined NETIF_F_HW_VLAN_STAG_RX) + if ((ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) && +#else + if ((ring->netdev->features & NETIF_F_HW_VLAN_RX) && +#endif + ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_VP)) +#ifndef HAVE_VLAN_RX_REGISTER + { + idx = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + NGBE_RXD_TPID_MASK) >> NGBE_RXD_TPID_SHIFT; + ethertype = ring->q_vector->adapter->hw.tpid[idx]; + __vlan_hwaccel_put_tag(skb, + htons(ethertype), + le16_to_cpu(rx_desc->wb.upper.vlan)); + } +#else /* !HAVE_VLAN_RX_REGISTER */ + NGBE_CB(skb)->vid = le16_to_cpu(rx_desc->wb.upper.vlan); + else + NGBE_CB(skb)->vid = 0; +#endif /* !HAVE_VLAN_RX_REGISTER */ +} + +/** + * ngbe_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. 
+ **/ +static void ngbe_process_skb_fields(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ +#ifdef HAVE_PTP_1588_CLOCK + u32 flags = rx_ring->q_vector->adapter->flags; +#endif /* HAVE_PTP_1588_CLOCK */ + +#ifdef NETIF_F_RXHASH + ngbe_rx_hash(rx_ring, rx_desc, skb); +#endif /* NETIF_F_RXHASH */ + + ngbe_rx_checksum(rx_ring, rx_desc, skb); +#ifdef HAVE_PTP_1588_CLOCK + if (unlikely(flags & NGBE_FLAG_RX_HWTSTAMP_ENABLED) && + unlikely(ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_TS))) { + ngbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb); + rx_ring->last_rx_timestamp = jiffies; + } +#endif /* HAVE_PTP_1588_CLOCK */ + + ngbe_rx_vlan(rx_ring, rx_desc, skb); + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static void ngbe_rx_skb(struct ngbe_q_vector *q_vector, + struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ +#ifdef HAVE_NDO_BUSY_POLL + skb_mark_napi_id(skb, &q_vector->napi); + + if (ngbe_qv_busy_polling(q_vector) || q_vector->netpoll_rx) { + netif_receive_skb(skb); + /* exit early if we busy polled */ + return; + } +#endif + +#ifndef NGBE_NO_LRO + if (ngbe_can_lro(rx_ring, rx_desc, skb)) + ngbe_lro_receive(q_vector, skb); + else +#endif +#ifdef HAVE_VLAN_RX_REGISTER + ngbe_receive_skb(q_vector, skb); +#else + napi_gro_receive(&q_vector->napi, skb); +#endif + +#ifndef NETIF_F_GRO + rx_ring->netdev->last_rx = jiffies; +#endif +} + +/** + * ngbe_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool ngbe_is_non_eop(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + struct sk_buff *next_skb; +#else + struct ngbe_rx_buffer *rx_buffer = + &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; +#endif + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(NGBE_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ + if (likely(ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_EOP))) + return false; + + /* place skb in next buffer to be received */ +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + next_skb = rx_ring->rx_buffer_info[ntc].skb; + + ngbe_add_active_tail(skb, next_skb); + NGBE_CB(next_skb)->head = skb; +#else + if (ring_is_hs_enabled(rx_ring)) { + rx_buffer->skb = rx_ring->rx_buffer_info[ntc].skb; + rx_buffer->dma = rx_ring->rx_buffer_info[ntc].dma; + rx_ring->rx_buffer_info[ntc].dma = 0; + } + rx_ring->rx_buffer_info[ntc].skb = skb; +#endif + rx_ring->rx_stats.non_eop_descs++; + + return true; +} + +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT +/** + * ngbe_pull_tail - ngbe specific version of skb_pull_tail + * @skb: pointer to current skb being adjusted + * + * This function is an ngbe specific version of __pskb_pull_tail. 
The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +static void ngbe_pull_tail(struct sk_buff *skb) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* + * it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* + * we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(skb->dev, va, NGBE_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + skb_frag_off_add(frag, pull_len); + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * ngbe_dma_sync_frag - perform DMA sync for first frag of SKB + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being updated + * + * This function provides a basic DMA sync up for the first fragment of an + * skb. The reason for doing this is that the first fragment cannot be + * unmapped until we have reached the end of packet descriptor for a buffer + * chain. + */ +static void ngbe_dma_sync_frag(struct ngbe_ring *rx_ring, + struct sk_buff *skb) +{ +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); + +#endif + /* if the page was released unmap it, else just sync our portion */ + if (unlikely(NGBE_CB(skb)->page_released)) { + dma_unmap_page_attrs(rx_ring->dev, NGBE_CB(skb)->dma, + ngbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + NGBE_RX_DMA_ATTR); +#endif + } else if (ring_uses_build_skb(rx_ring)) { + unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; + + dma_sync_single_range_for_cpu(rx_ring->dev, + NGBE_CB(skb)->dma, + offset, + skb_headlen(skb), + DMA_FROM_DEVICE); + } else { + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + + dma_sync_single_range_for_cpu(rx_ring->dev, + NGBE_CB(skb)->dma, + skb_frag_off(frag), + skb_frag_size(frag), + DMA_FROM_DEVICE); + } +} + +/** + * ngbe_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Check for corrupted packet headers caused by senders on the local L2 + * embedded NIC switch not setting up their Tx Descriptors right. These + * should be very rare. + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. 
+ **/ +static bool ngbe_cleanup_headers(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *netdev = rx_ring->netdev; + + if (IS_ERR(skb)) + return true; + + /* verify that the packet does not have any known errors */ + if (unlikely(ngbe_test_staterr(rx_desc, + NGBE_RXD_ERR_FRAME_ERR_MASK) && + !(netdev->features & NETIF_F_RXALL))) { + dev_kfree_skb_any(skb); + return true; + } + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb) && !skb_headlen(skb)) + ngbe_pull_tail(skb); + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/** + * ngbe_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void ngbe_reuse_rx_page(struct ngbe_ring *rx_ring, + struct ngbe_rx_buffer *old_buff) +{ + struct ngbe_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + new_buff->page_dma = old_buff->page_dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; +#endif + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, new_buff->page_dma, + new_buff->page_offset, + ngbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); +} + +static inline bool ngbe_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); +} + +/** + * ngbe_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. 
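+ * Small frames (no larger than NGBE_RX_HDR_SIZE) may instead be copied directly into the linear data area of the skb.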
+ **/ +static bool ngbe_add_rx_frag(struct ngbe_ring *rx_ring, + struct ngbe_rx_buffer *rx_buffer, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = ngbe_rx_bufsz(rx_ring); +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int last_offset = ngbe_rx_pg_size(rx_ring) - + ngbe_rx_bufsz(rx_ring); +#endif + + if ((size <= NGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb) && + !ring_is_hs_enabled(rx_ring)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* page is not reserved, we can reuse buffer as-is */ + if (likely(!ngbe_page_is_reserved(page))) + return true; + + /* this page cannot be reused so discard it */ + __free_pages(page, ngbe_rx_pg_order(rx_ring)); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + /* avoid re-using remote pages */ + if (unlikely(ngbe_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > last_offset) + return false; +#endif + + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. + */ + page_ref_inc(page); + + return true; +} + +static struct sk_buff *ngbe_fetch_rx_buffer(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc) +{ + struct ngbe_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + skb = rx_buffer->skb; + + if (likely(!skb)) { + void *page_addr = page_address(page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + NGBE_RX_HDR_SIZE); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return NULL; + } + + /* + * we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + + /* + * Delay unmapping of the first packet. It carries the + * header information, HW may still access the header + * after the writeback. 
Only unmap it when EOP is + * reached + */ + if (likely(ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_EOP))) + goto dma_sync; + + NGBE_CB(skb)->dma = rx_buffer->page_dma; + } else { + if (ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_EOP)) + ngbe_dma_sync_frag(rx_ring, skb); + +dma_sync: + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->page_dma, + rx_buffer->page_offset, + ngbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + rx_buffer->skb = NULL; + } + + /* pull page into skb */ + if (ngbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + ngbe_reuse_rx_page(rx_ring, rx_buffer); + } else if (NGBE_CB(skb)->dma == rx_buffer->page_dma) { + /* the page has been released from the ring */ + NGBE_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->page_dma, + ngbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; + + return skb; +} + +static struct sk_buff *ngbe_fetch_rx_buffer_hs(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc) +{ + struct ngbe_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + int hdr_len = 0; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + skb = rx_buffer->skb; + rx_buffer->skb = NULL; + prefetchw(skb->data); + + if (!skb_is_nonlinear(skb)) { + hdr_len = ngbe_get_hlen(rx_ring, rx_desc); + if (hdr_len > 0) { + __skb_put(skb, hdr_len); + NGBE_CB(skb)->dma_released = true; + NGBE_CB(skb)->dma = rx_buffer->dma; + rx_buffer->dma = 0; + } else { + dma_unmap_single(rx_ring->dev, + rx_buffer->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_buffer->dma = 0; + if (likely(ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_EOP))) + goto dma_sync; + NGBE_CB(skb)->dma = rx_buffer->page_dma; + goto add_frag; + } + } + + if (ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_EOP)) { + if (skb_headlen(skb)) { + if (NGBE_CB(skb)->dma_released == true) { + dma_unmap_single(rx_ring->dev, + NGBE_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + NGBE_CB(skb)->dma = 0; + NGBE_CB(skb)->dma_released = false; + } + } else + ngbe_dma_sync_frag(rx_ring, skb); + } + +dma_sync: + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->page_dma, + rx_buffer->page_offset, + ngbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); +add_frag: + /* pull page into skb */ + if (ngbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + ngbe_reuse_rx_page(rx_ring, rx_buffer); + } else if (NGBE_CB(skb)->dma == rx_buffer->page_dma) { + /* the page has been released from the ring */ + NGBE_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->page_dma, + ngbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; + + return skb; +} +#define NGBE_XDP_PASS 0 +#define NGBE_XDP_CONSUMED 1 +#define NGBE_XDP_TX 2 +#define NGBE_XDP_REDIR 4 + + +#define NGBE_TXD_CMD (NGBE_TXD_EOP | \ + NGBE_TXD_RS) + +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_XDP_FRAME_STRUCT +int ngbe_xmit_xdp_ring(struct ngbe_ring *ring, struct xdp_frame *xdpf) +#else +int ngbe_xmit_xdp_ring(struct ngbe_ring *ring, struct xdp_buff *xdp) +#endif +{ + struct ngbe_tx_buffer *tx_buffer; + union ngbe_tx_desc *tx_desc; + 
u32 len, cmd_type = 0; + dma_addr_t dma; + u16 i; +#ifdef HAVE_XDP_FRAME_STRUCT + len = xdpf->len; +#else + len = xdp->data_end - xdp->data; +#endif + + if (unlikely(!ngbe_desc_unused(ring))) + return NGBE_XDP_CONSUMED; + +#ifdef HAVE_XDP_FRAME_STRUCT + dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE); +#else + dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE); +#endif + if (dma_mapping_error(ring->dev, dma)) + return NGBE_XDP_CONSUMED; + + /* record the location of the first descriptor for this packet */ + tx_buffer = &ring->tx_buffer_info[ring->next_to_use]; + tx_buffer->bytecount = len; + tx_buffer->gso_segs = 1; + tx_buffer->protocol = 0; + + i = ring->next_to_use; + tx_desc = NGBE_TX_DESC(ring, i); + + + dma_unmap_len_set(tx_buffer, len, len); + dma_unmap_addr_set(tx_buffer, dma, dma); + +#ifdef HAVE_XDP_FRAME_STRUCT + tx_buffer->xdpf = xdpf; +#else + tx_buffer->data = xdp->data; +#endif + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + cmd_type = ngbe_tx_cmd_type(tx_buffer->tx_flags); + cmd_type |= len | NGBE_TXD_CMD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = + cpu_to_le32(len << NGBE_TXD_PAYLEN_SHIFT); + + + /* Avoid any potential race with xdp_xmit and cleanup */ + smp_wmb(); + + + /* set next_to_watch value indicating a packet is present */ + i++; + if (i == ring->count) + i = 0; + + tx_buffer->next_to_watch = tx_desc; + ring->next_to_use = i; + + return NGBE_XDP_TX; +} +#endif +#ifdef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#ifdef HAVE_XDP_SUPPORT +#include +static inline void +_kc_bpf_warn_invalid_xdp_action(__maybe_unused struct net_device *dev, + __maybe_unused struct bpf_prog *prog, u32 act) +{ + bpf_warn_invalid_xdp_action(act); +} + +#define bpf_warn_invalid_xdp_action(dev, prog, act) \ + _kc_bpf_warn_invalid_xdp_action(dev, prog, act) +#endif /* HAVE_XDP_SUPPORT */ +#endif /* HAVE_NETDEV_PROG_XDP_WARN_ACTION */ + +static bool ngbe_can_reuse_rx_page(struct ngbe_rx_buffer *rx_buffer, + struct ngbe_ring *rx_ring) +{ + struct page *page = rx_buffer->page; +#if (PAGE_SIZE < 8192) +#else + unsigned int last_offset = ngbe_rx_pg_size(rx_ring) - + ngbe_rx_bufsz(rx_ring); +#endif + + /* avoid re-using remote pages */ + if (unlikely(ngbe_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; +#else + if (rx_buffer->page_offset > last_offset) + return false; + +#endif + page_ref_inc(page); + return true; +} + + +static void ngbe_put_rx_buffer(struct ngbe_ring *rx_ring, + struct ngbe_rx_buffer *rx_buffer, + struct sk_buff *skb) +{ + if (ngbe_can_reuse_rx_page(rx_buffer, rx_ring)) { + /* hand second half of page back to the ring */ + ngbe_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* We are not reusing the buffer so unmap it and free + * any references we are holding to it + */ + dma_unmap_page(rx_ring->dev, rx_buffer->page_dma, + ngbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + __free_pages(rx_buffer->page, + ngbe_rx_pg_order(rx_ring)); + + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; +} + +static struct sk_buff * +ngbe_run_xdp(struct ngbe_adapter __maybe_unused *adapter, + struct ngbe_ring __maybe_unused *rx_ring, + struct ngbe_rx_buffer __maybe_unused *rx_buffer, + struct xdp_buff __maybe_unused *xdp) +{ + int result = NGBE_XDP_PASS; +#ifdef HAVE_XDP_SUPPORT + struct bpf_prog *xdp_prog; + struct ngbe_ring *ring; +#ifdef 
HAVE_XDP_FRAME_STRUCT + struct xdp_frame *xdpf; +#endif + int err; + u32 act; + rcu_read_lock(); + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + + if (!xdp_prog) { + goto xdp_out; + } +#ifdef HAVE_XDP_FRAME_STRUCT + prefetchw(xdp->data_hard_start); /* xdp_frame write */ +#endif + act = bpf_prog_run_xdp(xdp_prog, xdp); + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + page_ref_inc(rx_buffer->page); +#ifdef HAVE_XDP_FRAME_STRUCT + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) { + result = NGBE_XDP_CONSUMED; + break; + } +#endif + ring = adapter->xdp_ring[rx_ring->queue_index % adapter->num_xdp_queues]; + if (static_branch_unlikely(&ngbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); +#ifdef HAVE_XDP_FRAME_STRUCT + result = ngbe_xmit_xdp_ring(ring, xdpf); +#else + result = ngbe_xmit_xdp_ring(ring, xdp); +#endif + if (static_branch_unlikely(&ngbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); + break; + case XDP_REDIRECT: + page_ref_inc(rx_buffer->page); + err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); + if (!err) { + result = NGBE_XDP_REDIR; + } else { + result = NGBE_XDP_CONSUMED; + } + break; + default: + bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + /* fallthrough -- handle aborts by dropping packet */ + fallthrough; + case XDP_DROP: + result = NGBE_XDP_CONSUMED; + break; + } +xdp_out: + rcu_read_unlock(); +#endif /* HAVE_XDP_SUPPORT */ + + return ERR_PTR(-result); +} + +static unsigned int ngbe_rx_frame_truesize(struct ngbe_ring *rx_ring, + unsigned int size) +{ + + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = ngbe_rx_bufsz(rx_ring); +#else + truesize = ALIGN(size, L1_CACHE_BYTES) +#ifdef HAVE_XDP_BUFF_FRAME_SZ + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +#endif + ; +#endif + return truesize; +} +static void ngbe_rx_buffer_flip(struct ngbe_ring *rx_ring, + struct ngbe_rx_buffer *rx_buffer, + unsigned int size) +{ + unsigned int truesize = ngbe_rx_frame_truesize(rx_ring, size); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +/** + * ngbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the syste. + * + * Returns amount of work completed. 
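+ * If an XDP program is attached, each frame is first run through it and may be transmitted, redirected or dropped before an skb is built.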
+ **/ +static int ngbe_clean_rx_irq(struct ngbe_q_vector *q_vector, + struct ngbe_ring *rx_ring, + int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0, xdp_xmit = 0; + u16 cleaned_count = ngbe_desc_unused(rx_ring); + struct ngbe_adapter *adapter = q_vector->adapter; + struct xdp_buff xdp; + xdp.data = NULL; + xdp.data_end = NULL; +#ifdef HAVE_XDP_BUFF_RXQ + if(rx_ring->xdp_prog) + xdp.rxq = &rx_ring->xdp_rxq; +#endif + +#ifdef HAVE_XDP_BUFF_FRAME_SZ + /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ +#if (PAGE_SIZE < 8192) + if(rx_ring->xdp_prog) + xdp.frame_sz = ngbe_rx_frame_truesize(rx_ring, 0); +#endif +#endif + do { + struct ngbe_rx_buffer *rx_buffer; + union ngbe_rx_desc *rx_desc; + struct sk_buff *skb = NULL; + unsigned int size = 0; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= NGBE_RX_BUFFER_WRITE) { + ngbe_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = NGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); + + if (!ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_DD)) { + break; + } + if (rx_ring->xdp_prog){ + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) { + break; + } + } + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + if (rx_ring->xdp_prog) { + xdp.data = page_address(rx_buffer->page) + + rx_buffer->page_offset; +#ifdef HAVE_XDP_BUFF_DATA_META + xdp.data_meta = xdp.data; +#endif /* HAVE_XDP_BUFF_DATA_META */ + xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM; + xdp.data_end = xdp.data + size; + +#ifdef HAVE_XDP_BUFF_FRAME_SZ +#if (PAGE_SIZE > 4096) + /* At larger PAGE_SIZE, frame_sz depend on len size */ + xdp.frame_sz = ngbe_rx_frame_truesize(rx_ring, size); +#endif +#endif + skb = ngbe_run_xdp(adapter, rx_ring, rx_buffer, &xdp); + } + if (IS_ERR(skb)) { + if ((PTR_ERR(skb) == -NGBE_XDP_TX) || (PTR_ERR(skb) == -NGBE_XDP_REDIR)) { + xdp_xmit = (-PTR_ERR(skb)); + ngbe_rx_buffer_flip(rx_ring, rx_buffer, + size); + } + total_rx_packets++; + total_rx_bytes += size; + } else { + if (ring_is_hs_enabled(rx_ring)) + skb = ngbe_fetch_rx_buffer_hs(rx_ring, rx_desc); + else + skb = ngbe_fetch_rx_buffer(rx_ring, rx_desc); + } + /* exit if we failed to retrieve a buffer */ + if (!skb) { + break; + } + if (IS_ERR(skb)) { + ngbe_put_rx_buffer(rx_ring, rx_buffer, skb); + } + cleaned_count++; + + /* place incomplete frames back on ring for completion */ + if (ngbe_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + /* verify the packet layout is correct */ + if (ngbe_cleanup_headers(rx_ring, rx_desc, skb)) + continue; + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + ngbe_process_skb_fields(rx_ring, rx_desc, skb); + + ngbe_rx_skb(q_vector, rx_ring, rx_desc, skb); + + /* update budget accounting */ + total_rx_packets++; + } while (likely(total_rx_packets < budget)); +#ifdef HAVE_XDP_SUPPORT + if (xdp_xmit & NGBE_XDP_TX) { + struct ngbe_ring *ring = adapter->xdp_ring[rx_ring->queue_index % adapter->num_xdp_queues]; + if (static_branch_unlikely(&ngbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + wmb(); + writel(ring->next_to_use, ring->tail); + if (static_branch_unlikely(&ngbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); + } + if (xdp_xmit & NGBE_XDP_REDIR) + xdp_do_flush_map(); +#endif + 
u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + +#ifndef NGBE_NO_LRO + ngbe_lro_flush_all(q_vector); +#endif + return total_rx_packets; +} + +#else /* CONFIG_NGBE_DISABLE_PACKET_SPLIT */ +/** + * ngbe_clean_rx_irq - Clean completed descriptors from Rx ring - legacy + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a legacy approach to Rx interrupt + * handling. This version will perform better on systems with a low cost + * dma mapping API. + * + * Returns amount of work completed. + **/ +static int ngbe_clean_rx_irq(struct ngbe_q_vector *q_vector, + struct ngbe_ring *rx_ring, + int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 len = 0; + u16 cleaned_count = ngbe_desc_unused(rx_ring); + + do { + struct ngbe_rx_buffer *rx_buffer; + union ngbe_rx_desc *rx_desc; + struct sk_buff *skb; + u16 ntc; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= NGBE_RX_BUFFER_WRITE) { + ngbe_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + ntc = rx_ring->next_to_clean; + rx_desc = NGBE_RX_DESC(rx_ring, ntc); + rx_buffer = &rx_ring->rx_buffer_info[ntc]; + + if (!ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_DD)) + break; + + /* + * This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * RXD_STAT_DD bit is set + */ + rmb(); + + skb = rx_buffer->skb; + + prefetch(skb->data); + + len = le16_to_cpu(rx_desc->wb.upper.length); + /* pull the header of the skb in */ + __skb_put(skb, len); + + /* + * Delay unmapping of the first packet. It carries the + * header information, HW may still access the header after + * the writeback. 
Only unmap it when EOP is reached + */ + if (!NGBE_CB(skb)->head) { + NGBE_CB(skb)->dma = rx_buffer->dma; + } else { + skb = ngbe_merge_active_tail(skb); + dma_unmap_single(rx_ring->dev, + rx_buffer->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + } + + /* clear skb reference in buffer info structure */ + rx_buffer->skb = NULL; + rx_buffer->dma = 0; + + cleaned_count++; + + if (ngbe_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + dma_unmap_single(rx_ring->dev, + NGBE_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + + NGBE_CB(skb)->dma = 0; + + if (ngbe_close_active_frag_list(skb) && + !NGBE_CB(skb)->append_cnt) { + dev_kfree_skb_any(skb); + continue; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(ngbe_test_staterr(rx_desc, + NGBE_RXD_ERR_FRAME_ERR_MASK))) { + dev_kfree_skb_any(skb); + continue; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + ngbe_process_skb_fields(rx_ring, rx_desc, skb); + + ngbe_rx_skb(q_vector, rx_ring, rx_desc, skb); + + /* update budget accounting */ + total_rx_packets++; + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + if (cleaned_count) + ngbe_alloc_rx_buffers(rx_ring, cleaned_count); + +#ifndef NGBE_NO_LRO + ngbe_lro_flush_all(q_vector); + +#endif /* NGBE_NO_LRO */ + return total_rx_packets; +} + +#endif /* CONFIG_NGBE_DISABLE_PACKET_SPLIT */ +#ifdef HAVE_NDO_BUSY_POLL +/* must be called with local_bh_disable()d */ +static int ngbe_busy_poll_recv(struct napi_struct *napi) +{ + struct ngbe_q_vector *q_vector = + container_of(napi, struct ngbe_q_vector, napi); + struct ngbe_adapter *adapter = q_vector->adapter; + struct ngbe_ring *ring; + int found = 0; + + if (test_bit(__NGBE_DOWN, &adapter->state)) + return LL_FLUSH_FAILED; + + if (!ngbe_qv_lock_poll(q_vector)) + return LL_FLUSH_BUSY; + + ngbe_for_each_ring(ring, q_vector->rx) { + found = ngbe_clean_rx_irq(q_vector, ring, 4); +#ifdef BP_EXTENDED_STATS + if (found) + ring->stats.cleaned += found; + else + ring->stats.misses++; +#endif + if (found) + break; + } + + ngbe_qv_unlock_poll(q_vector); + + return found; +} + +#endif /* HAVE_NDO_BUSY_POLL */ +/** + * ngbe_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + * + * ngbe_configure_msix sets up the hardware to properly generate MSI-X + * interrupts. + **/ +static void ngbe_configure_msix(struct ngbe_adapter *adapter) +{ + u16 v_idx; + u32 i; + u32 eitrsel = 0; + + + /* Populate MSIX to EITR Select */ + if (!(adapter->flags & NGBE_FLAG_VMDQ_ENABLED)) + wr32(&adapter->hw, NGBE_PX_ITRSEL, eitrsel); + else { + for(i = 0;i < adapter->num_vfs; i++) { + eitrsel |= 1 << i; + } + wr32(&adapter->hw, NGBE_PX_ITRSEL, eitrsel); + } + + /* + * Populate the IVAR table and set the ITR values to the + * corresponding register. 
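+ * Each queue vector's Rx and Tx rings are mapped through the IVAR table; the miscellaneous cause is routed to the vector following the last queue vector.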
+ */ + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { + struct ngbe_q_vector *q_vector = adapter->q_vector[v_idx]; + struct ngbe_ring *ring; + + ngbe_for_each_ring(ring, q_vector->rx) + ngbe_set_ivar(adapter, 0, ring->reg_idx, v_idx); + + ngbe_for_each_ring(ring, q_vector->tx) + ngbe_set_ivar(adapter, 1, ring->reg_idx, v_idx); + + ngbe_write_eitr(q_vector); + } + + /* misc ivar from seq 1 to seq 8 */ + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) + v_idx += adapter->ring_feature[RING_F_VMDQ].offset; + + ngbe_set_ivar(adapter, -1, 0, v_idx); + wr32(&adapter->hw, NGBE_PX_ITR(v_idx), 1950); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * ngbe_update_itr - update the dynamic ITR value based on statistics + * @q_vector: structure containing interrupt and ring information + * @ring_container: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * this functionality is controlled by the InterruptThrottleRate module + * parameter (see ngbe_param.c) + **/ +#if 0 +static void ngbe_update_itr(struct ngbe_q_vector *q_vector, + struct ngbe_ring_container *ring_container) +{ + int bytes = ring_container->total_bytes; + int packets = ring_container->total_packets; + u32 timepassed_us; + u64 bytes_perint; + u8 itr_setting = ring_container->itr; + + if (packets == 0) + return; + + /* simple throttlerate manangbeent + * 0-10MB/s lowest (100000 ints/s) + * 10-20MB/s low (20000 ints/s) + * 20-1249MB/s bulk (12000 ints/s) + */ + /* what was last interrupt timeslice? */ + timepassed_us = q_vector->itr >> 2; + if (timepassed_us == 0) + return; + bytes_perint = bytes / timepassed_us; /* bytes/usec */ + + switch (itr_setting) { + case lowest_latency: + if (bytes_perint > 10) { + itr_setting = low_latency; + } + break; + case low_latency: + if (bytes_perint > 20) { + itr_setting = bulk_latency; + } else if (bytes_perint <= 10) { + itr_setting = lowest_latency; + } + break; + case bulk_latency: + if (bytes_perint <= 20) { + itr_setting = low_latency; + } + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itr_setting; +} +#endif +/** + * ngbe_write_eitr - write EITR register in hardware specific way + * @q_vector: structure containing interrupt and ring information + * + * This function is made to be called by ethtool and by the driver + * when it needs to update EITR registers at runtime. Hardware + * specific quirks/differences are taken care of here. 
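+ * The NGBE_PX_ITR_CNT_WDIS bit is set along with the ITR value so that the write does not disturb the hardware interrupt counter.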
+ */ +void ngbe_write_eitr(struct ngbe_q_vector *q_vector) +{ + struct ngbe_adapter *adapter = q_vector->adapter; + struct ngbe_hw *hw = &adapter->hw; + int v_idx = q_vector->v_idx; + u32 itr_reg = q_vector->itr & NGBE_MAX_EITR; + + itr_reg |= NGBE_PX_ITR_CNT_WDIS; + + wr32(hw, NGBE_PX_ITR(v_idx), itr_reg); +} + +#if 0 +static void ngbe_set_itr(struct ngbe_q_vector *q_vector) +{ + u16 new_itr = q_vector->itr; + u8 current_itr; + + ngbe_update_itr(q_vector, &q_vector->tx); + ngbe_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = NGBE_70K_ITR; + break; + case low_latency: + new_itr = NGBE_20K_ITR; + break; + case bulk_latency: + new_itr = NGBE_7K_ITR; + break; + default: + break; + } + + if (new_itr != q_vector->itr) { + /* do an exponential smoothing */ + new_itr = (20 * new_itr * q_vector->itr) / + ((19 * new_itr) + q_vector->itr); + + /* save the algorithm value here */ + q_vector->itr = new_itr; + ngbe_write_eitr(q_vector); + } +} +#endif +/** + * ngbe_check_overtemp_subtask - check for over temperature + * @adapter: pointer to adapter + **/ +static void ngbe_check_overtemp_subtask(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 eicr = adapter->interrupt_event; + int temp_state; +#ifdef HAVE_VIRTUAL_STATION + struct net_device *upper; + struct list_head *iter; +#endif + + if (test_bit(__NGBE_DOWN, &adapter->state)) + return; + if (!(adapter->flags2 & NGBE_FLAG2_TEMP_SENSOR_CAPABLE)) + return; + if (!(adapter->flags2 & NGBE_FLAG2_TEMP_SENSOR_EVENT)) + return; + + adapter->flags2 &= ~NGBE_FLAG2_TEMP_SENSOR_EVENT; + + /* + * Since the warning interrupt is for both ports + * we don't have to check if: + * - This interrupt wasn't for our port. 
+ * - We may have missed the interrupt so always have to + * check if we got a LSC + */ + if (!(eicr & NGBE_PX_MISC_IC_OVER_HEAT)) + return; + + + temp_state = ngbe_phy_check_overtemp(hw); + if (!temp_state || temp_state == NGBE_NOT_IMPLEMENTED) + return; + + if (temp_state == NGBE_ERR_UNDERTEMP && + test_bit(__NGBE_HANGING, &adapter->state)) { + e_crit(drv, "%s\n", ngbe_underheat_msg); + wr32m(&adapter->hw, NGBE_RDB_PB_CTL, + NGBE_RDB_PB_CTL_PBEN, NGBE_RDB_PB_CTL_PBEN); + netif_carrier_on(adapter->netdev); +#ifdef HAVE_VIRTUAL_STATION + netdev_for_each_all_upper_dev_rcu(adapter->netdev, + upper, iter) { + if (!netif_is_macvlan(upper)) + continue; + netif_carrier_on(upper); + } +#endif + clear_bit(__NGBE_HANGING, &adapter->state); + } else if (temp_state == NGBE_ERR_OVERTEMP && + !test_and_set_bit(__NGBE_HANGING, &adapter->state)) { + e_crit(drv, "%s\n", ngbe_overheat_msg); + netif_carrier_off(adapter->netdev); +#ifdef HAVE_VIRTUAL_STATION + netdev_for_each_all_upper_dev_rcu(adapter->netdev, + upper, iter) { + if (!netif_is_macvlan(upper)) + continue; + netif_carrier_off(upper); + } +#endif + wr32m(&adapter->hw, NGBE_RDB_PB_CTL, + NGBE_RDB_PB_CTL_PBEN, 0); + } + + adapter->interrupt_event = 0; +} + +static void ngbe_check_overtemp_event(struct ngbe_adapter *adapter, u32 eicr) +{ + if (!(adapter->flags2 & NGBE_FLAG2_TEMP_SENSOR_CAPABLE)) + return; + + if (!(eicr & NGBE_PX_MISC_IC_OVER_HEAT)) + return; + if (!test_bit(__NGBE_DOWN, &adapter->state)) { + adapter->interrupt_event = eicr; + adapter->flags2 |= NGBE_FLAG2_TEMP_SENSOR_EVENT; + ngbe_service_event_schedule(adapter); + } +} + + +static void ngbe_handle_phy_event(struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = hw->back; + u32 reg; + + reg = rd32(hw, NGBE_GPIO_INTSTATUS); + wr32(hw, NGBE_GPIO_EOI,reg); + + if (!((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA)) + hw->phy.ops.check_event(hw); + adapter->lsc_int++; + adapter->link_check_timeout = jiffies; + if (!test_bit(__NGBE_DOWN, &adapter->state)) { + ngbe_service_event_schedule(adapter); + } +} + +/** + * ngbe_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +void ngbe_irq_enable(struct ngbe_adapter *adapter, bool queues, bool flush) +{ + u32 mask = 0; + struct ngbe_hw *hw = &adapter->hw; + + /* enable misc interrupt */ + mask = NGBE_PX_MISC_IEN_MASK; + + if (adapter->flags2 & NGBE_FLAG2_TEMP_SENSOR_CAPABLE) + mask |= NGBE_PX_MISC_IEN_OVER_HEAT; + +#ifdef HAVE_PTP_1588_CLOCK + mask |= NGBE_PX_MISC_IEN_TIMESYNC; +#endif /* HAVE_PTP_1588_CLOCK */ + + wr32(&adapter->hw, NGBE_GPIO_DDR, 0x1); + wr32(&adapter->hw, NGBE_GPIO_INTEN, 0x3); + wr32(&adapter->hw, NGBE_GPIO_INTTYPE_LEVEL, 0x0); + if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi || + adapter->hw.phy.type == ngbe_phy_internal_yt8521s_sfi) + wr32(&adapter->hw, NGBE_GPIO_POLARITY, 0x0); + else + wr32(&adapter->hw, NGBE_GPIO_POLARITY, 0x3); + + if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi || + adapter->hw.phy.type == ngbe_phy_internal_yt8521s_sfi) + mask |= NGBE_PX_MISC_IEN_GPIO; +// mask &= ~NGBE_PX_MISC_IEN_GPIO; + + wr32(hw, NGBE_PX_MISC_IEN, mask); + + /* unmask interrupt */ + if (queues) + ngbe_intr_enable(&adapter->hw, NGBE_INTR_ALL); + else { + if (!(adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP)) + ngbe_intr_enable(&adapter->hw, NGBE_INTR_MISC(adapter)); + else + ngbe_intr_enable(&adapter->hw, NGBE_INTR_MISC_VMDQ(adapter)); + } + + /* flush configuration */ + if (flush) + NGBE_WRITE_FLUSH(&adapter->hw); +} + +static irqreturn_t ngbe_msix_other(int 
__always_unused irq, void *data) +{ + struct ngbe_adapter *adapter = data; + struct ngbe_hw *hw = &adapter->hw; + u32 eicr; + u32 ecc; + + eicr = ngbe_misc_isb(adapter, NGBE_ISB_MISC); + if (eicr & (NGBE_PX_MISC_IC_PHY | NGBE_PX_MISC_IC_GPIO)) + ngbe_handle_phy_event(hw); + + if (eicr & NGBE_PX_MISC_IC_VF_MBOX) + ngbe_msg_task(adapter); + + if (eicr & NGBE_PX_MISC_IC_PCIE_REQ_ERR) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "lan id %d, PCIe request error founded.\n", hw->bus.lan_id); + if (hw->bus.lan_id == 0) { + adapter->flags2 |= NGBE_FLAG2_PCIE_NEED_RECOVER; + ngbe_service_event_schedule(adapter); + } else + wr32(&adapter->hw, NGBE_MIS_PF_SM, 1); + } + + if (eicr & NGBE_PX_MISC_IC_INT_ERR) { + e_info(link, "Received unrecoverable ECC Err," + "initiating reset.\n"); + ecc = rd32(hw, NGBE_MIS_ST); + e_info(link, "ecc error status is 0x%08x\n", ecc); + if (((ecc & NGBE_MIS_ST_LAN0_ECC) && (hw->bus.lan_id == 0)) || + ((ecc & NGBE_MIS_ST_LAN1_ECC) && (hw->bus.lan_id == 1)) || + ((ecc & NGBE_MIS_ST_LAN2_ECC) && (hw->bus.lan_id == 2)) || + ((ecc & NGBE_MIS_ST_LAN3_ECC) && (hw->bus.lan_id == 3))) { + adapter->flags2 |= NGBE_FLAG2_DEV_RESET_REQUESTED | + NGBE_FLAG2_ECC_ERR_RESET; + } + ngbe_service_event_schedule(adapter); + } + if (eicr & NGBE_PX_MISC_IC_DEV_RST) { + adapter->flags2 |= NGBE_FLAG2_RESET_INTR_RECEIVED; + ngbe_service_event_schedule(adapter); + } + if ((eicr & NGBE_PX_MISC_IC_STALL) || + (eicr & NGBE_PX_MISC_IC_ETH_EVENT)) { + adapter->flags2 |= NGBE_FLAG2_PF_RESET_REQUESTED; + ngbe_service_event_schedule(adapter); + } + + ngbe_check_overtemp_event(adapter, eicr); + +#ifdef HAVE_PTP_1588_CLOCK + if (unlikely(eicr & NGBE_PX_MISC_IC_TIMESYNC)) + ngbe_ptp_check_pps_event(adapter); +#endif + + /* re-enable the original interrupt state, no lsc, no queues */ + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_irq_enable(adapter, false, false); + + return IRQ_HANDLED; +} + +static irqreturn_t ngbe_msix_clean_rings(int __always_unused irq, void *data) +{ + struct ngbe_q_vector *q_vector = data; + + /* EIAM disabled interrupts (on this vector) for us */ + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * ngbe_poll - NAPI polling RX/TX cleanup routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean all queues associated with a q_vector. 
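+ * The Rx budget is split evenly across this vector's Rx rings, with a minimum of one packet per ring.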
+ **/ +int ngbe_poll(struct napi_struct *napi, int budget) +{ + struct ngbe_q_vector *q_vector = + container_of(napi, struct ngbe_q_vector, napi); + struct ngbe_adapter *adapter = q_vector->adapter; + struct ngbe_ring *ring; + int per_ring_budget; + bool clean_complete = true; + + ngbe_for_each_ring(ring, q_vector->tx) { + if (!ngbe_clean_tx_irq(q_vector, ring)) + clean_complete = false; + } + +#ifdef HAVE_NDO_BUSY_POLL + if (test_bit(NAPI_STATE_NPSVC, &napi->state)) + return budget; + + if (!ngbe_qv_lock_napi(q_vector)) + return budget; +#endif + + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling */ + if (q_vector->rx.count > 1) + per_ring_budget = max(budget/q_vector->rx.count, 1); + else + per_ring_budget = budget; + + ngbe_for_each_ring(ring, q_vector->rx) { + int cleaned = ngbe_clean_rx_irq(q_vector, ring, + per_ring_budget); + + if (cleaned >= per_ring_budget) + clean_complete = false; + } +#ifdef HAVE_NDO_BUSY_POLL + ngbe_qv_unlock_napi(q_vector); +#endif + +#ifndef HAVE_NETDEV_NAPI_LIST + if (!netif_running(adapter->netdev)) + clean_complete = true; + +#endif + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* all work done, exit the polling mode */ + napi_complete(napi); +#if 0 + if (adapter->rx_itr_setting == 1) + ngbe_set_itr(q_vector); +#endif + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_intr_enable(&adapter->hw, + NGBE_INTR_Q(q_vector->v_idx)); + + return 0; +} + +/** + * ngbe_request_msix_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * + * ngbe_request_msix_irqs allocates MSI-X vectors and requests + * interrupts from the kernel. + **/ +static int ngbe_request_msix_irqs(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int vector, err; + int ri = 0, ti = 0; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct ngbe_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-TxRx-%d", netdev->name, ri++); + ti++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-rx-%d", netdev->name, ri++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-tx-%d", netdev->name, ti++); + } else { + /* skip this unused q_vector */ + continue; + } + err = request_irq(entry->vector, &ngbe_msix_clean_rings, 0, + q_vector->name, q_vector); + if (err) { + e_err(probe, "request_irq failed for MSIX interrupt" + " '%s' Error: %d\n", q_vector->name, err); + goto free_queue_irqs; + } + } + + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) + vector += adapter->irq_remap_offset; + + err = request_irq(adapter->msix_entries[vector].vector, + ngbe_msix_other, 0, netdev->name, adapter); + + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) + vector -= adapter->irq_remap_offset; + + if (err) { + e_err(probe, "request_irq for msix_other failed: %d\n", err); + goto free_queue_irqs; + } + + return 0; + +free_queue_irqs: + while (vector) { + vector--; + +#ifdef HAVE_IRQ_AFFINITY_HINT + irq_set_affinity_hint(adapter->msix_entries[vector].vector, + NULL); +#endif + free_irq(adapter->msix_entries[vector].vector, + adapter->q_vector[vector]); + } + adapter->flags &= ~NGBE_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + 
kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + return err; +} + +/** + * ngbe_intr - legacy mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t ngbe_intr(int __always_unused irq, void *data) +{ + struct ngbe_adapter *adapter = data; + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_q_vector *q_vector = adapter->q_vector[0]; + u32 eicr; + u32 eicr_misc; + u32 ecc = 0; + + eicr = ngbe_misc_isb(adapter, NGBE_ISB_VEC0); + if (!eicr) { + /* + * shared interrupt alert! + * the interrupt that we masked before the EICR read. + */ + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_irq_enable(adapter, true, true); + return IRQ_NONE; /* Not our interrupt */ + } + adapter->isb_mem[NGBE_ISB_VEC0] = 0; + if (!(adapter->flags & NGBE_FLAG_MSI_ENABLED)) + wr32(&(adapter->hw), NGBE_PX_INTA, 1); + + eicr_misc = ngbe_misc_isb(adapter, NGBE_ISB_MISC); + if (eicr_misc & (NGBE_PX_MISC_IC_PHY | NGBE_PX_MISC_IC_GPIO)) + ngbe_handle_phy_event(hw); + + if (eicr_misc & NGBE_PX_MISC_IC_INT_ERR) { + e_info(link, "Received unrecoverable ECC Err," + "initiating reset.\n"); + ecc = rd32(hw, NGBE_MIS_ST); + e_info(link, "ecc error status is 0x%08x\n", ecc); + adapter->flags2 |= NGBE_FLAG2_DEV_RESET_REQUESTED | + NGBE_FLAG2_ECC_ERR_RESET; + ngbe_service_event_schedule(adapter); + } + + if (eicr_misc & NGBE_PX_MISC_IC_DEV_RST) { + adapter->flags2 |= NGBE_FLAG2_RESET_INTR_RECEIVED; + ngbe_service_event_schedule(adapter); + } + ngbe_check_overtemp_event(adapter, eicr_misc); + + +#ifdef HAVE_PTP_1588_CLOCK + if (unlikely(eicr_misc & NGBE_PX_MISC_IC_TIMESYNC)) + ngbe_ptp_check_pps_event(adapter); +#endif + + adapter->isb_mem[NGBE_ISB_MISC] = 0; + /* would disable interrupts here but it is auto disabled */ + napi_schedule_irqoff(&q_vector->napi); + + /* + * re-enable link(maybe) and non-queue interrupts, no flush. + * ngbe_poll will re-enable the queue interrupts + */ + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_irq_enable(adapter, false, false); + + return IRQ_HANDLED; +} + +/** + * ngbe_request_irq - initialize interrupts + * @adapter: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
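+ * MSI-X is used when enabled, otherwise MSI, otherwise a shared legacy interrupt.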
+ **/ +static int ngbe_request_irq(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) + err = ngbe_request_msix_irqs(adapter); + else if (adapter->flags & NGBE_FLAG_MSI_ENABLED) + err = request_irq(adapter->pdev->irq, &ngbe_intr, 0, + netdev->name, adapter); + else + err = request_irq(adapter->pdev->irq, &ngbe_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + e_err(probe, "request_irq failed, Error %d\n", err); + + return err; +} + +static void ngbe_free_irq(struct ngbe_adapter *adapter) +{ + int vector; + + if (!(adapter->flags & NGBE_FLAG_MSIX_ENABLED)) { + free_irq(adapter->pdev->irq, adapter); + return; + } + + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct ngbe_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + /* free only the irqs that were actually requested */ + if (!q_vector->rx.ring && !q_vector->tx.ring) + continue; + +#ifdef HAVE_IRQ_AFFINITY_HINT + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(entry->vector, NULL); + +#endif + free_irq(entry->vector, q_vector); + } + + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) { + free_irq( + adapter->msix_entries[vector + adapter->irq_remap_offset].vector, + adapter); + } + else + free_irq(adapter->msix_entries[vector++].vector, adapter); +} + +/** + * ngbe_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +void ngbe_irq_disable(struct ngbe_adapter *adapter) +{ + wr32(&adapter->hw, NGBE_PX_MISC_IEN, 0); + ngbe_intr_disable(&adapter->hw, NGBE_INTR_ALL); + + NGBE_WRITE_FLUSH(&adapter->hw); + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) { + int vector; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) + synchronize_irq(adapter->msix_entries[vector].vector); + + synchronize_irq(adapter->msix_entries[vector++].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +/** + * ngbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts + * + **/ +static void ngbe_configure_msi_and_legacy(struct ngbe_adapter *adapter) +{ + struct ngbe_q_vector *q_vector = adapter->q_vector[0]; + struct ngbe_ring *ring; + + ngbe_write_eitr(q_vector); + + ngbe_for_each_ring(ring, q_vector->rx) + ngbe_set_ivar(adapter, 0, ring->reg_idx, 0); + + ngbe_for_each_ring(ring, q_vector->tx) + ngbe_set_ivar(adapter, 1, ring->reg_idx, 0); + + ngbe_set_ivar(adapter, -1, 0, 1); + + e_info(hw, "Legacy interrupt IVAR setup done\n"); +} + +/** + * ngbe_configure_tx_ring - Configure Tx ring after Reset + * @adapter: board private structure + * @ring: structure containing ring specific data + * + * Configure the Tx descriptor ring after a reset. 
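+ * The queue is first disabled (SWFLSH), the descriptor base address and
+ * the head/tail pointers are reprogrammed, the ring size and a WTHRESH
+ * of 0x20 are written into TR_CFG, and the enable bit is then polled
+ * (up to 10 iterations).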
+ **/ +void ngbe_configure_tx_ring(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + u64 tdba = ring->dma; + int wait_loop = 10; + u32 txdctl = NGBE_PX_TR_CFG_ENABLE; + u8 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + wr32(hw, NGBE_PX_TR_CFG(reg_idx), NGBE_PX_TR_CFG_SWFLSH); + NGBE_WRITE_FLUSH(hw); + + wr32(hw, NGBE_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32)); + wr32(hw, NGBE_PX_TR_BAH(reg_idx), tdba >> 32); + + /* reset head and tail pointers */ + wr32(hw, NGBE_PX_TR_RP(reg_idx), 0); + wr32(hw, NGBE_PX_TR_WP(reg_idx), 0); + ring->tail = adapter->io_addr + NGBE_PX_TR_WP(reg_idx); + + /* reset ntu and ntc to place SW in sync with hardwdare */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + + txdctl |= NGBE_RING_SIZE(ring) << NGBE_PX_TR_CFG_TR_SIZE_SHIFT; + + /* + * set WTHRESH to encourage burst writeback, it should not be set + * higher than 1 when: + * - ITR is 0 as it could cause false TX hangs + * - ITR is set to > 100k int/sec and BQL is enabled + * + * In order to avoid issues WTHRESH + PTHRESH should always be equal + * to or less than the number of on chip descriptors, which is + * currently 40. + */ + txdctl |= 0x20 << NGBE_PX_TR_CFG_WTHRESH_SHIFT; + + + clear_bit(__NGBE_HANG_CHECK_ARMED, &ring->state); + + /* enable queue */ + wr32(hw, NGBE_PX_TR_CFG(reg_idx), txdctl); + + + /* poll to verify queue is enabled */ + do { + msleep(1); + txdctl = rd32(hw, NGBE_PX_TR_CFG(reg_idx)); + } while (--wait_loop && !(txdctl & NGBE_PX_TR_CFG_ENABLE)); + if (!wait_loop) + e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); +} + + + +/** + * ngbe_configure_tx - Configure Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. 
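+ * TDM_CTL.TE is set before any queue is enabled, each Tx and XDP ring is
+ * programmed through ngbe_configure_tx_ring(), and the MAC transmitter
+ * is enabled last via MAC_TX_CFG.TE.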
+ **/ +static void ngbe_configure_tx(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 i; + +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + if (adapter->num_tx_queues > 1) + adapter->netdev->features |= NETIF_F_MULTI_QUEUE; + else + adapter->netdev->features &= ~NETIF_F_MULTI_QUEUE; +#endif + + + /* TDM_CTL.TE must be before Tx queues are enabled */ + wr32m(hw, NGBE_TDM_CTL, + NGBE_TDM_CTL_TE, NGBE_TDM_CTL_TE); + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < adapter->num_tx_queues; i++) + ngbe_configure_tx_ring(adapter, adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + ngbe_configure_tx_ring(adapter, adapter->xdp_ring[i]); + + wr32m(hw, NGBE_TSEC_BUF_AE, 0x3FF, 0x10); + wr32m(hw, NGBE_TSEC_CTL, 0x2, 0); + + wr32m(hw, NGBE_TSEC_CTL, 0x1, 1); + + /* enable mac transmitter */ + wr32m(hw, NGBE_MAC_TX_CFG, + NGBE_MAC_TX_CFG_TE, NGBE_MAC_TX_CFG_TE); +} + +static void ngbe_enable_rx_drop(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + u16 reg_idx = ring->reg_idx; + + u32 srrctl = rd32(hw, NGBE_PX_RR_CFG(reg_idx)); + + srrctl |= NGBE_PX_RR_CFG_DROP_EN; + + wr32(hw, NGBE_PX_RR_CFG(reg_idx), srrctl); +} + +static void ngbe_disable_rx_drop(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + u16 reg_idx = ring->reg_idx; + + u32 srrctl = rd32(hw, NGBE_PX_RR_CFG(reg_idx)); + + srrctl &= ~NGBE_PX_RR_CFG_DROP_EN; + + wr32(hw, NGBE_PX_RR_CFG(reg_idx), srrctl); +} + +void ngbe_set_rx_drop_en(struct ngbe_adapter *adapter) +{ + int i; + + /* + * We should set the drop enable bit if: + * SR-IOV is enabled + * or + * Number of Rx queues > 1 and flow control is disabled + * + * This allows us to avoid head of line blocking for security + * and performance reasons. + */ + if (adapter->num_vfs || (adapter->num_rx_queues > 1 && + !(adapter->hw.fc.current_mode & ngbe_fc_tx_pause))) { + for (i = 0; i < adapter->num_rx_queues; i++) + ngbe_enable_rx_drop(adapter, adapter->rx_ring[i]); + } else { + for (i = 0; i < adapter->num_rx_queues; i++) + ngbe_disable_rx_drop(adapter, adapter->rx_ring[i]); + } +} + +static void ngbe_configure_srrctl(struct ngbe_adapter *adapter, + struct ngbe_ring *rx_ring) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 srrctl; + u16 reg_idx = rx_ring->reg_idx; + + srrctl = rd32m(hw, NGBE_PX_RR_CFG(reg_idx), + ~(NGBE_PX_RR_CFG_RR_HDR_SZ | + NGBE_PX_RR_CFG_RR_BUF_SZ | + NGBE_PX_RR_CFG_SPLIT_MODE)); + + /* configure header buffer length, needed for RSC */ + srrctl |= NGBE_RX_HDR_SIZE << NGBE_PX_RR_CFG_BSIZEHDRSIZE_SHIFT; + + /* configure the packet buffer length */ +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> + NGBE_PX_RR_CFG_BSIZEPKT_SHIFT; +#else + srrctl |= ngbe_rx_bufsz(rx_ring) >> NGBE_PX_RR_CFG_BSIZEPKT_SHIFT; + if (ring_is_hs_enabled(rx_ring)) + srrctl |= NGBE_PX_RR_CFG_SPLIT_MODE; +#endif + + wr32(hw, NGBE_PX_RR_CFG(reg_idx), srrctl); +} + +/** + * Return a number of entries in the RSS indirection table + * + * @adapter: device handle + * + */ +u32 ngbe_rss_indir_tbl_entries(struct ngbe_adapter *adapter) +{ + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) + return 64; + else + return 128; +} + +/** + * Write the RETA table to HW + * + * @adapter: device handle + * + * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. 
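+ * Four consecutive 8-bit entries are packed into one 32-bit RSSTBL
+ * register, lowest index in the least significant byte; for example,
+ * entries {1, 2, 3, 0} at i = 0..3 are written as 0x00030201 to
+ * NGBE_RDB_RSSTBL(0).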
+ */ +void ngbe_store_reta(struct ngbe_adapter *adapter) +{ + u32 i, reta_entries = ngbe_rss_indir_tbl_entries(adapter); + struct ngbe_hw *hw = &adapter->hw; + u32 reta = 0; + u8 *indir_tbl = adapter->rss_indir_tbl; + + /* Fill out the redirection table as follows: + * - 8 bit wide entries containing 4 bit RSS index + */ + + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + reta |= indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + wr32(hw, NGBE_RDB_RSSTBL(i >> 2), reta); + reta = 0; + } + } +} + +#if 0 +/** + * Write the RETA table to HW (for devices in SRIOV mode) + * + * @adapter: device handle + * + * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. + */ +static void ngbe_store_vfreta(struct ngbe_adapter *adapter) +{ + u32 i, reta_entries = ngbe_rss_indir_tbl_entries(adapter); + struct ngbe_hw *hw = &adapter->hw; + u32 vfreta = 0; + unsigned int pf_pool = adapter->num_vfs; + + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + wr32(hw, NGBE_RDB_VMRSSTBL(i >> 2, pf_pool), + vfreta); + vfreta = 0; + } + } +} +#endif +static void ngbe_setup_reta(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 i, j; + u32 reta_entries = ngbe_rss_indir_tbl_entries(adapter); + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + + /* + * Program table for at least 2 queues w/ SR-IOV so that VFs can + * make full use of any rings they may have. We will use the + * PSRTYPE register to control how many rings we use within the PF. + */ + if ((adapter->flags & NGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2)) + rss_i = 1; + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + wr32(hw, NGBE_RDB_RSSRK(i), adapter->rss_key[i]); + + /* Fill out redirection table */ + memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); + + for (i = 0, j = 0; i < reta_entries; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + } + + ngbe_store_reta(adapter); +} +#if 0 +static void ngbe_setup_vfreta(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + unsigned int pf_pool = adapter->num_vfs; + int i, j; + +#if 0 + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + wr32(hw, NGBE_RDB_VMRSSRK(i, pf_pool), + adapter->rss_key[i]); +#endif + /* Fill out the redirection table */ + for (i = 0, j = 0; i < 64; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + } + + ngbe_store_vfreta(adapter); +} +#endif +static void ngbe_setup_mrqc(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 rss_field = 0; + + /* VT, and RSS do not coexist at the same time */ + if (adapter->flags & NGBE_FLAG_VMDQ_ENABLED) { +// printk("ngbe_setup_mrqc not process\n"); + return; + } + /* Disable indicating checksum in descriptor, enables RSS hash */ + wr32m(hw, NGBE_PSR_CTL, + NGBE_PSR_CTL_PCSD, NGBE_PSR_CTL_PCSD); + + /* Perform hash on these packet types */ + rss_field = NGBE_RDB_RA_CTL_RSS_IPV4 | + NGBE_RDB_RA_CTL_RSS_IPV4_TCP | + NGBE_RDB_RA_CTL_RSS_IPV6 | + NGBE_RDB_RA_CTL_RSS_IPV6_TCP; + + if (adapter->flags2 & NGBE_FLAG2_RSS_FIELD_IPV4_UDP) + rss_field |= NGBE_RDB_RA_CTL_RSS_IPV4_UDP; + if (adapter->flags2 & NGBE_FLAG2_RSS_FIELD_IPV6_UDP) + rss_field |= NGBE_RDB_RA_CTL_RSS_IPV6_UDP; + + netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); + + ngbe_setup_reta(adapter); +#if 0 + if 
(adapter->flags & NGBE_FLAG_SRIOV_ENABLED) { + ngbe_setup_vfreta(adapter); + } else { + ngbe_setup_reta(adapter); + } +#endif + + if (adapter->flags2 & NGBE_FLAG2_RSS_ENABLED) + rss_field |= NGBE_RDB_RA_CTL_RSS_EN; + wr32(hw, NGBE_RDB_RA_CTL, rss_field); +} + +static void ngbe_rx_desc_queue_enable(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + int wait_loop = NGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (NGBE_REMOVED(hw->hw_addr)) + return; + + do { + msleep(1); + rxdctl = rd32(hw, NGBE_PX_RR_CFG(reg_idx)); + } while (--wait_loop && !(rxdctl & NGBE_PX_RR_CFG_RR_EN)); + + if (!wait_loop) { + e_err(drv, "RXDCTL.ENABLE on Rx queue %d " + "not set within the polling period\n", reg_idx); + } +} +#if 0 +/* disable the specified tx ring/queue */ +void ngbe_disable_tx_queue(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + int wait_loop = NGBE_MAX_RX_DESC_POLL; + u32 rxdctl, reg_offset, enable_mask; + u8 reg_idx = ring->reg_idx; + + if (NGBE_REMOVED(hw->hw_addr)) + return; + + reg_offset = NGBE_PX_TR_CFG(reg_idx); + enable_mask = NGBE_PX_TR_CFG_ENABLE; + + /* write value back with TDCFG.ENABLE bit cleared */ + wr32m(hw, reg_offset, enable_mask, 0); + + /* the hardware may take up to 100us to really disable the tx queue */ + do { + udelay(10); + rxdctl = rd32(hw, reg_offset); + } while (--wait_loop && (rxdctl & enable_mask)); + + if (!wait_loop) { + e_err(drv, "TDCFG.ENABLE on Tx queue %d not cleared within " + "the polling period\n", reg_idx); + } +} +#endif +/* disable the specified rx ring/queue */ +void ngbe_disable_rx_queue(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + int wait_loop = NGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (NGBE_REMOVED(hw->hw_addr)) + return; + + /* write value back with RXDCTL.ENABLE bit cleared */ + wr32m(hw, NGBE_PX_RR_CFG(reg_idx), + NGBE_PX_RR_CFG_RR_EN, 0); + + /* hardware may take up to 100us to actually disable rx queue */ + do { + udelay(10); + rxdctl = rd32(hw, NGBE_PX_RR_CFG(reg_idx)); + } while (--wait_loop && (rxdctl & NGBE_PX_RR_CFG_RR_EN)); + + if (!wait_loop) { + e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within " + "the polling period\n", reg_idx); + } +} + +void ngbe_configure_rx_ring(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + u64 rdba = ring->dma; + u32 rxdctl; + u16 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + rxdctl = rd32(hw, NGBE_PX_RR_CFG(reg_idx)); + ngbe_disable_rx_queue(adapter, ring); +#ifdef HAVE_XDP_BUFF_RXQ + if(ring->q_vector) + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL)); +#endif + wr32(hw, NGBE_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32)); + wr32(hw, NGBE_PX_RR_BAH(reg_idx), rdba >> 32); + + if (ring->count == NGBE_MAX_RXD) + rxdctl |= 0 << NGBE_PX_RR_CFG_RR_SIZE_SHIFT; + else + rxdctl |= (ring->count / 128) << NGBE_PX_RR_CFG_RR_SIZE_SHIFT; + + rxdctl |= 0x1 << NGBE_PX_RR_CFG_RR_THER_SHIFT; + wr32(hw, NGBE_PX_RR_CFG(reg_idx), rxdctl); + + /* reset head and tail pointers */ + wr32(hw, NGBE_PX_RR_RP(reg_idx), 0); + wr32(hw, NGBE_PX_RR_WP(reg_idx), 0); + ring->tail = adapter->io_addr + NGBE_PX_RR_WP(reg_idx); + + /* reset ntu and ntc to place SW in sync with hardwdare */ + ring->next_to_clean = 0; + ring->next_to_use = 0; +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + ring->next_to_alloc = 0; 
+#endif + + ngbe_configure_srrctl(adapter, ring); + + /* enable receive descriptor ring */ + wr32m(hw, NGBE_PX_RR_CFG(reg_idx), + NGBE_PX_RR_CFG_RR_EN, NGBE_PX_RR_CFG_RR_EN); + + ngbe_rx_desc_queue_enable(adapter, ring); + ngbe_alloc_rx_buffers(ring, ngbe_desc_unused(ring)); +} + +static void ngbe_setup_psrtype(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int pool; + + /* PSRTYPE must be initialized in adapters */ + u32 psrtype = NGBE_RDB_PL_CFG_L4HDR | + NGBE_RDB_PL_CFG_L3HDR | + NGBE_RDB_PL_CFG_L2HDR | + NGBE_RDB_PL_CFG_TUN_OUTER_L2HDR | + NGBE_RDB_PL_CFG_TUN_TUNHDR; + + + for_each_set_bit(pool, &adapter->fwd_bitmask, NGBE_MAX_MACVLANS) { + wr32(hw, NGBE_RDB_PL_CFG(VMDQ_P(pool)), psrtype); + } +} + +/** + * ngbe_configure_bridge_mode - common settings for configuring bridge mode + * @adapter - the private structure + * + * This function's purpose is to remove code duplication and configure some + * settings require to switch bridge modes. + **/ +static void ngbe_configure_bridge_mode(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + + if (adapter->flags & NGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE) { + /* disable Tx loopback, rely on switch hairpin mode */ + wr32m(hw, NGBE_PSR_CTL, + NGBE_PSR_CTL_SW_EN, 0); + } else { + /* enable Tx loopback for internal VF/PF communication */ + wr32m(hw, NGBE_PSR_CTL, + NGBE_PSR_CTL_SW_EN, NGBE_PSR_CTL_SW_EN); + } +} + +static void ngbe_configure_virtualization(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 i; + u8 vfe = 0; + + if (!(adapter->flags & NGBE_FLAG_VMDQ_ENABLED)) + return; + + wr32m(hw, NGBE_PSR_VM_CTL, + NGBE_PSR_VM_CTL_POOL_MASK | + NGBE_PSR_VM_CTL_REPLEN, + VMDQ_P(0) << NGBE_PSR_VM_CTL_POOL_SHIFT | + NGBE_PSR_VM_CTL_REPLEN); + + for_each_set_bit(i, &adapter->fwd_bitmask, NGBE_MAX_MACVLANS) { + /* accept untagged packets until a vlan tag is + * specifically set for the VMDQ queue/pool + */ + wr32m(hw, NGBE_PSR_VM_L2CTL(i), + NGBE_PSR_VM_L2CTL_AUPE, NGBE_PSR_VM_L2CTL_AUPE); + } + + vfe = 1 << (VMDQ_P(0)); + /* Enable only the PF pools for Tx/Rx */ + wr32(hw, NGBE_RDM_POOL_RE, vfe); + wr32(hw, NGBE_TDM_POOL_TE, vfe); + + if (!(adapter->flags & NGBE_FLAG_SRIOV_ENABLED)) + return; + + /* configure default bridge settings */ + ngbe_configure_bridge_mode(adapter); + + /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be + * calling set_ethertype_anti_spoofing for each VF in loop below. 
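+ * (The LLDP ethertype is 0x88CC and ETH_P_PAUSE is 0x8808.)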
+ */ + if (hw->mac.ops.set_ethertype_anti_spoofing) { + wr32(hw, + NGBE_PSR_ETYPE_SWC(NGBE_PSR_ETYPE_SWC_FILTER_LLDP), + (NGBE_PSR_ETYPE_SWC_FILTER_EN | /* enable filter */ + NGBE_PSR_ETYPE_SWC_TX_ANTISPOOF | + NGBE_ETH_P_LLDP)); /* LLDP eth procotol type */ + + wr32(hw, + NGBE_PSR_ETYPE_SWC(NGBE_PSR_ETYPE_SWC_FILTER_FC), + (NGBE_PSR_ETYPE_SWC_FILTER_EN | + NGBE_PSR_ETYPE_SWC_TX_ANTISPOOF | + ETH_P_PAUSE)); + } + + for (i = 0; i < adapter->num_vfs; i++) { +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + if (!adapter->vfinfo[i].spoofchk_enabled) + ngbe_ndo_set_vf_spoofchk(adapter->netdev, i, false); +#endif + /* enable ethertype anti spoofing if hw supports it */ + hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i); + } +} + +static void ngbe_set_rx_buffer_len(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + struct ngbe_ring *rx_ring; + int i; + u32 mhadd; +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + u16 rx_buf_len; +#endif + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN); + + mhadd = rd32(hw, NGBE_PSR_MAX_SZ); + if (max_frame != mhadd) { + wr32(hw, NGBE_PSR_MAX_SZ, max_frame); + } + +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + /* MHADD will allow an extra 4 bytes past for vlan tagged frames */ + max_frame += VLAN_HLEN; + + if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE) { + rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; + /* + * Make best use of allocation by using all but 1K of a + * power of 2 allocation that will be used for skb->head. + */ + } else if (max_frame <= NGBE_RXBUFFER_3K) { + rx_buf_len = NGBE_RXBUFFER_3K; + } else if (max_frame <= NGBE_RXBUFFER_7K) { + rx_buf_len = NGBE_RXBUFFER_7K; + } else if (max_frame <= NGBE_RXBUFFER_15K) { + rx_buf_len = NGBE_RXBUFFER_15K; + } else { + rx_buf_len = NGBE_MAX_RXBUFFER; + } +#endif /* CONFIG_NGBE_DISABLE_PACKET_SPLIT */ + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_ring = adapter->rx_ring[i]; + + if (adapter->flags & NGBE_FLAG_RX_HS_ENABLED) { + rx_ring->rx_buf_len = NGBE_RX_HDR_SIZE; + set_ring_hs_enabled(rx_ring); + } else + clear_ring_hs_enabled(rx_ring); + +#ifdef CONFIG_NGBE_DISABLE_PACKET_SPLIT + rx_ring->rx_buf_len = rx_buf_len; +#endif /* CONFIG_NGBE_DISABLE_PACKET_SPLIT */ + } +} + +/** + * ngbe_configure_rx - Configure Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
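+ * Receives are disabled while descriptors are set up: PSRTYPE, CRC
+ * stripping and the RSS/MRQC registers are programmed, the Rx buffer
+ * length is chosen, each Rx ring is configured, and receives are then
+ * re-enabled through RDB_PB_CTL and enable_rx_dma().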
+ **/ +static void ngbe_configure_rx(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i; + u32 rxctrl; + + /* disable receives while setting up the descriptors */ + hw->mac.ops.disable_rx(hw); + + ngbe_setup_psrtype(adapter); + + /* enable hw crc stripping */ + wr32m(hw, NGBE_RSEC_CTL, + NGBE_RSEC_CTL_CRC_STRIP, NGBE_RSEC_CTL_CRC_STRIP); + + /* Program registers for the distribution of queues */ + ngbe_setup_mrqc(adapter); + + /* set_rx_buffer_len must be called before ring initialization */ + ngbe_set_rx_buffer_len(adapter); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + ngbe_configure_rx_ring(adapter, adapter->rx_ring[i]); + + rxctrl = rd32(hw, NGBE_RDB_PB_CTL); + + /* enable all receives */ + rxctrl |= NGBE_RDB_PB_CTL_PBEN; + hw->mac.ops.enable_rx_dma(hw, rxctrl); +} + +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#if defined(NETIF_F_HW_VLAN_CTAG_TX) || defined(NETIF_F_HW_VLAN_STAG_TX) +static int ngbe_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +#else +static int ngbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +#endif /* NETIF_F_HW_VLAN_CTAG_TX || NETIF_F_HW_VLAN_STAG_TX*/ +#else /* !HAVE_INT_NDO_VLAN_RX_ADD_VID */ +static void ngbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + int pool_ndx = VMDQ_P(0); + + /* add VID to filter table */ + if (hw->mac.ops.set_vfta) { +#ifndef HAVE_VLAN_RX_REGISTER + if (vid < VLAN_N_VID) + set_bit(vid, adapter->active_vlans); +#endif + hw->mac.ops.set_vfta(hw, vid, pool_ndx, true); + if (adapter->flags & NGBE_FLAG_VMDQ_ENABLED) { + int i; + /* enable vlan id for all pools */ + for_each_set_bit(i, &adapter->fwd_bitmask, + NGBE_MAX_MACVLANS) + hw->mac.ops.set_vfta(hw, vid, + VMDQ_P(i), true); + } + } +#ifndef HAVE_NETDEV_VLAN_FEATURES + + /* + * Copy feature flags from netdev to the vlan netdev for this vid. + * This allows things like TSO to bubble down to our vlan device. + * Some vlans, such as VLAN 0 for DCB will not have a v_netdev so + * we will not have a netdev that needs updating. 
+ */ + if (adapter->vlgrp) { + struct vlan_group *vlgrp = adapter->vlgrp; + struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid); + if (v_netdev) { + v_netdev->features |= netdev->features; + vlan_group_set_device(vlgrp, vid, v_netdev); + } + } +#endif /* HAVE_NETDEV_VLAN_FEATURES */ +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; +#endif +} + +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#if (defined NETIF_F_HW_VLAN_CTAG_RX) || (defined NETIF_F_HW_VLAN_STAG_RX) +static int ngbe_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +#else /* !NETIF_F_HW_VLAN_CTAG_RX */ +static int ngbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +#endif /* NETIF_F_HW_VLAN_CTAG_RX */ +#else +static void ngbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +#endif +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + int pool_ndx = VMDQ_P(0); + + /* User is not allowed to remove vlan ID 0 */ + if (!vid) +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; +#else + return; +#endif + +#ifdef HAVE_VLAN_RX_REGISTER + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_irq_disable(adapter); + + vlan_group_set_device(adapter->vlgrp, vid, NULL); + + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_irq_enable(adapter, true, true); + +#endif /* HAVE_VLAN_RX_REGISTER */ + /* remove VID from filter table */ + if (hw->mac.ops.set_vfta) { + hw->mac.ops.set_vfta(hw, vid, pool_ndx, false); + if (adapter->flags & NGBE_FLAG_VMDQ_ENABLED) { + int i; + /* remove vlan id from all pools */ + for_each_set_bit(i, &adapter->fwd_bitmask, + NGBE_MAX_MACVLANS) + hw->mac.ops.set_vfta(hw, vid, + VMDQ_P(i), false); + } + } +#ifndef HAVE_VLAN_RX_REGISTER + + clear_bit(vid, adapter->active_vlans); +#endif +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; +#endif +} + +#ifdef HAVE_8021P_SUPPORT +/** + * ngbe_vlan_strip_disable - helper to disable vlan tag stripping + * @adapter: driver data + */ +void ngbe_vlan_strip_disable(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i, j; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ngbe_ring *ring = adapter->rx_ring[i]; + if (ring->accel) + continue; + j = ring->reg_idx; + wr32m(hw, NGBE_PX_RR_CFG(j), + NGBE_PX_RR_CFG_VLAN, 0); + } +} + +#endif +/** + * ngbe_vlan_strip_enable - helper to enable vlan tag stripping + * @adapter: driver data + */ +void ngbe_vlan_strip_enable(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i, j; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ngbe_ring *ring = adapter->rx_ring[i]; + if (ring->accel) + continue; + j = ring->reg_idx; + wr32m(hw, NGBE_PX_RR_CFG(j), + NGBE_PX_RR_CFG_VLAN, NGBE_PX_RR_CFG_VLAN); + } +} + +#ifdef HAVE_VLAN_RX_REGISTER +static void ngbe_vlan_mode(struct net_device *netdev, struct vlan_group *grp) +#else +void ngbe_vlan_mode(struct net_device *netdev, u32 features) +#endif +{ +#if defined(HAVE_VLAN_RX_REGISTER) || defined(HAVE_8021P_SUPPORT) + struct ngbe_adapter *adapter = netdev_priv(netdev); +#endif +#ifdef HAVE_8021P_SUPPORT + bool enable; +#endif + +#ifdef HAVE_VLAN_RX_REGISTER + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_irq_disable(adapter); + + adapter->vlgrp = grp; + + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_irq_enable(adapter, true, true); +#endif +#ifdef HAVE_8021P_SUPPORT +#ifdef HAVE_VLAN_RX_REGISTER + enable = grp; +#else +#if (defined NETIF_F_HW_VLAN_CTAG_RX) && (defined NETIF_F_HW_VLAN_STAG_RX) + enable = !!(features & 
(NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_RX)); +#elif (defined NETIF_F_HW_VLAN_CTAG_RX) + enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); +#elif (defined NETIF_F_HW_VLAN_STAG_RX) + enable = !!(features & NETIF_F_HW_VLAN_STAG_RX); +#else + enable = !!(features & NETIF_F_HW_VLAN_RX); +#endif /* NETIF_F_HW_VLAN_CTAG_RX */ +#endif /* HAVE_VLAN_RX_REGISTER */ + if (enable) + /* enable VLAN tag insert/strip */ + ngbe_vlan_strip_enable(adapter); + else + /* disable VLAN tag insert/strip */ + ngbe_vlan_strip_disable(adapter); + +#endif /* HAVE_8021P_SUPPORT */ +} + +static void ngbe_restore_vlan(struct ngbe_adapter *adapter) +{ +#ifdef HAVE_VLAN_RX_REGISTER + ngbe_vlan_mode(adapter->netdev, adapter->vlgrp); + + /* + * add vlan ID 0 and enable vlan tag stripping so we + * always accept priority-tagged traffic + */ +#if (defined NETIF_F_HW_VLAN_CTAG_RX) || (defined NETIF_F_HW_VLAN_STAG_RX) + ngbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); +#else + ngbe_vlan_rx_add_vid(adapter->netdev, 0); +#endif +#ifndef HAVE_8021P_SUPPORT + ngbe_vlan_strip_enable(adapter); +#endif + if (adapter->vlgrp) { + u16 vid; + for (vid = 0; vid < VLAN_N_VID; vid++) { + if (!vlan_group_get_device(adapter->vlgrp, vid)) + continue; +#if (defined NETIF_F_HW_VLAN_CTAG_RX) || (defined NETIF_F_HW_VLAN_STAG_RX) + ngbe_vlan_rx_add_vid(adapter->netdev, + htons(ETH_P_8021Q), vid); +#else + ngbe_vlan_rx_add_vid(adapter->netdev, vid); +#endif + } + } +#else + struct net_device *netdev = adapter->netdev; + u16 vid; + + ngbe_vlan_mode(netdev, netdev->features); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) +#if (defined NETIF_F_HW_VLAN_CTAG_RX) || (defined NETIF_F_HW_VLAN_STAG_RX) + ngbe_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); +#else + ngbe_vlan_rx_add_vid(netdev, vid); +#endif +#endif +} + +#endif +static u8 *ngbe_addr_list_itr(struct ngbe_hw __maybe_unused *hw, + u8 **mc_addr_ptr, u32 *vmdq) +{ +#ifdef NETDEV_HW_ADDR_T_MULTICAST + struct netdev_hw_addr *mc_ptr; +#else + struct dev_mc_list *mc_ptr; +#endif +#ifdef CONFIG_PCI_IOV + struct ngbe_adapter *adapter = hw->back; +#endif /* CONFIG_PCI_IOV */ + u8 *addr = *mc_addr_ptr; + + /* VMDQ_P implicitly uses the adapter struct when CONFIG_PCI_IOV is + * defined, so we have to wrap the pointer above correctly to prevent + * a warning. + */ + *vmdq = VMDQ_P(0); + +#ifdef NETDEV_HW_ADDR_T_MULTICAST + mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]); + if (mc_ptr->list.next) { + struct netdev_hw_addr *ha; + + ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list); + *mc_addr_ptr = ha->addr; + } +#else + mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]); + if (mc_ptr->next) + *mc_addr_ptr = mc_ptr->next->dmi_addr; +#endif + else + *mc_addr_ptr = NULL; + + return addr; +} + +/** + * ngbe_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. 
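+ * The list is walked with the ngbe_addr_list_itr() iterator above and
+ * handed to hw->mac.ops.update_mc_addr_list().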
+ * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +int ngbe_write_mc_addr_list(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; +#ifdef NETDEV_HW_ADDR_T_MULTICAST + struct netdev_hw_addr *ha; +#endif + u8 *addr_list = NULL; + int addr_count = 0; + + if (!hw->mac.ops.update_mc_addr_list) + return -ENOMEM; + + if (!netif_running(netdev)) + return 0; + + + if (netdev_mc_empty(netdev)) { + hw->mac.ops.update_mc_addr_list(hw, NULL, 0, + ngbe_addr_list_itr, true); + } else { +#ifdef NETDEV_HW_ADDR_T_MULTICAST + ha = list_first_entry(&netdev->mc.list, + struct netdev_hw_addr, list); + addr_list = ha->addr; +#else + addr_list = netdev->mc_list->dmi_addr; +#endif + addr_count = netdev_mc_count(netdev); + + hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, + ngbe_addr_list_itr, true); + } + +#ifdef CONFIG_PCI_IOV + ngbe_restore_vf_multicasts(adapter); +#endif + return addr_count; +} + + +void ngbe_full_sync_mac_table(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i; + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & NGBE_MAC_STATE_IN_USE) { + hw->mac.ops.set_rar(hw, i, + adapter->mac_table[i].addr, + adapter->mac_table[i].pools, + NGBE_PSR_MAC_SWC_AD_H_AV); + } else { + hw->mac.ops.clear_rar(hw, i); + } + adapter->mac_table[i].state &= ~(NGBE_MAC_STATE_MODIFIED); + } +} + +static void ngbe_sync_mac_table(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i; + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & NGBE_MAC_STATE_MODIFIED) { + if (adapter->mac_table[i].state & + NGBE_MAC_STATE_IN_USE) { + hw->mac.ops.set_rar(hw, i, + adapter->mac_table[i].addr, + adapter->mac_table[i].pools, + NGBE_PSR_MAC_SWC_AD_H_AV); + } else { + hw->mac.ops.clear_rar(hw, i); + } + adapter->mac_table[i].state &= + ~(NGBE_MAC_STATE_MODIFIED); + } + } +} + +int ngbe_available_rars(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 i, count = 0; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state == 0) + count++; + } + return count; +} + +/* this function destroys the first RAR entry */ +static void ngbe_mac_set_default_filter(struct ngbe_adapter *adapter, + u8 *addr) +{ + struct ngbe_hw *hw = &adapter->hw; + + memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN); + adapter->mac_table[0].pools = 1ULL << VMDQ_P(0); + adapter->mac_table[0].state = (NGBE_MAC_STATE_DEFAULT | + NGBE_MAC_STATE_IN_USE); + hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr, + adapter->mac_table[0].pools, + NGBE_PSR_MAC_SWC_AD_H_AV); +} + +int ngbe_add_mac_filter(struct ngbe_adapter *adapter, u8 *addr, u16 pool) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & NGBE_MAC_STATE_IN_USE) { + if (ether_addr_equal(addr, adapter->mac_table[i].addr)) { + if (adapter->mac_table[i].pools != (1ULL << pool)) { + adapter->mac_table[i].pools |= (1ULL << pool); + adapter->mac_table[i].state |= NGBE_MAC_STATE_MODIFIED; + ngbe_sync_mac_table(adapter); + return i; + } + } + } + } + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & NGBE_MAC_STATE_IN_USE) { + continue; + } + adapter->mac_table[i].state |= (NGBE_MAC_STATE_MODIFIED | + NGBE_MAC_STATE_IN_USE); + 
memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN); + adapter->mac_table[i].pools = (1ULL << pool); + ngbe_sync_mac_table(adapter); + return i; + } + return -ENOMEM; +} + +static void ngbe_flush_sw_mac_table(struct ngbe_adapter *adapter) +{ + u32 i; + struct ngbe_hw *hw = &adapter->hw; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + adapter->mac_table[i].state |= NGBE_MAC_STATE_MODIFIED; + adapter->mac_table[i].state &= ~NGBE_MAC_STATE_IN_USE; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + adapter->mac_table[i].pools = 0; + } + ngbe_sync_mac_table(adapter); +} + +int ngbe_del_mac_filter(struct ngbe_adapter *adapter, u8 *addr, u16 pool) +{ + /* search table for addr, if found, set to 0 and sync */ + u32 i; + struct ngbe_hw *hw = &adapter->hw; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (ether_addr_equal(addr, adapter->mac_table[i].addr)) { + if (adapter->mac_table[i].pools & (1ULL << pool)) { + adapter->mac_table[i].state |= NGBE_MAC_STATE_MODIFIED; + adapter->mac_table[i].pools &= ~(1ULL << pool); + if (!adapter->mac_table[i].pools) { + adapter->mac_table[i].state &= ~NGBE_MAC_STATE_IN_USE; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + } + ngbe_sync_mac_table(adapter); + return 0; + } + } + } + return -ENOMEM; +} + +#ifdef HAVE_SET_RX_MODE +/** + * ngbe_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list to the RAR table. + * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +int ngbe_write_uc_addr_list(struct net_device *netdev, int pool) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + int count = 0; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > ngbe_available_rars(adapter)) + return -ENOMEM; + + if (!netdev_uc_empty(netdev)) { +#ifdef NETDEV_HW_ADDR_T_UNICAST + struct netdev_hw_addr *ha; +#else + struct dev_mc_list *ha; +#endif + netdev_for_each_uc_addr(ha, netdev) { +#ifdef NETDEV_HW_ADDR_T_UNICAST + ngbe_del_mac_filter(adapter, ha->addr, pool); + ngbe_add_mac_filter(adapter, ha->addr, pool); +#else + ngbe_del_mac_filter(adapter, ha->da_addr, pool); + ngbe_add_mac_filter(adapter, ha->da_addr, pool); +#endif + count++; + } + } + return count; +} + +#endif + +/** + * ngbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_method entry point is called whenever the unicast/multicast + * address list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast and + * promiscuous mode. 
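+ * IFF_PROMISC sets the UPE/MPE bits and clears VLAN filtering,
+ * IFF_ALLMULTI sets MPE only, and when the unicast RAR table or the
+ * multicast MTA cannot hold all requested addresses the corresponding
+ * promiscuous bit is enabled as a fallback.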
+ **/ +void ngbe_set_rx_mode(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 fctrl, vmolr, vlnctrl; + int count; + + /* Check for Promiscuous and All Multicast modes */ + fctrl = rd32m(hw, NGBE_PSR_CTL, + ~(NGBE_PSR_CTL_UPE | NGBE_PSR_CTL_MPE)); + vmolr = rd32m(hw, NGBE_PSR_VM_L2CTL(VMDQ_P(0)), + ~(NGBE_PSR_VM_L2CTL_UPE | + NGBE_PSR_VM_L2CTL_MPE | + NGBE_PSR_VM_L2CTL_ROPE | + NGBE_PSR_VM_L2CTL_ROMPE)); + vlnctrl = rd32m(hw, NGBE_PSR_VLAN_CTL, + ~(NGBE_PSR_VLAN_CTL_VFE | + NGBE_PSR_VLAN_CTL_CFIEN)); + + /* set all bits that we expect to always be set */ + fctrl |= NGBE_PSR_CTL_BAM | NGBE_PSR_CTL_MFE; + vmolr |= NGBE_PSR_VM_L2CTL_BAM | + NGBE_PSR_VM_L2CTL_AUPE | + NGBE_PSR_VM_L2CTL_VACC; +#if defined(NETIF_F_HW_VLAN_TX) || \ + defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) + vlnctrl |= NGBE_PSR_VLAN_CTL_VFE; +#endif + + hw->addr_ctrl.user_set_promisc = false; + if (netdev->flags & IFF_PROMISC) { + hw->addr_ctrl.user_set_promisc = true; + fctrl |= (NGBE_PSR_CTL_UPE | NGBE_PSR_CTL_MPE); + /* pf don't want packets routing to vf, so clear UPE */ + vmolr |= NGBE_PSR_VM_L2CTL_MPE; + vlnctrl &= ~NGBE_PSR_VLAN_CTL_VFE; + } + + if (netdev->flags & IFF_ALLMULTI) { + fctrl |= NGBE_PSR_CTL_MPE; + vmolr |= NGBE_PSR_VM_L2CTL_MPE; + } + + /* This is useful for sniffing bad packets. */ + if (netdev->features & NETIF_F_RXALL) { + vmolr |= (NGBE_PSR_VM_L2CTL_UPE | NGBE_PSR_VM_L2CTL_MPE); + vlnctrl &= ~NGBE_PSR_VLAN_CTL_VFE; + /* receive bad packets */ + wr32m(hw, NGBE_RSEC_CTL, + NGBE_RSEC_CTL_SAVE_MAC_ERR, + NGBE_RSEC_CTL_SAVE_MAC_ERR); + } else { + vmolr |= NGBE_PSR_VM_L2CTL_ROPE | NGBE_PSR_VM_L2CTL_ROMPE; + } + + /* + * Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + count = ngbe_write_uc_addr_list(netdev, VMDQ_P(0)); + if (count < 0) { + vmolr &= ~NGBE_PSR_VM_L2CTL_ROPE; + vmolr |= NGBE_PSR_VM_L2CTL_UPE; + } + + /* + * Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = ngbe_write_mc_addr_list(netdev); + if (count < 0) { + vmolr &= ~NGBE_PSR_VM_L2CTL_ROMPE; + vmolr |= NGBE_PSR_VM_L2CTL_MPE; + } + + wr32(hw, NGBE_PSR_VLAN_CTL, vlnctrl); + wr32(hw, NGBE_PSR_CTL, fctrl); + wr32(hw, NGBE_PSR_VM_L2CTL(VMDQ_P(0)), vmolr); + +#if (defined NETIF_F_HW_VLAN_CTAG_RX) && (defined NETIF_F_HW_VLAN_STAG_RX) + if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + (netdev->features & NETIF_F_HW_VLAN_STAG_RX)) +#elif (defined NETIF_F_HW_VLAN_CTAG_RX) + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) +#elif (defined NETIF_F_HW_VLAN_STAG_RX) + if (netdev->features & NETIF_F_HW_VLAN_STAG_RX) +#else + if (netdev->features & NETIF_F_HW_VLAN_RX) +#endif + ngbe_vlan_strip_enable(adapter); + else + ngbe_vlan_strip_disable(adapter); +} + +static void ngbe_napi_enable_all(struct ngbe_adapter *adapter) +{ + struct ngbe_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; +#ifdef HAVE_NDO_BUSY_POLL + ngbe_qv_init_lock(adapter->q_vector[q_idx]); +#endif + napi_enable(&q_vector->napi); + } +} + +static void ngbe_napi_disable_all(struct ngbe_adapter *adapter) +{ + struct ngbe_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; + 
napi_disable(&q_vector->napi); +#ifdef HAVE_NDO_BUSY_POLL + while (!ngbe_qv_disable(adapter->q_vector[q_idx])) { + pr_info("QV %d locked\n", q_idx); + usleep_range(1000, 20000); + } +#endif + } +} + +#ifdef NETIF_F_GSO_PARTIAL +/* NETIF_F_GSO_IPXIP4/6 may not be defined in all distributions */ +#ifndef NETIF_F_GSO_IPXIP4 +#define NETIF_F_GSO_IPXIP4 0 +#endif +#ifndef NETIF_F_GSO_IPXIP6 +#define NETIF_F_GSO_IPXIP6 0 +#endif +#define NGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) +#endif /* NETIF_F_GSO_PARTIAL */ + +static inline unsigned long ngbe_tso_features(void) +{ + unsigned long features = 0; + +#ifdef NETIF_F_TSO + features |= NETIF_F_TSO; +#endif /* NETIF_F_TSO */ +#ifdef NETIF_F_TSO6 + features |= NETIF_F_TSO6; +#endif /* NETIF_F_TSO6 */ +#ifdef NETIF_F_GSO_PARTIAL + features |= NETIF_F_GSO_PARTIAL | NGBE_GSO_PARTIAL_FEATURES; +#endif + + return features; +} + +#ifndef NGBE_NO_LLI +static void ngbe_configure_lli(struct ngbe_adapter *adapter) +{ + /* lli should only be enabled with MSI-X and MSI */ + if (!(adapter->flags & NGBE_FLAG_MSI_ENABLED) && + !(adapter->flags & NGBE_FLAG_MSIX_ENABLED)) + return; + + if (adapter->lli_etype) { + wr32(&adapter->hw, NGBE_RDB_5T_CTL1(0), + (NGBE_RDB_5T_CTL1_LLI | + NGBE_RDB_5T_CTL1_SIZE_BP)); + wr32(&adapter->hw, NGBE_RDB_ETYPE_CLS(0), + NGBE_RDB_ETYPE_CLS_LLI); + wr32(&adapter->hw, NGBE_PSR_ETYPE_SWC(0), + (adapter->lli_etype | + NGBE_PSR_ETYPE_SWC_FILTER_EN)); + } + + if (adapter->lli_port) { + wr32(&adapter->hw, NGBE_RDB_5T_CTL1(0), + (NGBE_RDB_5T_CTL1_LLI | + NGBE_RDB_5T_CTL1_SIZE_BP)); + + wr32(&adapter->hw, NGBE_RDB_5T_CTL0(0), + (NGBE_RDB_5T_CTL0_POOL_MASK_EN | + (NGBE_RDB_5T_CTL0_PRIORITY_MASK << + NGBE_RDB_5T_CTL0_PRIORITY_SHIFT) | + (NGBE_RDB_5T_CTL0_DEST_PORT_MASK << + NGBE_RDB_5T_CTL0_5TUPLE_MASK_SHIFT))); + + wr32(&adapter->hw, NGBE_RDB_5T_SDP(0), + (adapter->lli_port << 16)); + } + + if (adapter->lli_size) { + wr32(&adapter->hw, NGBE_RDB_5T_CTL1(0), + NGBE_RDB_5T_CTL1_LLI); + wr32m(&adapter->hw, NGBE_RDB_LLI_THRE, + NGBE_RDB_LLI_THRE_SZ(~0), adapter->lli_size); + wr32(&adapter->hw, NGBE_RDB_5T_CTL0(0), + (NGBE_RDB_5T_CTL0_POOL_MASK_EN | + (NGBE_RDB_5T_CTL0_PRIORITY_MASK << + NGBE_RDB_5T_CTL0_PRIORITY_SHIFT) | + (NGBE_RDB_5T_CTL0_5TUPLE_MASK_MASK << + NGBE_RDB_5T_CTL0_5TUPLE_MASK_SHIFT))); + } + + if (adapter->lli_vlan_pri) { + wr32m(&adapter->hw, NGBE_RDB_LLI_THRE, + NGBE_RDB_LLI_THRE_PRIORITY_EN | + NGBE_RDB_LLI_THRE_UP(~0), + NGBE_RDB_LLI_THRE_PRIORITY_EN | + (adapter->lli_vlan_pri << NGBE_RDB_LLI_THRE_UP_SHIFT)); + } +} + +#endif /* NGBE_NO_LLI */ +/* Additional bittime to account for NGBE framing */ +#define NGBE_ETH_FRAMING 20 + + +/* + * ngbe_hpbthresh - calculate high water mark for flow control + * + * @adapter: board private structure to calculate for + * @pb - packet buffer to calculate + */ +static int ngbe_hpbthresh(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct net_device *dev = adapter->netdev; + int link, tc, kb, marker; + u32 dv_id, rx_pba; + + /* Calculate max LAN frame size */ + tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NGBE_ETH_FRAMING; + + /* Calculate delay value for device */ + dv_id = NGBE_DV(link, tc); + + /* Loopback switch introduces additional latency */ + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) + dv_id += NGBE_B2BT(tc); + + /* Delay value is calculated in bit times convert to KB */ + kb = NGBE_BT2KB(dv_id); + rx_pba = rd32(hw, 
NGBE_RDB_PB_SZ) + >> NGBE_RDB_PB_SZ_SHIFT; + + marker = rx_pba - kb; + + /* It is possible that the packet buffer is not large enough + * to provide required headroom. In this case throw an error + * to user and do the best we can. + */ + if (marker < 0) { + e_warn(drv, "Packet Buffer can not provide enough " + "headroom to support flow control. " + "Decrease MTU or number of traffic classes\n"); + marker = tc + 1; + } + + return marker; +} + +/* + * ngbe_lpbthresh - calculate low water mark for flow control + * + * @adapter: board private structure to calculate for + * @pb - packet buffer to calculate + */ +static int ngbe_lpbthresh(struct ngbe_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + int tc; + u32 dv_id; + + /* Calculate max LAN frame size */ + tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; + + /* Calculate delay value for device */ + dv_id = NGBE_LOW_DV(tc); + + /* Delay value is calculated in bit times convert to KB */ + return NGBE_BT2KB(dv_id); +} + + +/* + * ngbe_pbthresh_setup - calculate and setup high low water marks + */ + +static void ngbe_pbthresh_setup(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int num_tc = netdev_get_num_tc(adapter->netdev); + + if (!num_tc) + num_tc = 1; + + hw->fc.high_water = ngbe_hpbthresh(adapter); + hw->fc.low_water = ngbe_lpbthresh(adapter); + + /* Low water marks must not be larger than high water marks */ + if (hw->fc.low_water > hw->fc.high_water) + hw->fc.low_water = 0; +} + +static void ngbe_configure_pb(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int hdrm = 0; + int tc = netdev_get_num_tc(adapter->netdev); + + hw->mac.ops.setup_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL); + ngbe_pbthresh_setup(adapter); +} + +static void ngbe_configure_isb(struct ngbe_adapter *adapter) +{ + /* set ISB Address */ + struct ngbe_hw *hw = &adapter->hw; + + wr32(hw, NGBE_PX_ISB_ADDR_L, + adapter->isb_dma & DMA_BIT_MASK(32)); + wr32(hw, NGBE_PX_ISB_ADDR_H, adapter->isb_dma >> 32); +} + +static void ngbe_configure_port(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 value, i; + + value = (adapter->num_vfs == 0) ? + NGBE_CFG_PORT_CTL_NUM_VT_NONE : + NGBE_CFG_PORT_CTL_NUM_VT_8; + + /* enable double vlan and qinq, NONE VT at default */ + value |= NGBE_CFG_PORT_CTL_D_VLAN | + NGBE_CFG_PORT_CTL_QINQ; + wr32m(hw, NGBE_CFG_PORT_CTL, + NGBE_CFG_PORT_CTL_D_VLAN | + NGBE_CFG_PORT_CTL_QINQ | + NGBE_CFG_PORT_CTL_NUM_VT_MASK, + value); + + wr32(hw, NGBE_CFG_TAG_TPID(0), + ETH_P_8021Q | ETH_P_8021AD << 16); + adapter->hw.tpid[0] = ETH_P_8021Q; + adapter->hw.tpid[1] = ETH_P_8021AD; + for (i = 1; i < 4; i++) + wr32(hw, NGBE_CFG_TAG_TPID(i), + ETH_P_8021Q | ETH_P_8021Q << 16); + for (i = 2; i < 8; i++) + adapter->hw.tpid[i] = ETH_P_8021Q; +} + +#ifdef HAVE_VIRTUAL_STATION +static void ngbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool, + struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 vmolr; + + /* No unicast promiscuous support for VMDQ devices. 
*/ + vmolr = rd32m(hw, NGBE_PSR_VM_L2CTL(pool), + ~(NGBE_PSR_VM_L2CTL_UPE | + NGBE_PSR_VM_L2CTL_MPE | + NGBE_PSR_VM_L2CTL_ROPE | + NGBE_PSR_VM_L2CTL_ROMPE)); + + /* set all bits that we expect to always be set */ + vmolr |= NGBE_PSR_VM_L2CTL_ROPE | + NGBE_PSR_VM_L2CTL_BAM | NGBE_PSR_VM_L2CTL_AUPE; + + if (dev->flags & IFF_ALLMULTI) { + vmolr |= NGBE_PSR_VM_L2CTL_MPE; + } else { + vmolr |= NGBE_PSR_VM_L2CTL_ROMPE; + ngbe_write_mc_addr_list(dev); + } + + ngbe_write_uc_addr_list(adapter->netdev, pool); + wr32(hw, NGBE_PSR_VM_L2CTL(pool), vmolr); +} + +static void ngbe_fwd_psrtype(struct ngbe_fwd_adapter *accel) +{ + struct ngbe_adapter *adapter = accel->adapter; + struct ngbe_hw *hw = &adapter->hw; + u32 psrtype = NGBE_RDB_PL_CFG_L4HDR | + NGBE_RDB_PL_CFG_L3HDR | + NGBE_RDB_PL_CFG_L2HDR | + NGBE_RDB_PL_CFG_TUN_OUTER_L2HDR | + NGBE_RDB_PL_CFG_TUN_TUNHDR; + + + wr32(hw, NGBE_RDB_PL_CFG(VMDQ_P(accel->index)), psrtype); +} + +static void ngbe_disable_fwd_ring(struct ngbe_fwd_adapter *accel, + struct ngbe_ring *rx_ring) +{ + struct ngbe_adapter *adapter = accel->adapter; + int index = rx_ring->queue_index + accel->rx_base_queue; + + /* shutdown specific queue receive and wait for dma to settle */ + ngbe_disable_rx_queue(adapter, rx_ring); + usleep_range(10000, 20000); + ngbe_intr_disable(&adapter->hw, NGBE_INTR_Q(index)); + ngbe_clean_rx_ring(rx_ring); + rx_ring->accel = NULL; +} + +static void ngbe_enable_fwd_ring(struct ngbe_fwd_adapter *accel, + struct ngbe_ring *rx_ring) +{ + struct ngbe_adapter *adapter = accel->adapter; + int index = rx_ring->queue_index + accel->rx_base_queue; + + ngbe_intr_enable(&adapter->hw, NGBE_INTR_Q(index)); +} + +static int ngbe_fwd_ring_down(struct net_device *vdev, + struct ngbe_fwd_adapter *accel) +{ + struct ngbe_adapter *adapter = accel->adapter; + unsigned int rxbase = accel->rx_base_queue; + unsigned int txbase = accel->tx_base_queue; + int i; + + netif_tx_stop_all_queues(vdev); + + for (i = 0; i < adapter->queues_per_pool; i++) { + ngbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); + adapter->rx_ring[rxbase + i]->netdev = adapter->netdev; + } + + for (i = 0; i < adapter->queues_per_pool; i++) { + adapter->tx_ring[txbase + i]->accel = NULL; + adapter->tx_ring[txbase + i]->netdev = adapter->netdev; + } + + return 0; +} + +static int ngbe_fwd_ring_up(struct net_device *vdev, + struct ngbe_fwd_adapter *accel) +{ + struct ngbe_adapter *adapter = accel->adapter; + unsigned int rxbase, txbase, queues; + int i, baseq, err = 0; + + if (!test_bit(accel->index, &adapter->fwd_bitmask)) + return 0; + + baseq = VMDQ_P(accel->index) * adapter->queues_per_pool; + netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", + accel->index, adapter->num_vmdqs, + baseq, baseq + adapter->queues_per_pool, + adapter->fwd_bitmask); + + accel->vdev = vdev; + accel->rx_base_queue = rxbase = baseq; + accel->tx_base_queue = txbase = baseq; + + for (i = 0; i < adapter->queues_per_pool; i++) + ngbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); + + for (i = 0; i < adapter->queues_per_pool; i++) { + adapter->rx_ring[rxbase + i]->netdev = vdev; + adapter->rx_ring[rxbase + i]->accel = accel; + ngbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]); + } + + for (i = 0; i < adapter->queues_per_pool; i++) { + adapter->tx_ring[txbase + i]->netdev = vdev; + adapter->tx_ring[txbase + i]->accel = accel; + } + + queues = min_t(unsigned int, + adapter->queues_per_pool, vdev->num_tx_queues); + err = netif_set_real_num_tx_queues(vdev, queues); + if (err) + goto fwd_queue_err; 
+ + err = netif_set_real_num_rx_queues(vdev, queues); + if (err) + goto fwd_queue_err; + + if (is_valid_ether_addr(vdev->dev_addr)) + ngbe_add_mac_filter(adapter, vdev->dev_addr, + VMDQ_P(accel->index)); + + ngbe_fwd_psrtype(accel); + ngbe_macvlan_set_rx_mode(vdev, VMDQ_P(accel->index), adapter); + + for (i = 0; i < adapter->queues_per_pool; i++) + ngbe_enable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); + + return err; +fwd_queue_err: + ngbe_fwd_ring_down(vdev, accel); + return err; +} + +static void ngbe_configure_dfwd(struct ngbe_adapter *adapter) +{ + struct net_device *upper; + struct list_head *iter; + int err; + + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *dfwd = netdev_priv(upper); + struct ngbe_fwd_adapter *accel = dfwd->fwd_priv; + + if (accel) { + err = ngbe_fwd_ring_up(upper, accel); + if (err) + continue; + } + } + } +} +#endif /*HAVE_VIRTUAL_STATION*/ + +static void ngbe_configure(struct ngbe_adapter *adapter) +{ + ngbe_configure_pb(adapter); + + /* + * We must restore virtualization before VLANs or else + * the VLVF registers will not be populated + */ + ngbe_configure_virtualization(adapter); + /* configure Double Vlan */ + ngbe_configure_port(adapter); + + ngbe_set_rx_mode(adapter->netdev); +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) + ngbe_restore_vlan(adapter); +#endif + + ngbe_configure_tx(adapter); + ngbe_configure_rx(adapter); + ngbe_configure_isb(adapter); +#ifdef HAVE_VIRTUAL_STATION + ngbe_configure_dfwd(adapter); +#endif +} + + +/** + * ngbe_non_sfp_link_config - set up non-SFP+ link + * @hw: pointer to private hardware struct + * + * Returns 0 on success, negative on failure + **/ +static int ngbe_non_sfp_link_config(struct ngbe_hw *hw) +{ + u32 speed; + u32 ret = NGBE_ERR_LINK_SETUP; + struct ngbe_adapter *adapter = hw->back; + + if (hw->mac.autoneg) + speed = hw->phy.autoneg_advertised; + else + speed = hw->phy.force_speed; + + if (hw->ncsi_enabled || + (hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA || + adapter->eth_priv_flags & NGBE_ETH_PRIV_FLAG_LLDP) + return 0; + + if (hw->phy.type == ngbe_phy_internal || + hw->phy.type == ngbe_phy_internal_yt8521s_sfi) { + hw->phy.ops.phy_resume(hw); + hw->phy.ops.setup_once(hw); + } + + ret = hw->mac.ops.setup_link(hw, speed, false); + + return ret; +} + +#if 0 +/** + * ngbe_clear_vf_stats_counters - Clear out VF stats after reset + * @adapter: board private structure + * + * On a reset we need to clear out the VF stats or accounting gets + * messed up because they're not clear on read. 
+ **/ +static void ngbe_clear_vf_stats_counters(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < adapter->num_vfs; i++) { + adapter->vfinfo->last_vfstats.gprc = + rd32(hw, NGBE_VX_GPRC); + adapter->vfinfo->saved_rst_vfstats.gprc += + adapter->vfinfo->vfstats.gprc; + adapter->vfinfo->vfstats.gprc = 0; + adapter->vfinfo->last_vfstats.gptc = + rd32(hw, NGBE_VX_GPTC); + adapter->vfinfo->saved_rst_vfstats.gptc += + adapter->vfinfo->vfstats.gptc; + adapter->vfinfo->vfstats.gptc = 0; + adapter->vfinfo->last_vfstats.gorc = + rd32(hw, NGBE_VX_GORC_LSB); + adapter->vfinfo->saved_rst_vfstats.gorc += + adapter->vfinfo->vfstats.gorc; + adapter->vfinfo->vfstats.gorc = 0; + adapter->vfinfo->last_vfstats.gotc = + rd32(hw, NGBE_VX_GOTC_LSB); + adapter->vfinfo->saved_rst_vfstats.gotc += + adapter->vfinfo->vfstats.gotc; + adapter->vfinfo->vfstats.gotc = 0; + adapter->vfinfo->last_vfstats.mprc = + rd32(hw, NGBE_VX_MPRC); + adapter->vfinfo->saved_rst_vfstats.mprc += + adapter->vfinfo->vfstats.mprc; + adapter->vfinfo->vfstats.mprc = 0; +} +#endif + +static void ngbe_setup_gpie(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 gpie = 0; + + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) { + gpie = NGBE_PX_GPIE_MODEL; + /* + * use EIAM to auto-mask when MSI-X interrupt is asserted + * this saves a register write for every interrupt + */ + } else { + /* legacy interrupts, use EIAM to auto-mask when reading EICR, + * specifically only auto mask tx and rx interrupts */ + } + + wr32(hw, NGBE_PX_GPIE, gpie); +} + +static void ngbe_up_complete(struct ngbe_adapter *adapter) +{ + + struct ngbe_hw *hw = &adapter->hw; + int err; + + ngbe_get_hw_control(adapter); + ngbe_setup_gpie(adapter); + + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) + ngbe_configure_msix(adapter); + else + ngbe_configure_msi_and_legacy(adapter); + + smp_mb__before_atomic(); + clear_bit(__NGBE_DOWN, &adapter->state); + ngbe_napi_enable_all(adapter); +#ifndef NGBE_NO_LLI + ngbe_configure_lli(adapter); +#endif + + err = ngbe_non_sfp_link_config(hw); + if (err) + e_err(probe, "link_config FAILED %d\n", err); + + /* sellect GMII */ + wr32(hw, NGBE_MAC_TX_CFG, + (rd32(hw, NGBE_MAC_TX_CFG) & ~NGBE_MAC_TX_CFG_SPEED_MASK) | + NGBE_MAC_TX_CFG_SPEED_1G); + + /* clear any pending interrupts, may auto mask */ + rd32(hw, NGBE_PX_IC); + rd32(hw, NGBE_PX_MISC_IC); + ngbe_irq_enable(adapter, true, true); + + if (hw->gpio_ctl == 1) + /* gpio0 is used to power on/off control*/ + wr32(hw, NGBE_GPIO_DR, 0); + + /* enable transmits */ +// if (!((adapter->num_rx_queues == 0) && (adapter->num_tx_queues == 0))) + netif_tx_start_all_queues(adapter->netdev); + + /* bring the link up in the watchdog, this could race with our first + * link up interrupt but shouldn't be a problem */ + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + if (NGBE_POLL_LINK_STATUS == 1) + mod_timer(&adapter->link_check_timer, jiffies); + mod_timer(&adapter->service_timer, jiffies); + /* ngbe_clear_vf_stats_counters(adapter); */ + + if (hw->bus.lan_id == 0) { + wr32m(hw, NGBE_MIS_PRB_CTL, + NGBE_MIS_PRB_CTL_LAN0_UP, NGBE_MIS_PRB_CTL_LAN0_UP); + } + else if (hw->bus.lan_id == 1) { + wr32m(hw, NGBE_MIS_PRB_CTL, + NGBE_MIS_PRB_CTL_LAN1_UP, NGBE_MIS_PRB_CTL_LAN1_UP); + } + else if (hw->bus.lan_id == 2) { + wr32m(hw, NGBE_MIS_PRB_CTL, + NGBE_MIS_PRB_CTL_LAN2_UP, NGBE_MIS_PRB_CTL_LAN2_UP); + } + else if (hw->bus.lan_id == 3) { + wr32m(hw, NGBE_MIS_PRB_CTL, + NGBE_MIS_PRB_CTL_LAN3_UP, 
NGBE_MIS_PRB_CTL_LAN3_UP); + } + else + e_err(probe, "ngbe_up_complete:invalid bus lan id %d\n", hw->bus.lan_id); + + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + wr32m(hw, NGBE_CFG_PORT_CTL, + NGBE_CFG_PORT_CTL_PFRSTD, NGBE_CFG_PORT_CTL_PFRSTD); + + /* clear ecc reset flag if set */ + if (adapter->flags2 & NGBE_FLAG2_ECC_ERR_RESET) { + adapter->flags2 &= ~NGBE_FLAG2_ECC_ERR_RESET; + } +} + +void ngbe_reinit_locked(struct ngbe_adapter *adapter) +{ + WARN_ON(in_interrupt()); + /* put off any impending NetWatchDogTimeout */ +#ifdef HAVE_NETIF_TRANS_UPDATE + netif_trans_update(adapter->netdev); +#else + adapter->netdev->trans_start = jiffies; +#endif + + while (test_and_set_bit(__NGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + ngbe_down(adapter); + /* + * If SR-IOV enabled then wait a bit before bringing the adapter + * back up to give the VFs time to respond to the reset. The + * two second wait is based upon the watchdog timer cycle in + * the VF driver. + */ + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) + msleep(2000); + ngbe_up(adapter); + clear_bit(__NGBE_RESETTING, &adapter->state); +} + +void ngbe_up(struct ngbe_adapter *adapter) +{ + /* hardware has been reset, we need to reload some things */ + ngbe_configure(adapter); + + ngbe_up_complete(adapter); +} + +void ngbe_reset(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int err; + u8 old_addr[ETH_ALEN]; + + if (NGBE_REMOVED(hw->hw_addr)) + return; + + err = hw->mac.ops.init_hw(hw); + switch (err) { + case 0: + break; + case NGBE_ERR_MASTER_REQUESTS_PENDING: + e_dev_err("master disable timed out\n"); + break; + case NGBE_ERR_EEPROM_VERSION: + /* We are running on a pre-production device, log a warning */ + e_dev_warn("This device is a pre-production adapter/LOM. " + "Please be aware there may be issues associated " + "with your hardware. 
If you are experiencing " + "problems please contact your hardware " + "representative who provided you with this " + "hardware.\n"); + break; + default: + e_dev_err("Hardware Error: %d\n", err); + } + + /* do not flush user set addresses */ + memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len); + ngbe_flush_sw_mac_table(adapter); + ngbe_mac_set_default_filter(adapter, old_addr); + + /* update SAN MAC vmdq pool selection */ + hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); + + /* Clear saved DMA coalescing values except for watchdog_timer */ + hw->mac.dmac_config.fcoe_en = false; + hw->mac.dmac_config.link_speed = 0; + hw->mac.dmac_config.fcoe_tc = 0; + hw->mac.dmac_config.num_tcs = 0; + +#ifdef HAVE_PTP_1588_CLOCK + if (test_bit(__NGBE_PTP_RUNNING, &adapter->state)) + ngbe_ptp_reset(adapter); +#endif +} + +/** + * ngbe_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void ngbe_clean_rx_ring(struct ngbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_buffer_info) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct ngbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; + if (rx_buffer->dma) { + dma_unmap_single(dev, + rx_buffer->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_buffer->dma = 0; + } + + if (rx_buffer->skb) { + struct sk_buff *skb = rx_buffer->skb; +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + if (NGBE_CB(skb)->dma_released) { + dma_unmap_single(dev, + NGBE_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + NGBE_CB(skb)->dma = 0; + NGBE_CB(skb)->dma_released = false; + } + + if (NGBE_CB(skb)->page_released) + dma_unmap_page(dev, + NGBE_CB(skb)->dma, + ngbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); +#else + /* We need to clean up RSC frag lists */ + skb = ngbe_merge_active_tail(skb); + if (ngbe_close_active_frag_list(skb)) + dma_unmap_single(dev, + NGBE_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + NGBE_CB(skb)->dma = 0; +#endif /* CONFIG_NGBE_DISABLE_PACKET_SPLIT */ + dev_kfree_skb(skb); + rx_buffer->skb = NULL; + } + +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + if (!rx_buffer->page) + continue; + + dma_unmap_page(dev, rx_buffer->page_dma, + ngbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + + __free_pages(rx_buffer->page, + ngbe_rx_pg_order(rx_ring)); + rx_buffer->page = NULL; +#endif + } + + size = sizeof(struct ngbe_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +#endif +} + +/** + * ngbe_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void ngbe_clean_tx_ring(struct ngbe_ring *tx_ring) +{ + struct ngbe_tx_buffer *tx_buffer_info; + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_buffer_info) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + ngbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); + } + if (!ring_is_xdp(tx_ring)) + netdev_tx_reset_queue(txring_txq(tx_ring)); + + size = sizeof(struct ngbe_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, 
tx_ring->size); +} + +/** + * ngbe_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void ngbe_clean_all_rx_rings(struct ngbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + ngbe_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * ngbe_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void ngbe_clean_all_tx_rings(struct ngbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + ngbe_clean_tx_ring(adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + ngbe_clean_tx_ring(adapter->xdp_ring[i]); +} + +static void ngbe_disable_device(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ngbe_hw *hw = &adapter->hw; +#ifdef HAVE_VIRTUAL_STATION + struct net_device *upper; + struct list_head *iter; +#endif + u32 i; + + /* signal that we are down to the interrupt handler */ + if (test_and_set_bit(__NGBE_DOWN, &adapter->state)) + return; /* do nothing if already down */ + + if (adapter->num_vfs) { + /* Clear EITR Select mapping */ + wr32(&adapter->hw, NGBE_PX_ITRSEL, 0); + + /* Mark all the VFs as inactive */ + for (i = 0 ; i < adapter->num_vfs; i++) + adapter->vfinfo[i].clear_to_send = 0; + + /* ping all the active vfs to let them know we are going down */ + ngbe_ping_all_vfs_with_link_status(adapter, false); + + /* Disable all VFTE/VFRE TX/RX */ + ngbe_disable_tx_rx(adapter); + } + + if (!(adapter->flags2 & NGBE_FLAG2_ECC_ERR_RESET)) + ngbe_disable_pcie_master(hw); + + /* disable receives */ + hw->mac.ops.disable_rx(hw); + + /* disable all enabled rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) + /* this call also flushes the previous write */ + ngbe_disable_rx_queue(adapter, adapter->rx_ring[i]); + + netif_tx_stop_all_queues(netdev); + + /* call carrier off first to avoid false dev_watchdog timeouts */ + netif_carrier_off(netdev); + netif_tx_disable(netdev); + +#ifdef HAVE_VIRTUAL_STATION + /* disable any upper devices */ + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *vlan = netdev_priv(upper); + + if (vlan->fwd_priv) { + netif_tx_stop_all_queues(upper); + netif_carrier_off(upper); + netif_tx_disable(upper); + } + } + } +#endif + + if (hw->gpio_ctl == 1) + /* gpio0 is used to power on/off control*/ + wr32(hw, NGBE_GPIO_DR, NGBE_GPIO_DR_0); + + /* synchronize_rcu() needed for pending XDP buffers to drain */ + if (adapter->xdp_ring[0]) + synchronize_rcu(); + + ngbe_irq_disable(adapter); + + ngbe_napi_disable_all(adapter); + + adapter->flags2 &= ~(NGBE_FLAG2_PF_RESET_REQUESTED | + NGBE_FLAG2_DEV_RESET_REQUESTED | + NGBE_FLAG2_GLOBAL_RESET_REQUESTED); + adapter->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE; + + del_timer_sync(&adapter->service_timer); + if (NGBE_POLL_LINK_STATUS == 1) + del_timer_sync(&adapter->link_check_timer); + + if (hw->bus.lan_id == 0) + wr32m(hw, NGBE_MIS_PRB_CTL, + NGBE_MIS_PRB_CTL_LAN0_UP, 0); + else if (hw->bus.lan_id == 1) + wr32m(hw, NGBE_MIS_PRB_CTL, + NGBE_MIS_PRB_CTL_LAN1_UP, 0); + else if (hw->bus.lan_id == 2) + wr32m(hw, NGBE_MIS_PRB_CTL, + NGBE_MIS_PRB_CTL_LAN2_UP, 0); + else if (hw->bus.lan_id == 3) + wr32m(hw, NGBE_MIS_PRB_CTL, + NGBE_MIS_PRB_CTL_LAN3_UP, 0); + else + e_dev_err("ngbe_disable_device:invalid bus lan id %d\n", hw->bus.lan_id); + + /*OCP NCSI need it*/ + if (!((hw->subsystem_device_id & WOL_SUP_MASK) == WOL_SUP || + hw->ncsi_enabled || + 
adapter->eth_priv_flags & NGBE_ETH_PRIV_FLAG_LLDP)) + /* disable mac transmiter */ + wr32m(hw, NGBE_MAC_TX_CFG, NGBE_MAC_TX_CFG_TE, 0); + + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < adapter->num_tx_queues; i++) { + u8 reg_idx = adapter->tx_ring[i]->reg_idx; + wr32(hw, NGBE_PX_TR_CFG(reg_idx), + NGBE_PX_TR_CFG_SWFLSH); + } + + for (i = 0; i < adapter->num_xdp_queues; i++) { + u8 reg_idx = adapter->xdp_ring[i]->reg_idx; + wr32(hw, NGBE_PX_TR_CFG(reg_idx), + NGBE_PX_TR_CFG_SWFLSH); + } + /* Disable the Tx DMA engine */ + wr32m(hw, NGBE_TDM_CTL, NGBE_TDM_CTL_TE, 0); +} + + +void ngbe_down(struct ngbe_adapter *adapter) +{ + ngbe_disable_device(adapter); + +#ifdef HAVE_PCI_ERS + if (!pci_channel_offline(adapter->pdev)) +#endif + ngbe_reset(adapter); + + ngbe_clean_all_tx_rings(adapter); + ngbe_clean_all_rx_rings(adapter); +} + +/** + * ngbe_init_shared_code - Initialize the shared code + * @hw: pointer to hardware structure + * + * This will assign function pointers and assign the MAC type and PHY code. + * Does not touch the hardware. This function must be called prior to any + * other function in the shared code. The ngbe_hw structure should be + * memset to 0 prior to calling this function. The following fields in + * hw structure should be filled in prior to calling this function: + * hw_addr, back, device_id, vendor_id, subsystem_device_id, + * subsystem_vendor_id, and revision_id + **/ +static int ngbe_init_shared_code(struct ngbe_hw *hw) +{ + int wol_mask = 0, ncsi_mask = 0; + u16 type_mask = 0, val; + u32 lan_en; + + lan_en = rd32(hw, NGBE_MIS_PWR); + if (!(lan_en & BIT(hw->bus.lan_id + 28))) + return -EIO; + type_mask = (u16)(hw->subsystem_device_id & OEM_MASK); + ncsi_mask = hw->subsystem_device_id & NCSI_SUP_MASK; + wol_mask = hw->subsystem_device_id & WOL_SUP_MASK; + + val = rd32(hw, NGBE_CFG_PORT_ST); + hw->mac_type = (val & BIT(7)) >> 7 ? + em_mac_type_rgmii : + em_mac_type_mdi; + + hw->wol_enabled = (wol_mask == WOL_SUP) ? 1 : 0; + hw->ncsi_enabled = (ncsi_mask == NCSI_SUP || + type_mask == OCP_CARD) ? 1 : 0; + + switch (type_mask) { + case LY_YT8521S_SFP: + case LY_M88E1512_SFP: + case YT8521S_SFP_GPIO: + case INTERNAL_YT8521S_SFP_GPIO: + hw->gpio_ctl = 1; + break; + default: + hw->gpio_ctl = 0; + break; + } + + switch (type_mask) { + case M88E1512_SFP: + case LY_M88E1512_SFP: + hw->phy.type = ngbe_phy_m88e1512_sfi; + break; + case M88E1512_RJ45: + hw->phy.type = ngbe_phy_m88e1512; + break; + case M88E1512_MIX: + hw->phy.type = ngbe_phy_m88e1512_unknown; + break; + case YT8521S_SFP: + case YT8521S_SFP_GPIO: + case LY_YT8521S_SFP: + hw->phy.type = ngbe_phy_yt8521s_sfi; + break; + case INTERNAL_YT8521S_SFP: + case INTERNAL_YT8521S_SFP_GPIO: + hw->phy.type = ngbe_phy_internal_yt8521s_sfi; + break; + default: + hw->phy.type = ngbe_phy_internal; + break; + } + + /* select claus22 */ + wr32(hw, NGBE_MDIO_CLAUSE_SELECT, 0xF); + ngbe_init_ops(hw); + + return 0; +} + +/** + * ngbe_sw_init - Initialize general software structures (struct ngbe_adapter) + * @adapter: board private structure to initialize + * + * ngbe_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
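+ *
+ * For example, the defaults set below include the RSS key seeded from
+ * def_rss_key[], Tx/Rx ring sizes of NGBE_DEFAULT_TXD/NGBE_DEFAULT_RXD,
+ * and full flow control (ngbe_fc_full) for both requested and current mode.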
+ **/ +static const u32 def_rss_key[10] = { + 0xE291D73D, 0x1805EC6C, 0x2A94B30D, + 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, + 0x6A3E67EA, 0x14364D17, 0x3BED200D +}; + +static int __devinit ngbe_sw_init(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int err; + u32 ssid = 0; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + if (hw->revision_id == NGBE_FAILED_READ_CFG_BYTE && + ngbe_check_cfg_remove(hw, pdev)) { + e_err(probe, "read of revision id failed\n"); + err = -ENODEV; + goto out; + } + + hw->oem_svid = pdev->subsystem_vendor; + hw->oem_ssid = pdev->subsystem_device; + if (pdev->subsystem_vendor == 0x8088) { + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + } else { + err = ngbe_flash_read_dword(hw, 0xfffdc, &ssid); + if (err) { + e_err(probe, "read of internel subsystem device id failed\n"); + err = -ENODEV; + goto out; + } + hw->subsystem_device_id = (u16)ssid; + hw->subsystem_device_id = hw->subsystem_device_id >> 8 | + hw->subsystem_device_id << 8; + } + + /* phy type, phy ops, mac ops */ + err = ngbe_init_shared_code(hw); + if (err) { + e_err(probe, "init_shared_code failed: %d\n", err); + goto out; + } + + adapter->mac_table = kzalloc(sizeof(struct ngbe_mac_addr) * + hw->mac.num_rar_entries, + GFP_ATOMIC); + if (!adapter->mac_table) { + err = NGBE_ERR_OUT_OF_MEM; + e_err(probe, "mac_table allocation failed: %d\n", err); + goto out; + } + + memcpy(adapter->rss_key, def_rss_key, sizeof(def_rss_key)); + + /* Set common capability flags and settings */ + adapter->max_q_vectors = NGBE_MAX_MSIX_Q_VECTORS_EMERALD; + + /* Set MAC specific capability flags and exceptions */ + adapter->flags |= NGBE_FLAGS_SP_INIT; + adapter->flags2 |= NGBE_FLAG2_TEMP_SENSOR_CAPABLE; + adapter->flags2 |= NGBE_FLAG2_EEE_CAPABLE; + + /* init mailbox params */ + hw->mbx.ops.init_params(hw); + + /* default flow control settings */ + hw->fc.requested_mode = ngbe_fc_full; + hw->fc.current_mode = ngbe_fc_full; /* init for ethtool output */ + + adapter->last_lfc_mode = hw->fc.current_mode; + hw->fc.pause_time = NGBE_DEFAULT_FCPAUSE; + hw->fc.send_xon = true; + hw->fc.disable_fc_autoneg = false; + + /* set default ring sizes */ + adapter->tx_ring_count = NGBE_DEFAULT_TXD; + adapter->rx_ring_count = NGBE_DEFAULT_RXD; + + /* set default work limits */ + adapter->tx_work_limit = NGBE_DEFAULT_TX_WORK; + adapter->rx_work_limit = NGBE_DEFAULT_RX_WORK; + + adapter->tx_timeout_recovery_level = 0; + + /* PF holds first pool slot */ + adapter->num_vmdqs = 1; + set_bit(0, &adapter->fwd_bitmask); + set_bit(__NGBE_DOWN, &adapter->state); + if (NGBE_LINK_RETRY == 1) + hw->restart_an = 0; + +out: + return err; +} + +/** + * ngbe_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int ngbe_setup_tx_resources(struct ngbe_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = -1; + int size; + + size = sizeof(struct ngbe_tx_buffer) * tx_ring->count; + + if (tx_ring->q_vector) + numa_node = tx_ring->q_vector->numa_node; + + tx_ring->tx_buffer_info = vzalloc_node(size, numa_node); + if (!tx_ring->tx_buffer_info) + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to 
nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union ngbe_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + set_dev_node(dev, numa_node); + tx_ring->desc = dma_alloc_coherent(dev, + tx_ring->size, + &tx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!tx_ring->desc) + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * ngbe_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int ngbe_setup_all_tx_resources(struct ngbe_adapter *adapter) +{ + int i = 0, j = 0, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = ngbe_setup_tx_resources(adapter->tx_ring[i]); + if (!err) + continue; + + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + goto err_setup_tx; + } + + for (j = 0; j < adapter->num_xdp_queues; j++) { + err = ngbe_setup_tx_resources(adapter->xdp_ring[j]); + if (!err) + continue; + + e_err(probe, "Allocation for Tx(XDP) Queue %u failed\n", j); + goto err_setup_tx; + } + + return 0; +err_setup_tx: + /* rewind the index freeing the rings as we go */ + while (j--) + ngbe_free_tx_resources(adapter->xdp_ring[j]); + while (i--) + ngbe_free_tx_resources(adapter->tx_ring[i]); + return err; +} +#ifdef HAVE_XDP_BUFF_RXQ +static int ngbe_rx_napi_id(struct ngbe_ring *rx_ring) +{ + struct ngbe_q_vector *q_vector = rx_ring->q_vector; + + return q_vector ? 
q_vector->napi.napi_id : 0; +} +#endif +/** + * ngbe_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int ngbe_setup_rx_resources(struct ngbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = -1; + int size; + + size = sizeof(struct ngbe_rx_buffer) * rx_ring->count; + + if (rx_ring->q_vector) + numa_node = rx_ring->q_vector->numa_node; + + rx_ring->rx_buffer_info = vzalloc_node(size, numa_node); + if (!rx_ring->rx_buffer_info) + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union ngbe_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + set_dev_node(dev, numa_node); + rx_ring->desc = dma_alloc_coherent(dev, + rx_ring->size, + &rx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!rx_ring->desc) + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) + goto err; + + if (!rx_ring->q_vector) + return 0; +#ifdef HAVE_XDP_BUFF_RXQ + /* XDP RX-queue info */ + if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, + rx_ring->queue_index, + ngbe_rx_napi_id(rx_ring)) < 0) + goto err; +#endif + rx_ring->xdp_prog = rx_ring->q_vector->adapter->xdp_prog; + + return 0; +err: + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * ngbe_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
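+ * For example, if allocation fails for Rx queue i, the error path below
+ * rewinds the index and frees the already-allocated rings with
+ * ngbe_free_rx_resources() before the error is returned.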
+ * + * Return 0 on success, negative on failure + **/ +static int ngbe_setup_all_rx_resources(struct ngbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = ngbe_setup_rx_resources(adapter->rx_ring[i]); + if (!err) { + continue; + } + + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + goto err_setup_rx; + } + + return 0; +err_setup_rx: + /* rewind the index freeing the rings as we go */ + while (i--) + ngbe_free_rx_resources(adapter->rx_ring[i]); + return err; +} + +/** + * ngbe_setup_isb_resources - allocate interrupt status resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int ngbe_setup_isb_resources(struct ngbe_adapter *adapter) +{ + struct device *dev = pci_dev_to_dev(adapter->pdev); + + adapter->isb_mem = dma_alloc_coherent(dev, + sizeof(u32) * NGBE_ISB_MAX, + &adapter->isb_dma, + GFP_KERNEL); + if (!adapter->isb_mem) { + e_err(probe, "ngbe_setup_isb_resources: alloc isb_mem failed\n"); + return -ENOMEM; + } + memset(adapter->isb_mem, 0, sizeof(u32) * NGBE_ISB_MAX); + return 0; +} + +/** + * ngbe_free_isb_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static void ngbe_free_isb_resources(struct ngbe_adapter *adapter) +{ + struct device *dev = pci_dev_to_dev(adapter->pdev); + + dma_free_coherent(dev, sizeof(u32) * NGBE_ISB_MAX, + adapter->isb_mem, adapter->isb_dma); + adapter->isb_mem = NULL; +} + +/** + * ngbe_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void ngbe_free_tx_resources(struct ngbe_ring *tx_ring) +{ + ngbe_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; +} + +/** + * ngbe_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void ngbe_free_all_tx_resources(struct ngbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + ngbe_free_tx_resources(adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + ngbe_free_tx_resources(adapter->xdp_ring[i]); +} + +/** + * ngbe_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void ngbe_free_rx_resources(struct ngbe_ring *rx_ring) +{ + ngbe_clean_rx_ring(rx_ring); + rx_ring->xdp_prog = NULL; + +#ifdef HAVE_XDP_BUFF_RXQ + if(rx_ring->q_vector) + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); +#endif + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * ngbe_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void ngbe_free_all_rx_resources(struct ngbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + ngbe_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * ngbe_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * 
@new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int ngbe_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); +#ifndef HAVE_NETDEVICE_MIN_MAX_MTU + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; +#endif + +#ifndef HAVE_NETDEVICE_MIN_MAX_MTU + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < 68) || (max_frame > (NGBE_MAX_JUMBO_FRAME_SIZE))) + return -EINVAL; +#else + if ((new_mtu < 68) || (new_mtu > 9414)) + return -EINVAL; + +#endif + + /* + * we cannot allow legacy VFs to enable their receive + * paths when MTU greater than 1500 is configured. So display a + * warning that legacy VFs will be disabled. + */ + if ((adapter->flags & NGBE_FLAG_SRIOV_ENABLED) && +#ifndef HAVE_NETDEVICE_MIN_MAX_MTU + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) +#else + (new_mtu > ETH_DATA_LEN)) +#endif + e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n"); + + e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + + /* must set new MTU before calling down or up */ + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + ngbe_reinit_locked(adapter); + + return 0; +} + +/** + * ngbe_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +int ngbe_open(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + int err; + + /* disallow open during test */ + if (test_bit(__NGBE_TESTING, &adapter->state)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = ngbe_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = ngbe_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + err = ngbe_setup_isb_resources(adapter); + if (err) + goto err_req_isb; + + ngbe_configure(adapter); + + err = ngbe_request_irq(adapter); + if (err) + goto err_req_irq; + + if (adapter->num_tx_queues) { + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_vmdqs > 1 + ? adapter->queues_per_pool + : adapter->num_tx_queues); + if (err) + goto err_set_queues; + } + + if (adapter->num_rx_queues) { + err = netif_set_real_num_rx_queues(netdev, adapter->num_vmdqs > 1 + ? adapter->queues_per_pool + : adapter->num_rx_queues); + if (err) + goto err_set_queues; + } + +#ifdef HAVE_PTP_1588_CLOCK + ngbe_ptp_init(adapter); +#endif + + ngbe_up_complete(adapter); + + return 0; + +err_set_queues: + ngbe_free_irq(adapter); +err_req_irq: + ngbe_free_isb_resources(adapter); +err_req_isb: + ngbe_free_all_rx_resources(adapter); + +err_setup_rx: + ngbe_free_all_tx_resources(adapter); +err_setup_tx: + ngbe_reset(adapter); + return err; +} + +/** + * ngbe_close_suspend - actions necessary to both suspend and close flows + * @adapter: the private adapter struct + * + * This function should contain the necessary work common to both suspending + * and closing of the device. 
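+ * In this driver it is invoked from __ngbe_shutdown() when the interface
+ * is running: it quiesces the device, cleans the Tx/Rx rings, frees the
+ * IRQs and ISB area, and releases all ring resources.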
+ */ +static void ngbe_close_suspend(struct ngbe_adapter *adapter) +{ +#ifdef HAVE_PTP_1588_CLOCK + ngbe_ptp_suspend(adapter); +#endif + ngbe_disable_device(adapter); + + ngbe_clean_all_tx_rings(adapter); + ngbe_clean_all_rx_rings(adapter); + + ngbe_free_irq(adapter); + + ngbe_free_isb_resources(adapter); + ngbe_free_all_rx_resources(adapter); + ngbe_free_all_tx_resources(adapter); +} + +/** + * ngbe_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +int ngbe_close(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + +#ifdef HAVE_PTP_1588_CLOCK + ngbe_ptp_stop(adapter); +#endif + + ngbe_down(adapter); + ngbe_free_irq(adapter); + + ngbe_free_isb_resources(adapter); + ngbe_free_all_rx_resources(adapter); + ngbe_free_all_tx_resources(adapter); + + ngbe_release_hw_control(adapter); + + return 0; +} + +#ifdef CONFIG_PM +#ifndef USE_LEGACY_PM_SUPPORT +static int ngbe_resume(struct device *dev) +#else +static int ngbe_resume(struct pci_dev *pdev) +#endif /* USE_LEGACY_PM_SUPPORT */ +{ + struct ngbe_adapter *adapter; + struct net_device *netdev; + u32 err; +#ifndef USE_LEGACY_PM_SUPPORT + struct pci_dev *pdev = to_pci_dev(dev); +#endif + + adapter = pci_get_drvdata(pdev); + netdev = adapter->netdev; + adapter->hw.hw_addr = adapter->io_addr; + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* + * pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. + */ + pci_save_state(pdev); + wr32(&adapter->hw, NGBE_PSR_WKUP_CTL, adapter->wol); + + err = pci_enable_device_mem(pdev); + if (err) { + e_dev_err("Cannot enable PCI device from suspend\n"); + return err; + } + smp_mb__before_atomic(); + clear_bit(__NGBE_DISABLED, &adapter->state); + pci_set_master(pdev); + + pci_wake_from_d3(pdev, false); + + ngbe_reset(adapter); + + rtnl_lock(); + + err = ngbe_init_interrupt_scheme(adapter); + if (!err && netif_running(netdev)) + err = ngbe_open(netdev); + + rtnl_unlock(); + + if (err) + return err; + + netif_device_attach(netdev); + + return 0; +} + +#ifndef USE_LEGACY_PM_SUPPORT +/** + * ngbe_freeze - quiesce the device (no IRQ's or DMA) + * @dev: The port's netdev + */ +static int ngbe_freeze(struct device *dev) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev)); + struct net_device *netdev = adapter->netdev; + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + ngbe_down(adapter); + ngbe_free_irq(adapter); + } + + ngbe_reset_interrupt_capability(adapter); + + return 0; +} + +/** + * ngbe_thaw - un-quiesce the device + * @dev: The port's netdev + */ +static int ngbe_thaw(struct device *dev) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev)); + struct net_device *netdev = adapter->netdev; + + ngbe_set_interrupt_capability(adapter); + + if (netif_running(netdev)) { + u32 err = ngbe_request_irq(adapter); + if (err) + return err; + + ngbe_up(adapter); + } + + netif_device_attach(netdev); + + return 0; +} +#endif /* USE_LEGACY_PM_SUPPORT */ +#endif /* CONFIG_PM */ + +/* + * __ngbe_shutdown is not used when power manangbeent + * is disabled on older kernels (<2.6.12). causes a compile + * warning/error, because it is defined and not used. 
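+ * It is therefore only built when CONFIG_PM is enabled or
+ * USE_REBOOT_NOTIFIER is not defined, see the #if guard below.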
+ */ +#if defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) +static int __ngbe_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + struct ngbe_hw *hw = &adapter->hw; + u32 wufc = adapter->wol; +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + ngbe_mac_set_default_filter(adapter, hw->mac.perm_addr); + + rtnl_lock(); + if (netif_running(netdev)) + ngbe_close_suspend(adapter); + rtnl_unlock(); + + ngbe_clear_interrupt_scheme(adapter); + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + /* this won't stop link of managebility or WoL is enabled */ + ngbe_stop_mac_link_on_d3(hw); + + if (wufc) { + ngbe_set_rx_mode(netdev); + ngbe_configure_rx(adapter); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & NGBE_PSR_WKUP_CTL_MC) { + wr32m(hw, NGBE_PSR_CTL, + NGBE_PSR_CTL_MPE, NGBE_PSR_CTL_MPE); + } + + pci_clear_master(adapter->pdev); + wr32(hw, NGBE_PSR_WKUP_CTL, wufc); + } else { + wr32(hw, NGBE_PSR_WKUP_CTL, 0); + } + + pci_wake_from_d3(pdev, !!wufc); + + *enable_wake = !!wufc; + ngbe_release_hw_control(adapter); + + if (!test_and_set_bit(__NGBE_DISABLED, &adapter->state)) + pci_disable_device(pdev); + + return 0; +} +#endif /* defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) */ + +#ifdef CONFIG_PM +#ifndef USE_LEGACY_PM_SUPPORT +static int ngbe_suspend(struct device *dev) +#else +static int ngbe_suspend(struct pci_dev *pdev, + pm_message_t __always_unused state) +#endif /* USE_LEGACY_PM_SUPPORT */ +{ + int retval; + bool wake; +#ifndef USE_LEGACY_PM_SUPPORT + struct pci_dev *pdev = to_pci_dev(dev); +#endif + + retval = __ngbe_shutdown(pdev, &wake); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} +#endif /* CONFIG_PM */ + +#ifndef USE_REBOOT_NOTIFIER +static void ngbe_shutdown(struct pci_dev *pdev) +{ + bool wake = 0; + + __ngbe_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#endif +#ifdef HAVE_NDO_GET_STATS64 +/** + * ngbe_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: storage space for 64bit statistics + * + * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This + * function replaces ngbe_get_stats for kernels which support it. 
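+ * Per-ring packet and byte counts are read under u64_stats_fetch_begin_irq()
+ * retry loops and summed into @stats; the error counters are copied from
+ * netdev->stats, which ngbe_watchdog_task() keeps up to date.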
+ */ +#ifdef HAVE_VOID_NDO_GET_STATS64 +static void ngbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#else +static struct rtnl_link_stats64 *ngbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#endif +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + int i; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ngbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, + start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ngbe_ring *ring = READ_ONCE(adapter->tx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, + start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct ngbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, + start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + rcu_read_unlock(); + /* following stats updated by ngbe_watchdog_task() */ + stats->multicast = netdev->stats.multicast; + stats->rx_errors = netdev->stats.rx_errors; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_missed_errors = netdev->stats.rx_missed_errors; +#ifndef HAVE_VOID_NDO_GET_STATS64 + return stats; +#endif +} +#else +/** + * ngbe_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. + **/ +static struct net_device_stats *ngbe_get_stats(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + /* update the stats data */ + ngbe_update_stats(adapter); + +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + /* only return the current stats */ + return &netdev->stats; +#else + /* only return the current stats */ + return &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ +} +#endif + +/** + * ngbe_update_stats - Update the board statistics counters. 
+ * @adapter: board private structure + **/ +void ngbe_update_stats(struct ngbe_adapter *adapter) +{ + +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *net_stats = &adapter->netdev->stats; +#else + struct net_device_stats *net_stats = &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_hw_stats *hwstats = &adapter->stats; + u64 total_mpc = 0; + u32 i, bprc, lxon, lxoff; + u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; + u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; + u64 bytes = 0, packets = 0, hw_csum_rx_error = 0; + u64 hw_csum_rx_good = 0; +#ifndef NGBE_NO_LRO + u32 flushed = 0, coal = 0; +#endif + + + if (test_bit(__NGBE_DOWN, &adapter->state) || + test_bit(__NGBE_RESETTING, &adapter->state)) + return; + +#ifndef NGBE_NO_LRO + for (i = 0; i < adapter->num_q_vectors; i++) { + struct ngbe_q_vector *q_vector = adapter->q_vector[i]; + if (!q_vector) + continue; + flushed += q_vector->lrolist.stats.flushed; + coal += q_vector->lrolist.stats.coal; + } + adapter->lro_stats.flushed = flushed; + adapter->lro_stats.coal = coal; + + +#endif + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ngbe_ring *rx_ring = adapter->rx_ring[i]; + non_eop_descs += rx_ring->rx_stats.non_eop_descs; + alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; + alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; + hw_csum_rx_error += rx_ring->rx_stats.csum_err; + hw_csum_rx_good += rx_ring->rx_stats.csum_good_cnt; + bytes += rx_ring->stats.bytes; + packets += rx_ring->stats.packets; + + } + + adapter->non_eop_descs = non_eop_descs; + adapter->alloc_rx_page_failed = alloc_rx_page_failed; + adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; + adapter->hw_csum_rx_error = hw_csum_rx_error; + adapter->hw_csum_rx_good = hw_csum_rx_good; + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + bytes = 0; + packets = 0; + /* gather some stats to the adapter struct that are per queue */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ngbe_ring *tx_ring = adapter->tx_ring[i]; + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + bytes += tx_ring->stats.bytes; + packets += tx_ring->stats.packets; + } + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct ngbe_ring *xdp_ring = adapter->xdp_ring[i]; + + restart_queue += xdp_ring->tx_stats.restart_queue; + tx_busy += xdp_ring->tx_stats.tx_busy; + bytes += xdp_ring->stats.bytes; + packets += xdp_ring->stats.packets; + } + adapter->restart_queue = restart_queue; + adapter->tx_busy = tx_busy; + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + + hwstats->crcerrs += rd32(hw, NGBE_RX_CRC_ERROR_FRAMES_LOW); + + hwstats->gprc += rd32(hw, NGBE_PX_GPRC); + + ngbe_update_xoff_rx_lfc(adapter); + + hwstats->o2bgptc += rd32(hw, NGBE_TDM_OS2BMC_CNT); + if (ngbe_check_mng_access(&adapter->hw)) { + hwstats->o2bspc += rd32(hw, NGBE_MNG_OS2BMC_CNT); + hwstats->b2ospc += rd32(hw, NGBE_MNG_BMC2OS_CNT); + } + hwstats->b2ogprc += rd32(hw, NGBE_RDM_BMC2OS_CNT); + hwstats->gorc += rd32(hw, NGBE_PX_GORC_LSB); + hwstats->gorc += (u64)rd32(hw, NGBE_PX_GORC_MSB) << 32; + + hwstats->gotc += rd32(hw, NGBE_PX_GOTC_LSB); + hwstats->gotc += (u64)rd32(hw, NGBE_PX_GOTC_MSB) << 32; + + + adapter->hw_rx_no_dma_resources += + rd32(hw, NGBE_RDM_DRP_PKT); + bprc = rd32(hw, NGBE_RX_BC_FRAMES_GOOD_LOW); + hwstats->bprc += bprc; + hwstats->mprc = 0; + + for (i = 0; i < 8; i++) + hwstats->mprc += rd32(hw, 
NGBE_PX_MPRC(i)); + + hwstats->roc += rd32(hw, NGBE_RX_OVERSIZE_FRAMES_GOOD); + hwstats->rlec += rd32(hw, NGBE_RX_LEN_ERROR_FRAMES_LOW); + lxon = rd32(hw, NGBE_RDB_LXONTXC); + hwstats->lxontxc += lxon; + lxoff = rd32(hw, NGBE_RDB_LXOFFTXC); + hwstats->lxofftxc += lxoff; + + hwstats->gptc += rd32(hw, NGBE_PX_GPTC); + hwstats->mptc += rd32(hw, NGBE_TX_MC_FRAMES_GOOD_LOW); + hwstats->ruc += rd32(hw, NGBE_RX_UNDERSIZE_FRAMES_GOOD); + hwstats->tpr += rd32(hw, NGBE_RX_FRAME_CNT_GOOD_BAD_LOW); + hwstats->bptc += rd32(hw, NGBE_TX_BC_FRAMES_GOOD_LOW); + /* Fill out the OS statistics structure */ + net_stats->multicast = hwstats->mprc; + + /* Rx Errors */ + net_stats->rx_errors = hwstats->crcerrs + + hwstats->rlec; + net_stats->rx_dropped = 0; + net_stats->rx_length_errors = hwstats->rlec; + net_stats->rx_crc_errors = hwstats->crcerrs; + total_mpc = rd32(hw, NGBE_RDB_MPCNT); + net_stats->rx_missed_errors += total_mpc; + + /* + * VF Stats Collection - skip while resetting because these + * are not clear on read and otherwise you'll sometimes get + * crazy values. + */ + if (!test_bit(__NGBE_RESETTING, &adapter->state)) { + for (i = 0; i < adapter->num_vfs; i++) { + UPDATE_VF_COUNTER_32bit(NGBE_VX_GPRC, \ + adapter->vfinfo->last_vfstats.gprc, \ + adapter->vfinfo->vfstats.gprc); + UPDATE_VF_COUNTER_32bit(NGBE_VX_GPTC, \ + adapter->vfinfo->last_vfstats.gptc, \ + adapter->vfinfo->vfstats.gptc); + UPDATE_VF_COUNTER_36bit(NGBE_VX_GORC_LSB, \ + NGBE_VX_GORC_MSB, \ + adapter->vfinfo->last_vfstats.gorc, \ + adapter->vfinfo->vfstats.gorc); + UPDATE_VF_COUNTER_36bit(NGBE_VX_GOTC_LSB, \ + NGBE_VX_GOTC_MSB, \ + adapter->vfinfo->last_vfstats.gotc, \ + adapter->vfinfo->vfstats.gotc); + UPDATE_VF_COUNTER_32bit(NGBE_VX_MPRC, \ + adapter->vfinfo->last_vfstats.mprc, \ + adapter->vfinfo->vfstats.mprc); + } + } + +} + +/** + * ngbe_check_hang_subtask - check for hung queues and dropped interrupts + * @adapter - pointer to the device adapter structure + * + * This function serves two purposes. First it strobes the interrupt lines + * in order to make certain interrupts are occurring. Secondly it sets the + * bits needed to check for TX hangs. As a result we should immediately + * determine if a hang has occurred. 
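+ * The check is skipped while the adapter is down, being removed or
+ * resetting, and the per-ring hang detection is only armed while the
+ * carrier is up.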
+ */ +static void ngbe_check_hang_subtask(struct ngbe_adapter *adapter) +{ + int i; + + /* If we're down or resetting, just bail */ + if (test_bit(__NGBE_DOWN, &adapter->state) || + test_bit(__NGBE_REMOVING, &adapter->state) || + test_bit(__NGBE_RESETTING, &adapter->state)) + return; + + /* Force detection of hung controller */ + if (netif_carrier_ok(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + set_check_for_tx_hang(adapter->tx_ring[i]); + } + if (netif_carrier_ok(adapter->netdev)) { + for (i = 0; i < adapter->num_xdp_queues; i++) + set_check_for_tx_hang(adapter->xdp_ring[i]); + } +} + +static void ngbe_watchdog_an_complete(struct ngbe_adapter *adapter) +{ + u32 link_speed = 0; + u32 lan_speed = 0; + bool link_up = true; + struct ngbe_hw *hw = &adapter->hw; + + if (!(adapter->flags & NGBE_FLAG_NEED_ANC_CHECK)) + return; + + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + + adapter->link_speed = link_speed; + switch (link_speed) { + case NGBE_LINK_SPEED_100_FULL: + lan_speed = 1; + break; + case NGBE_LINK_SPEED_1GB_FULL: + lan_speed = 2; + break; + case NGBE_LINK_SPEED_10_FULL: + lan_speed = 0; + break; + default: + break; + } + wr32m(hw, NGBE_CFG_LAN_SPEED, + 0x3, lan_speed); + + if (link_speed & (NGBE_LINK_SPEED_1GB_FULL | + NGBE_LINK_SPEED_100_FULL | NGBE_LINK_SPEED_10_FULL)) { + wr32(hw, NGBE_MAC_TX_CFG, + (rd32(hw, NGBE_MAC_TX_CFG) & + ~NGBE_MAC_TX_CFG_SPEED_MASK) | NGBE_MAC_TX_CFG_TE | + NGBE_MAC_TX_CFG_SPEED_1G); + } + + + + adapter->flags &= ~NGBE_FLAG_NEED_ANC_CHECK; + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; + + return; +} + +/** + * ngbe_watchdog_update_link - update the link status + * @adapter - pointer to the device adapter structure + * @link_speed - pointer to a u32 to store the link_speed + **/ +static void ngbe_watchdog_update_link_status(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool link_up = adapter->link_up; + u32 lan_speed = 0; + u32 reg; + + if (NGBE_POLL_LINK_STATUS != 1) { + if (!(adapter->flags & NGBE_FLAG_NEED_LINK_UPDATE)) + return; + } + + link_speed = NGBE_LINK_SPEED_1GB_FULL; + link_up = true; + + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + + if (NGBE_POLL_LINK_STATUS != 1) { + if (link_up || time_after(jiffies, (adapter->link_check_timeout + + NGBE_TRY_LINK_TIMEOUT))) { + adapter->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE; + } + } else { + if (adapter->link_up == link_up && adapter->link_speed == link_speed) + return; + } + + adapter->link_speed = link_speed; + switch (link_speed) { + case NGBE_LINK_SPEED_100_FULL: + lan_speed = 1; + break; + case NGBE_LINK_SPEED_1GB_FULL: + lan_speed = 2; + break; + case NGBE_LINK_SPEED_10_FULL: + lan_speed = 0; + break; + default: + break; + } + wr32m(hw, NGBE_CFG_LAN_SPEED, + 0x3, lan_speed); + + if (link_up) { + hw->mac.ops.fc_enable(hw); + ngbe_set_rx_drop_en(adapter); + } + + if (link_up) { + +#ifdef HAVE_PTP_1588_CLOCK + adapter->last_rx_ptp_check = jiffies; + + if (test_bit(__NGBE_PTP_RUNNING, &adapter->state)) + ngbe_ptp_start_cyclecounter(adapter); + +#endif + if (link_speed & (NGBE_LINK_SPEED_1GB_FULL | + NGBE_LINK_SPEED_100_FULL | NGBE_LINK_SPEED_10_FULL)) { + wr32(hw, NGBE_MAC_TX_CFG, + (rd32(hw, NGBE_MAC_TX_CFG) & + ~NGBE_MAC_TX_CFG_SPEED_MASK) | NGBE_MAC_TX_CFG_TE | + NGBE_MAC_TX_CFG_SPEED_1G); + } + + /* Re configure MAC RX */ + reg = rd32(hw, NGBE_MAC_RX_CFG); + wr32(hw, NGBE_MAC_RX_CFG, reg); + wr32(hw, NGBE_MAC_PKT_FLT, NGBE_MAC_PKT_FLT_PR); + reg = rd32(hw, NGBE_MAC_WDG_TIMEOUT); + 
wr32(hw, NGBE_MAC_WDG_TIMEOUT, reg); + } + + adapter->link_up = link_up; + /* hw->mac.ops.dmac_config is null*/ + if (hw->mac.ops.dmac_config && hw->mac.dmac_config.watchdog_timer) { + u8 num_tcs = netdev_get_num_tc(adapter->netdev); + + if (hw->mac.dmac_config.link_speed != link_speed || + hw->mac.dmac_config.num_tcs != num_tcs) { + hw->mac.dmac_config.link_speed = link_speed; + hw->mac.dmac_config.num_tcs = num_tcs; + hw->mac.ops.dmac_config(hw); + } + } + return; +} + +static void ngbe_update_default_up(struct ngbe_adapter *adapter) +{ + u8 up = 0; + adapter->default_up = up; +} + +/** + * ngbe_watchdog_link_is_up - update netif_carrier status and + * print link up message + * @adapter - pointer to the device adapter structure + **/ +static void ngbe_watchdog_link_is_up(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ngbe_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool flow_rx, flow_tx; +#ifdef HAVE_VIRTUAL_STATION + struct net_device *upper; + struct list_head *iter; +#endif + + /* only continue if link was previously down */ + if (netif_carrier_ok(netdev)) + return; + + adapter->flags2 &= ~NGBE_FLAG2_SEARCH_FOR_SFP; + + /* flow_rx, flow_tx report link flow control status */ + flow_rx = (rd32(hw, NGBE_MAC_RX_FLOW_CTRL) & 0x101) == 0x1; + flow_tx = !!(NGBE_RDB_RFCC_RFCE_802_3X & + rd32(hw, NGBE_RDB_RFCC)); + + e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", + (link_speed == NGBE_LINK_SPEED_1GB_FULL ? + "1 Gbps" : + (link_speed == NGBE_LINK_SPEED_100_FULL ? + "100 Mbps" : + (link_speed == NGBE_LINK_SPEED_10_FULL ? + "10 Mbps" : + "unknown speed"))), + ((flow_rx && flow_tx) ? "RX/TX" : + (flow_rx ? "RX" : + (flow_tx ? "TX" : "None")))); + + netif_carrier_on(netdev); + + + + netif_tx_wake_all_queues(netdev); +#ifdef HAVE_VIRTUAL_STATION + /* enable any upper devices */ + rtnl_lock(); + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *vlan = netdev_priv(upper); + + if (vlan->fwd_priv) + netif_tx_wake_all_queues(upper); + } + } + rtnl_unlock(); +#endif + /* update the default user priority for VFs */ + ngbe_update_default_up(adapter); + + /* ping all the active vfs to let them know link has changed */ + ngbe_ping_all_vfs_with_link_status(adapter, adapter->link_up); +} + +/** + * ngbe_watchdog_link_is_down - update netif_carrier status and + * print link down message + * @adapter - pointer to the adapter structure + **/ +static void ngbe_watchdog_link_is_down(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + adapter->link_up = false; + adapter->link_speed = 0; + + /* only continue if link was up previously */ + if (!netif_carrier_ok(netdev)) + return; + + +#ifdef HAVE_PTP_1588_CLOCK + if (test_bit(__NGBE_PTP_RUNNING, &adapter->state)) + ngbe_ptp_start_cyclecounter(adapter); + +#endif + e_info(drv, "NIC Link is Down\n"); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + /* ping all the active vfs to let them know link has changed */ + ngbe_ping_all_vfs_with_link_status(adapter, adapter->link_up); +} + +static bool ngbe_ring_tx_pending(struct ngbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ngbe_ring *tx_ring = adapter->tx_ring[i]; + + if (tx_ring->next_to_use != tx_ring->next_to_clean) + return true; + } + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct ngbe_ring *xdp_ring = adapter->xdp_ring[i]; + + if (xdp_ring->next_to_use != 
xdp_ring->next_to_clean) + return true; + } + + return false; +} + +static bool ngbe_vf_tx_pending(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 q_per_pool = 1; + + u32 i, j; + + if (!adapter->num_vfs) + return false; + + for (i = 0; i < adapter->num_vfs; i++) { + for (j = 0; j < q_per_pool; j++) { + u32 h, t; + + h = rd32(hw, + NGBE_PX_TR_RPn(q_per_pool, i, j)); + t = rd32(hw, + NGBE_PX_TR_WPn(q_per_pool, i, j)); + + if (h != t) + return true; + } + } + + return false; +} + +/** + * ngbe_watchdog_flush_tx - flush queues on link down + * @adapter - pointer to the device adapter structure + **/ +static void ngbe_watchdog_flush_tx(struct ngbe_adapter *adapter) +{ + if (!netif_carrier_ok(adapter->netdev)) { + if (ngbe_ring_tx_pending(adapter) || + ngbe_vf_tx_pending(adapter)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + e_warn(drv, "initiating reset due to lost link with " + "pending Tx work\n"); + adapter->flags2 |= NGBE_FLAG2_PF_RESET_REQUESTED; + } + } +} + +#ifdef CONFIG_PCI_IOV +static inline void ngbe_issue_vf_flr(struct ngbe_adapter *adapter, + struct pci_dev *vfdev) +{ + int pos, i; + u16 status; + + /* wait for pending transactions on the bus */ + for (i = 0; i < 4; i++) { + if (i) + msleep((1 << (i - 1)) * 100); + + pcie_capability_read_word(vfdev, PCI_EXP_DEVSTA, &status); + if (!(status & PCI_EXP_DEVSTA_TRPND)) + goto clear; + } + + e_dev_warn("Issuing VFLR with pending transactions\n"); + +clear: + pos = pci_find_capability(vfdev, PCI_CAP_ID_EXP); + if (!pos) + return; + + e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev)); + pci_write_config_word(vfdev, pos + PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_BCR_FLR); + msleep(100); +} + + +static void ngbe_spoof_check(struct ngbe_adapter *adapter) +{ + u32 ssvpc; + + /* Do not perform spoof check if in non-IOV mode */ + if (adapter->num_vfs == 0) + return; + ssvpc = rd32(&adapter->hw, NGBE_TDM_SEC_DRP); + + /* + * ssvpc register is cleared on read, if zero then no + * spoofed packets in the last interval. 
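+ * A non-zero count only triggers the warning below; no reset or other
+ * recovery is initiated from here.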
+ */ + if (!ssvpc) + return; + + e_warn(drv, "%d Spoofed packets detected\n", ssvpc); +} + +#endif /* CONFIG_PCI_IOV */ + +/** + * ngbe_watchdog_subtask - check and bring link up + * @adapter - pointer to the device adapter structure + **/ +static void ngbe_watchdog_subtask(struct ngbe_adapter *adapter) +{ + + /* if interface is down do nothing */ + if (test_bit(__NGBE_DOWN, &adapter->state) || + test_bit(__NGBE_REMOVING, &adapter->state) || + test_bit(__NGBE_RESETTING, &adapter->state)) + return; + + ngbe_watchdog_an_complete(adapter); + + if (NGBE_POLL_LINK_STATUS != 1) { + ngbe_watchdog_update_link_status(adapter); + + if (adapter->link_up) + ngbe_watchdog_link_is_up(adapter); + else + ngbe_watchdog_link_is_down(adapter); + } +#ifdef CONFIG_PCI_IOV + ngbe_spoof_check(adapter); +#endif /* CONFIG_PCI_IOV */ + + ngbe_update_stats(adapter); + + ngbe_watchdog_flush_tx(adapter); +} + +/** + * ngbe_service_timer - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void ngbe_service_timer(struct timer_list *t) +{ + struct ngbe_adapter *adapter = from_timer(adapter, t, service_timer); + unsigned long next_event_offset; + struct ngbe_hw *hw = &adapter->hw; + u32 val = 0; + + /* poll faster when waiting for link */ + if ((adapter->flags & NGBE_FLAG_NEED_LINK_UPDATE) || + (adapter->flags & NGBE_FLAG_NEED_ANC_CHECK)) + next_event_offset = HZ / 10; + else + next_event_offset = HZ * 2; + + /* flags to records which func to handle pcie recovery */ + if (rd32(&adapter->hw, NGBE_MIS_PF_SM) == 1) { + val = rd32m(&adapter->hw, NGBE_MIS_PRB_CTL, NGBE_MIS_PRB_CTL_LAN0_UP | + NGBE_MIS_PRB_CTL_LAN1_UP | + NGBE_MIS_PRB_CTL_LAN2_UP | + NGBE_MIS_PRB_CTL_LAN3_UP); + if (val & NGBE_MIS_PRB_CTL_LAN0_UP) { + if (hw->bus.lan_id == 0) { + adapter->flags2 |= NGBE_FLAG2_PCIE_NEED_RECOVER; + e_info(probe, "ngbe_service_timer: set recover on Lan0\n"); + } + } else if (val & NGBE_MIS_PRB_CTL_LAN1_UP) { + if (hw->bus.lan_id == 1) { + adapter->flags2 |= NGBE_FLAG2_PCIE_NEED_RECOVER; + e_info(probe, "ngbe_service_timer: set recover on Lan1\n"); + } + } else if (val & NGBE_MIS_PRB_CTL_LAN2_UP) { + if (hw->bus.lan_id == 2) { + adapter->flags2 |= NGBE_FLAG2_PCIE_NEED_RECOVER; + e_info(probe, "ngbe_service_timer: set recover on Lan2\n"); + } + } else if (val & NGBE_MIS_PRB_CTL_LAN3_UP) { + if (hw->bus.lan_id == 3) { + adapter->flags2 |= NGBE_FLAG2_PCIE_NEED_RECOVER; + e_info(probe, "ngbe_service_timer: set recover on Lan3\n"); + } + } + } + + /* Reset the timer */ + mod_timer(&adapter->service_timer, next_event_offset + jiffies); + + ngbe_service_event_schedule(adapter); +} + +static void ngbe_link_check_timer(struct timer_list *t) +{ + struct ngbe_adapter *adapter = from_timer(adapter, t, link_check_timer); + unsigned long next_event_offset = HZ / 1000; + if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi) + next_event_offset = HZ / 10; + + mod_timer(&adapter->link_check_timer, next_event_offset + jiffies); + /* if interface is down do nothing */ + if (test_bit(__NGBE_DOWN, &adapter->state) || + test_bit(__NGBE_REMOVING, &adapter->state) || + test_bit(__NGBE_RESETTING, &adapter->state)) + return; + + ngbe_watchdog_update_link_status(adapter); + + if (adapter->link_up) + ngbe_watchdog_link_is_up(adapter); + else + ngbe_watchdog_link_is_down(adapter); +} + +static void ngbe_reset_subtask(struct ngbe_adapter *adapter) +{ + u32 reset_flag = 0; + u32 value = 0; + + if (!(adapter->flags2 & (NGBE_FLAG2_PF_RESET_REQUESTED | + NGBE_FLAG2_DEV_RESET_REQUESTED | + NGBE_FLAG2_GLOBAL_RESET_REQUESTED | + 
NGBE_FLAG2_RESET_INTR_RECEIVED))) + return; + + /* If we're already down, just bail */ + if (test_bit(__NGBE_DOWN, &adapter->state) || + test_bit(__NGBE_REMOVING, &adapter->state)) + return; + + netdev_err(adapter->netdev, "Reset adapter\n"); + adapter->tx_timeout_count++; + + rtnl_lock(); + if (adapter->flags2 & NGBE_FLAG2_GLOBAL_RESET_REQUESTED) { + reset_flag |= NGBE_FLAG2_GLOBAL_RESET_REQUESTED; + adapter->flags2 &= ~NGBE_FLAG2_GLOBAL_RESET_REQUESTED; + } + if (adapter->flags2 & NGBE_FLAG2_DEV_RESET_REQUESTED) { + reset_flag |= NGBE_FLAG2_DEV_RESET_REQUESTED; + adapter->flags2 &= ~NGBE_FLAG2_DEV_RESET_REQUESTED; + } + if (adapter->flags2 & NGBE_FLAG2_PF_RESET_REQUESTED) { + reset_flag |= NGBE_FLAG2_PF_RESET_REQUESTED; + adapter->flags2 &= ~NGBE_FLAG2_PF_RESET_REQUESTED; + } + + if (adapter->flags2 & NGBE_FLAG2_RESET_INTR_RECEIVED) { + /* If there's a recovery already waiting, it takes + * precedence before starting a new reset sequence. + */ + adapter->flags2 &= ~NGBE_FLAG2_RESET_INTR_RECEIVED; + value = rd32m(&adapter->hw, NGBE_MIS_RST_ST, + NGBE_MIS_RST_ST_DEV_RST_TYPE_MASK) >> + NGBE_MIS_RST_ST_DEV_RST_TYPE_SHIFT; + if (value == NGBE_MIS_RST_ST_DEV_RST_TYPE_SW_RST) { + adapter->hw.reset_type = NGBE_SW_RESET; + + } else if (value == NGBE_MIS_RST_ST_DEV_RST_TYPE_GLOBAL_RST) + adapter->hw.reset_type = NGBE_GLOBAL_RESET; + adapter->hw.force_full_reset = TRUE; + ngbe_reinit_locked(adapter); + adapter->hw.force_full_reset = FALSE; + goto unlock; + } + + if (reset_flag & NGBE_FLAG2_DEV_RESET_REQUESTED) { + /* Request a Device Reset + * + * This will start the chip's countdown to the actual full + * chip reset event, and a warning interrupt to be sent + * to all PFs, including the requestor. Our handler + * for the warning interrupt will deal with the shutdown + * and recovery of the switch setup. + */ + /*debug to open*/ + /*ngbe_dump(adapter);*/ + + wr32m(&adapter->hw, NGBE_MIS_RST, + NGBE_MIS_RST_SW_RST, NGBE_MIS_RST_SW_RST); + e_info(drv, "ngbe_reset_subtask: sw reset\n"); + + } else if (reset_flag & NGBE_FLAG2_PF_RESET_REQUESTED) { + /*debug to open*/ + /*ngbe_dump(adapter);*/ + ngbe_reinit_locked(adapter); + } else if (reset_flag & NGBE_FLAG2_GLOBAL_RESET_REQUESTED) { + /* Request a Global Reset + * + * This will start the chip's countdown to the actual full + * chip reset event, and a warning interrupt to be sent + * to all PFs, including the requestor. Our handler + * for the warning interrupt will deal with the shutdown + * and recovery of the switch setup. 
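+ * If management firmware is present the reset is requested through
+ * ngbe_reset_hostif(), otherwise NGBE_MIS_RST_GLOBAL_RST is written
+ * directly to NGBE_MIS_RST.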
+ */ + /*debug to open*/ + /*ngbe_dump(adapter);*/ + pci_save_state(adapter->pdev); + if (ngbe_mng_present(&adapter->hw)) { + ngbe_reset_hostif(&adapter->hw); + e_info(drv, "ngbe_reset_subtask: lan reset\n"); + + } else { + wr32m(&adapter->hw, NGBE_MIS_RST, + NGBE_MIS_RST_GLOBAL_RST, + NGBE_MIS_RST_GLOBAL_RST); + e_info(drv, "ngbe_reset_subtask: global reset\n"); + } + } + +unlock: + rtnl_unlock(); +} + +static void ngbe_check_pcie_subtask(struct ngbe_adapter *adapter) +{ + if (!(adapter->flags2 & NGBE_FLAG2_PCIE_NEED_RECOVER)) + return; + + ngbe_print_tx_hang_status(adapter); + + wr32m(&adapter->hw, NGBE_MIS_PF_SM, NGBE_MIS_PF_SM_SM, 0); + if ((NGBE_PCIE_RECOVER == 1) && !(adapter->flags & NGBE_FLAG_SRIOV_ENABLED)) { + e_info(probe, "do recovery\n"); + ngbe_pcie_do_recovery(adapter->pdev); + } + adapter->flags2 &= ~NGBE_FLAG2_PCIE_NEED_RECOVER; +} + +/** + * ngbe_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +static void ngbe_service_task(struct work_struct *work) +{ + struct ngbe_adapter *adapter = container_of(work, + struct ngbe_adapter, + service_task); + if (NGBE_REMOVED(adapter->hw.hw_addr)) { + if (!test_bit(__NGBE_DOWN, &adapter->state)) { + rtnl_lock(); + ngbe_down(adapter); + rtnl_unlock(); + } + ngbe_service_event_complete(adapter); + return; + } + + ngbe_check_pcie_subtask(adapter); + ngbe_reset_subtask(adapter); + ngbe_check_overtemp_subtask(adapter); + ngbe_watchdog_subtask(adapter); + ngbe_check_hang_subtask(adapter); +#ifdef HAVE_PTP_1588_CLOCK + if (test_bit(__NGBE_PTP_RUNNING, &adapter->state)) { + ngbe_ptp_overflow_check(adapter); + if (unlikely(adapter->flags & + NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)) + ngbe_ptp_rx_hang(adapter); + } +#endif /* HAVE_PTP_1588_CLOCK */ + + ngbe_service_event_complete(adapter); +} + +static u8 get_ipv6_proto(struct sk_buff *skb, int offset) +{ + struct ipv6hdr *hdr = (struct ipv6hdr *)(skb->data + offset); + u8 nexthdr = hdr->nexthdr; + + offset += sizeof(struct ipv6hdr); + + while (ipv6_ext_hdr(nexthdr)) { + struct ipv6_opt_hdr _hdr, *hp; + + if (nexthdr == NEXTHDR_NONE) + break; + + hp = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr); + if (!hp) + break; + + if (nexthdr == NEXTHDR_FRAGMENT) { + break; + } else if (nexthdr == NEXTHDR_AUTH) { + offset += ipv6_authlen(hp); + } else { + offset += ipv6_optlen(hp); + } + + nexthdr = hp->nexthdr; + } + + return nexthdr; +} + +union network_header { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + void *raw; +}; + +static ngbe_dptype encode_tx_desc_ptype(const struct ngbe_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; +#ifdef HAVE_ENCAP_TSO_OFFLOAD + u8 tun_prot = 0; +#endif + u8 l4_prot = 0; + u8 ptype = 0; + +#ifdef HAVE_ENCAP_TSO_OFFLOAD + if (skb->encapsulation) { + union network_header hdr; + + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) + goto encap_frag; + ptype = NGBE_PTYPE_TUN_IPV4; + break; + case __constant_htons(ETH_P_IPV6): + tun_prot = get_ipv6_proto(skb, skb_network_offset(skb)); + if (tun_prot == NEXTHDR_FRAGMENT) + goto encap_frag; + ptype = NGBE_PTYPE_TUN_IPV6; + break; + default: + goto exit; + } + + if (tun_prot == IPPROTO_IPIP) { + hdr.raw = (void *)inner_ip_hdr(skb); + ptype |= NGBE_PTYPE_PKT_IPIP; + } else if (tun_prot == IPPROTO_UDP) { + hdr.raw = (void *)inner_ip_hdr(skb); + } else { + goto exit; + } + + switch (hdr.ipv4->version) { + case IPVERSION: + l4_prot = hdr.ipv4->protocol; + if 
(hdr.ipv4->frag_off & htons(IP_MF | IP_OFFSET)) { + ptype |= NGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; + case 6: + l4_prot = get_ipv6_proto(skb, + skb_inner_network_offset(skb)); + ptype |= NGBE_PTYPE_PKT_IPV6; + if (l4_prot == NEXTHDR_FRAGMENT) { + ptype |= NGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; + default: + goto exit; + } + } else { +encap_frag: +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + l4_prot = ip_hdr(skb)->protocol; + ptype = NGBE_PTYPE_PKT_IP; + if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { + ptype |= NGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; +#ifdef NETIF_F_IPV6_CSUM + case __constant_htons(ETH_P_IPV6): + l4_prot = get_ipv6_proto(skb, skb_network_offset(skb)); + ptype = NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6; + if (l4_prot == NEXTHDR_FRAGMENT) { + ptype |= NGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; +#endif /* NETIF_F_IPV6_CSUM */ + case __constant_htons(ETH_P_1588): + ptype = NGBE_PTYPE_L2_TS; + goto exit; + case __constant_htons(ETH_P_FIP): + ptype = NGBE_PTYPE_L2_FIP; + goto exit; + case __constant_htons(NGBE_ETH_P_LLDP): + ptype = NGBE_PTYPE_L2_LLDP; + goto exit; + case __constant_htons(NGBE_ETH_P_CNM): + ptype = NGBE_PTYPE_L2_CNM; + goto exit; + case __constant_htons(ETH_P_PAE): + ptype = NGBE_PTYPE_L2_EAPOL; + goto exit; + case __constant_htons(ETH_P_ARP): + ptype = NGBE_PTYPE_L2_ARP; + goto exit; + default: + ptype = NGBE_PTYPE_L2_MAC; + goto exit; + } +#ifdef HAVE_ENCAP_TSO_OFFLOAD + } +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + + switch (l4_prot) { + case IPPROTO_TCP: + ptype |= NGBE_PTYPE_TYP_TCP; + break; + case IPPROTO_UDP: + ptype |= NGBE_PTYPE_TYP_UDP; + break; +#ifdef HAVE_SCTP + case IPPROTO_SCTP: + ptype |= NGBE_PTYPE_TYP_SCTP; + break; +#endif /* HAVE_SCTP */ + default: + ptype |= NGBE_PTYPE_TYP_IP; + break; + } + +exit: + return ngbe_decode_ptype(ptype); +} + +static int ngbe_tso(struct ngbe_ring *tx_ring, + struct ngbe_tx_buffer *first, + u8 *hdr_len, ngbe_dptype dptype) +{ +#ifndef NETIF_F_TSO + return 0; +#else + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens, type_tucmd; + u32 mss_l4len_idx, l4len; + struct tcphdr *tcph; + struct iphdr *iph; + u32 tunhdr_eiplen_tunlen = 0; +#ifdef HAVE_ENCAP_TSO_OFFLOAD + u8 tun_prot = 0; + bool enc = skb->encapsulation; +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ +#ifdef NETIF_F_TSO6 + struct ipv6hdr *ipv6h; +#endif + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + if (skb_header_cloned(skb)) { + int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) + return err; + } + +#ifdef HAVE_ENCAP_TSO_OFFLOAD + iph = enc ? inner_ip_hdr(skb) : ip_hdr(skb); +#else + iph = ip_hdr(skb); +#endif + if (iph->version == 4) { +#ifdef HAVE_ENCAP_TSO_OFFLOAD + tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb); +#else + tcph = tcp_hdr(skb); +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + iph->tot_len = 0; + iph->check = 0; + tcph->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + first->tx_flags |= NGBE_TX_FLAGS_TSO | + NGBE_TX_FLAGS_CSUM | + NGBE_TX_FLAGS_IPV4 | + NGBE_TX_FLAGS_CC; + +#ifdef NETIF_F_TSO6 + } else if (iph->version == 6 && skb_is_gso_v6(skb)) { +#ifdef HAVE_ENCAP_TSO_OFFLOAD + ipv6h = enc ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); + tcph = enc ? 
inner_tcp_hdr(skb) : tcp_hdr(skb); +#else + ipv6h = ipv6_hdr(skb); + tcph = tcp_hdr(skb); +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + ipv6h->payload_len = 0; + tcph->check = + ~csum_ipv6_magic(&ipv6h->saddr, + &ipv6h->daddr, + 0, IPPROTO_TCP, 0); + first->tx_flags |= NGBE_TX_FLAGS_TSO | + NGBE_TX_FLAGS_CSUM | + NGBE_TX_FLAGS_CC; +#endif /* NETIF_F_TSO6 */ + } + + /* compute header lengths */ +#ifdef HAVE_ENCAP_TSO_OFFLOAD + l4len = enc ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); + *hdr_len = enc ? (skb_inner_transport_header(skb) - skb->data) + : skb_transport_offset(skb); + *hdr_len += l4len; +#else + l4len = tcp_hdrlen(skb); + *hdr_len = skb_transport_offset(skb) + l4len; +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* mss_l4len_id: use 0 as index for TSO */ + mss_l4len_idx = l4len << NGBE_TXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << NGBE_TXD_MSS_SHIFT; + + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ +#ifdef HAVE_ENCAP_TSO_OFFLOAD + if (enc) { + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + first->tx_flags |= NGBE_TX_FLAGS_OUTER_IPV4; + break; + case __constant_htons(ETH_P_IPV6): + tun_prot = ipv6_hdr(skb)->nexthdr; + break; + default: + break; + } + switch (tun_prot) { + case IPPROTO_UDP: + tunhdr_eiplen_tunlen = NGBE_TXD_TUNNEL_UDP; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + NGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_GRE: + tunhdr_eiplen_tunlen = NGBE_TXD_TUNNEL_GRE; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + NGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_IPIP: + tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb)- + (char *)ip_hdr(skb)) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT; + break; + default: + break; + } + + vlan_macip_lens = skb_inner_network_header_len(skb) >> 1; + } else + vlan_macip_lens = skb_network_header_len(skb) >> 1; +#else + vlan_macip_lens = skb_network_header_len(skb) >> 1; +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + vlan_macip_lens |= skb_network_offset(skb) << NGBE_TXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & NGBE_TX_FLAGS_VLAN_MASK; + + type_tucmd = dptype.ptype << 24; +#ifdef NETIF_F_HW_VLAN_STAG_TX + if (skb->vlan_proto == htons(ETH_P_8021AD)) + type_tucmd |= NGBE_SET_FLAG(first->tx_flags, + NGBE_TX_FLAGS_HW_VLAN, + 0x1 << NGBE_TXD_TAG_TPID_SEL_SHIFT); +#endif + ngbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, + type_tucmd, mss_l4len_idx); + + return 1; +#endif /* !NETIF_F_TSO */ +} + +static void ngbe_tx_csum(struct ngbe_ring *tx_ring, + struct ngbe_tx_buffer *first, ngbe_dptype dptype) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 mss_l4len_idx = 0; + u32 tunhdr_eiplen_tunlen = 0; +#ifdef HAVE_ENCAP_TSO_OFFLOAD + u8 tun_prot = 0; +#endif + u32 type_tucmd; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { + if (!(first->tx_flags & NGBE_TX_FLAGS_HW_VLAN) && + !(first->tx_flags & NGBE_TX_FLAGS_CC)) + return; + vlan_macip_lens = skb_network_offset(skb) << + NGBE_TXD_MACLEN_SHIFT; + } else { + u8 l4_prot = 0; +#ifdef HAVE_ENCAP_TSO_OFFLOAD + union { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + u8 *raw; + } network_hdr; + union { 
+ struct tcphdr *tcphdr; + u8 *raw; + } transport_hdr; + + if (skb->encapsulation) { + network_hdr.raw = skb_inner_network_header(skb); + transport_hdr.raw = skb_inner_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << + NGBE_TXD_MACLEN_SHIFT; + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + break; + case __constant_htons(ETH_P_IPV6): + tun_prot = ipv6_hdr(skb)->nexthdr; + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but version=%d\n", + network_hdr.ipv4->version); + } + return; + } + switch (tun_prot) { + case IPPROTO_UDP: + tunhdr_eiplen_tunlen = NGBE_TXD_TUNNEL_UDP; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + NGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_GRE: + tunhdr_eiplen_tunlen = NGBE_TXD_TUNNEL_GRE; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + NGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_IPIP: + tunhdr_eiplen_tunlen = + (((char *)inner_ip_hdr(skb)- + (char *)ip_hdr(skb)) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT; + break; + default: + break; + } + + } else { + network_hdr.raw = skb_network_header(skb); + transport_hdr.raw = skb_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << + NGBE_TXD_MACLEN_SHIFT; + } + + switch (network_hdr.ipv4->version) { + case IPVERSION: + vlan_macip_lens |= + (transport_hdr.raw - network_hdr.raw) >> 1; + l4_prot = network_hdr.ipv4->protocol; + break; + case 6: + vlan_macip_lens |= + (transport_hdr.raw - network_hdr.raw) >> 1; + l4_prot = network_hdr.ipv6->nexthdr; + break; + default: + break; + } + +#else /* HAVE_ENCAP_TSO_OFFLOAD */ + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + vlan_macip_lens |= skb_network_header_len(skb) >> 1; + l4_prot = ip_hdr(skb)->protocol; + break; +#ifdef NETIF_F_IPV6_CSUM + case __constant_htons(ETH_P_IPV6): + vlan_macip_lens |= skb_network_header_len(skb) >> 1; + l4_prot = ipv6_hdr(skb)->nexthdr; + break; +#endif /* NETIF_F_IPV6_CSUM */ + default: + break; + } +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + + switch (l4_prot) { + case IPPROTO_TCP: +#ifdef HAVE_ENCAP_TSO_OFFLOAD + mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) << + NGBE_TXD_L4LEN_SHIFT; +#else + mss_l4len_idx = tcp_hdrlen(skb) << + NGBE_TXD_L4LEN_SHIFT; +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + break; +#ifdef HAVE_SCTP + case IPPROTO_SCTP: + mss_l4len_idx = sizeof(struct sctphdr) << + NGBE_TXD_L4LEN_SHIFT; + break; +#endif /* HAVE_SCTP */ + case IPPROTO_UDP: + mss_l4len_idx = sizeof(struct udphdr) << + NGBE_TXD_L4LEN_SHIFT; + break; + default: + break; + } + + /* update TX checksum flag */ + first->tx_flags |= NGBE_TX_FLAGS_CSUM; + } + first->tx_flags |= NGBE_TX_FLAGS_CC; + /* vlan_macip_lens: MACLEN, VLAN tag */ +#ifndef HAVE_ENCAP_TSO_OFFLOAD + vlan_macip_lens |= skb_network_offset(skb) << NGBE_TXD_MACLEN_SHIFT; +#endif /* !HAVE_ENCAP_TSO_OFFLOAD */ + vlan_macip_lens |= first->tx_flags & NGBE_TX_FLAGS_VLAN_MASK; + + type_tucmd = dptype.ptype << 24; +#ifdef NETIF_F_HW_VLAN_STAG_TX + if (skb->vlan_proto == htons(ETH_P_8021AD)) + type_tucmd |= NGBE_SET_FLAG(first->tx_flags, + NGBE_TX_FLAGS_HW_VLAN, + 0x1 << NGBE_TXD_TAG_TPID_SEL_SHIFT); +#endif + ngbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, + type_tucmd, mss_l4len_idx); +} + 
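+/* Build the Tx data descriptor cmd_type_len flags from tx_flags: start from
+ * the advanced data descriptor type with FCS insertion, then OR in the VLAN
+ * insert, TSO segmentation, timestamp and LinkSec bits when the matching
+ * NGBE_TX_FLAGS_* bits are set.
+ */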
+static u32 ngbe_tx_cmd_type(u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = NGBE_TXD_DTYP_DATA | + NGBE_TXD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= NGBE_SET_FLAG(tx_flags, NGBE_TX_FLAGS_HW_VLAN, + NGBE_TXD_VLE); + + /* set segmentation enable bits for TSO/FSO */ + cmd_type |= NGBE_SET_FLAG(tx_flags, NGBE_TX_FLAGS_TSO, + NGBE_TXD_TSE); + + /* set timestamp bit if present */ + cmd_type |= NGBE_SET_FLAG(tx_flags, NGBE_TX_FLAGS_TSTAMP, + NGBE_TXD_MAC_TSTAMP); + + cmd_type |= NGBE_SET_FLAG(tx_flags, NGBE_TX_FLAGS_LINKSEC, + NGBE_TXD_LINKSEC); + + return cmd_type; +} + +static void ngbe_tx_olinfo_status(union ngbe_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << NGBE_TXD_PAYLEN_SHIFT; + + /* enable L4 checksum for TSO and TX checksum offload */ + olinfo_status |= NGBE_SET_FLAG(tx_flags, + NGBE_TX_FLAGS_CSUM, + NGBE_TXD_L4CS); + + /* enable IPv4 checksum for TSO */ + olinfo_status |= NGBE_SET_FLAG(tx_flags, + NGBE_TX_FLAGS_IPV4, + NGBE_TXD_IIPCS); + /* enable outer IPv4 checksum for TSO */ + olinfo_status |= NGBE_SET_FLAG(tx_flags, + NGBE_TX_FLAGS_OUTER_IPV4, + NGBE_TXD_EIPCS); + /* + * Check Context must be set if Tx switch is enabled, which it + * always is for the case where virtual functions are running + */ + olinfo_status |= NGBE_SET_FLAG(tx_flags, + NGBE_TX_FLAGS_CC, + NGBE_TXD_CC); + + olinfo_status |= NGBE_SET_FLAG(tx_flags, + NGBE_TX_FLAGS_IPSEC, + NGBE_TXD_IPSEC); + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int __ngbe_maybe_stop_tx(struct ngbe_ring *tx_ring, u16 size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(ngbe_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! 
- use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +static inline int ngbe_maybe_stop_tx(struct ngbe_ring *tx_ring, u16 size) +{ + if (likely(ngbe_desc_unused(tx_ring) >= size)) + return 0; + + return __ngbe_maybe_stop_tx(tx_ring, size); +} + + +static int ngbe_tx_map(struct ngbe_ring *tx_ring, + struct ngbe_tx_buffer *first, + const u8 hdr_len) +{ + struct sk_buff *skb = first->skb; + struct ngbe_tx_buffer *tx_buffer; + union ngbe_tx_desc *tx_desc; + skb_frag_t *frag; + dma_addr_t dma; + unsigned int data_len, size; + u32 tx_flags = first->tx_flags; + u32 cmd_type = ngbe_tx_cmd_type(tx_flags); + u16 i = tx_ring->next_to_use; + + tx_desc = NGBE_TX_DESC(tx_ring, i); + + ngbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > NGBE_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ NGBE_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = NGBE_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += NGBE_MAX_DATA_PER_TXD; + size -= NGBE_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = NGBE_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | NGBE_TXD_CMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + +#ifndef HAVE_TRANS_START_IN_QUEUE + netdev_ring(tx_ring)->trans_start = first->time_stamp; +#endif + /* + * Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + ngbe_maybe_stop_tx(tx_ring, DESC_NEEDED); + + skb_tx_timestamp(skb); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { + writel(i, tx_ring->tail); +#ifndef SPIN_UNLOCK_IMPLIES_MMIOWB + + /* The following mmiowb() is required on certain + * architechtures (IA64/Altix in particular) in order to + * synchronize the I/O calls with respect to a spin lock. This + * is because the wmb() on those architectures does not + * guarantee anything for posted I/O writes. 
+ * + * Note that the associated spin_unlock() is not within the + * driver code, but in the networking core stack. + */ + mmiowb(); +#endif /* SPIN_UNLOCK_IMPLIES_MMIOWB */ + } + + return 0; +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_buffer_info map */ + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[i]; + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + if (tx_buffer == first) + break; + if (i == 0) + i += tx_ring->count; + i--; + } + + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + tx_ring->next_to_use = i; + + return -1; +} + + +#ifdef HAVE_NETDEV_SELECT_QUEUE +#if IS_ENABLED(CONFIG_FCOE) + +#if defined(HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED) +static u16 ngbe_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +#elif defined(HAVE_NDO_SELECT_QUEUE_SB_DEV) +static u16 ngbe_select_queue(struct net_device *dev, struct sk_buff *skb, + __always_unused struct net_device *sb_dev, + select_queue_fallback_t fallback) +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) +static u16 ngbe_select_queue(struct net_device *dev, struct sk_buff *skb, + __always_unused void *accel, + select_queue_fallback_t fallback) +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL) +static u16 ngbe_select_queue(struct net_device *dev, struct sk_buff *skb, + __always_unused void *accel) +#else +static u16 ngbe_select_queue(struct net_device *dev, struct sk_buff *skb) +#endif /* HAVE_NDO_SELECT_QUEUE_ACCEL */ +{ + int txq; + + /* + * only execute the code below if protocol is FCoE + * or FIP and we have FCoE enabled on the adapter + */ + switch (vlan_get_protocol(skb)) { + case __constant_htons(ETH_P_FIP): + fallthrough; + default: +#if defined(HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED) + return netdev_pick_tx(dev, skb, sb_dev); +#elif defined(HAVE_NDO_SELECT_QUEUE_SB_DEV) + return fallback(dev, skb, sb_dev); +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) + return fallback(dev, skb); +#else + return __netdev_pick_tx(dev, skb); +#endif + } + + txq = skb_rx_queue_recorded(skb) ? 
skb_get_rx_queue(skb) : + smp_processor_id(); + + + return txq; +} +#endif /* CONFIG_FCOE */ +#endif /* HAVE_NETDEV_SELECT_QUEUE */ + +netdev_tx_t ngbe_xmit_frame_ring(struct sk_buff *skb, + struct ngbe_adapter __maybe_unused *adapter, + struct ngbe_ring *tx_ring) +{ + struct ngbe_tx_buffer *first; + int tso; + u32 tx_flags = 0; + unsigned short f; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = skb->protocol; + u8 hdr_len = 0; + ngbe_dptype dptype; + + /* + * need: 1 descriptor per page * PAGE_SIZE/NGBE_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/NGBE_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)-> + frags[f])); + + if (ngbe_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + /* if we have a HW VLAN tag being added default to the HW one */ + if (skb_vlan_tag_present(skb)) { + tx_flags |= skb_vlan_tag_get(skb) << NGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= NGBE_TX_FLAGS_HW_VLAN; + } + + if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) { + struct vlan_hdr *vhdr, _vhdr; + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) + goto out_drop; + + protocol = vhdr->h_vlan_encapsulated_proto; + tx_flags |= NGBE_TX_FLAGS_SW_VLAN; + } + +#ifdef HAVE_PTP_1588_CLOCK +#ifdef SKB_SHARED_TX_IS_UNION + if (unlikely(skb_tx(skb)->hardware) && + adapter->ptp_clock) { + if(!test_and_set_bit_lock(__NGBE_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_tx(skb)->in_progress = 1; +#else + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + adapter->ptp_clock) { + if (!test_and_set_bit_lock(__NGBE_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; +#endif + tx_flags |= NGBE_TX_FLAGS_TSTAMP; + + /* schedule check for Tx timestamp */ + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + schedule_work(&adapter->ptp_tx_work); + } else { + adapter->tx_hwtstamp_skipped++; + } + } + +#endif +#ifdef CONFIG_PCI_IOV + /* + * Use the l2switch_enable flag - would be false if the DMA + * Tx switch had been disabled. 
+ */ + if (adapter->flags & NGBE_FLAG_SRIOV_L2SWITCH_ENABLE) + tx_flags |= NGBE_TX_FLAGS_CC; + +#endif + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + + dptype = encode_tx_desc_ptype(first); + + tso = ngbe_tso(tx_ring, first, &hdr_len, dptype); + if (tso < 0) + goto out_drop; + else if (!tso) + ngbe_tx_csum(tx_ring, first, dptype); + + ngbe_tx_map(tx_ring, first, hdr_len); + +#ifndef HAVE_TRANS_START_IN_QUEUE + tx_ring->netdev->trans_start = jiffies; +#endif + +#ifndef HAVE_SKB_XMIT_MORE + ngbe_maybe_stop_tx(tx_ring, DESC_NEEDED); +#endif + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + e_dev_err("ngbe_xmit_frame_ring drop \n"); + + return NETDEV_TX_OK; + + +} + +static netdev_tx_t ngbe_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_ring *tx_ring; +#ifdef HAVE_TX_MQ + unsigned int r_idx = skb->queue_mapping; +#endif + + if (!netif_carrier_ok(netdev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* + * The minimum packet size for olinfo paylen is 17 so pad the skb + * in order to meet this minimum size requirement. + */ + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; + +#ifdef HAVE_TX_MQ + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + tx_ring = adapter->tx_ring[r_idx]; +#else + tx_ring = adapter->tx_ring[0]; +#endif + + return ngbe_xmit_frame_ring(skb, adapter, tx_ring); +} + +/** + * ngbe_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int ngbe_set_mac(struct net_device *netdev, void *p) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + ngbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + ngbe_mac_set_default_filter(adapter, hw->mac.addr); + e_info(drv, "The mac has been set to %02X:%02X:%02X:%02X:%02X:%02X\n", + hw->mac.addr[0], hw->mac.addr[1], hw->mac.addr[2], + hw->mac.addr[3], hw->mac.addr[4], hw->mac.addr[5]); + + return 0; +} + +static int ngbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ +#ifdef HAVE_PTP_1588_CLOCK + struct ngbe_adapter *adapter = netdev_priv(netdev); + +#endif + switch (cmd) { +#ifdef HAVE_PTP_1588_CLOCK +#ifdef SIOCGHWTSTAMP + case SIOCGHWTSTAMP: + return ngbe_ptp_get_ts_config(adapter, ifr); +#endif + case SIOCSHWTSTAMP: + return ngbe_ptp_set_ts_config(adapter, ifr); +#endif +#ifdef ETHTOOL_OPS_COMPAT + case SIOCETHTOOL: + return ethtool_ioctl(ifr); +#endif + case SIOCGMIIREG: + case SIOCSMIIREG: + default: + return -EOPNOTSUPP; + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
+ */ +static void ngbe_netpoll(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + /* if interface is down do nothing */ + if (test_bit(__NGBE_DOWN, &adapter->state)) + return; + + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) { + int i; + for (i = 0; i < adapter->num_q_vectors; i++) { + adapter->q_vector[i]->netpoll_rx = true; + ngbe_msix_clean_rings(0, adapter->q_vector[i]); + adapter->q_vector[i]->netpoll_rx = false; + } + } else { + ngbe_intr(0, adapter); + } +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +void ngbe_save_ring_stats(struct ngbe_adapter *adapter) +{ + struct ngbe_ring *ring; + int i = 0; + + memset(adapter->old_tx_qstats, 0, sizeof(struct ngbe_queue_stats)*MAX_TX_QUEUES); + memset(adapter->old_tx_stats, 0, sizeof(struct ngbe_tx_queue_stats) * MAX_TX_QUEUES); + memset(adapter->old_rx_qstats, 0, sizeof(struct ngbe_queue_stats)*MAX_RX_QUEUES); + memset(adapter->old_rx_stats, 0, sizeof(struct ngbe_rx_queue_stats)*MAX_RX_QUEUES); + + for (;i < adapter->num_q_vectors;i++) { + struct ngbe_q_vector *q_vector = adapter->q_vector[i]; + ngbe_for_each_ring(ring, q_vector->tx) { + + adapter->old_tx_qstats[i].packets += ring->stats.packets; + adapter->old_tx_qstats[i].bytes += ring->stats.bytes; +#ifdef BP_EXTENDED_STATS + adapter->old_tx_qstats[i].yields += ring->stats.yields; + adapter->old_tx_qstats[i].misses += ring->stats.misses; + adapter->old_tx_qstats[i].cleaned += ring->stats.cleaned; +#endif + adapter->old_tx_stats[i].restart_queue += ring->tx_stats.restart_queue; + adapter->old_tx_stats[i].tx_busy += ring->tx_stats.tx_busy; + adapter->old_tx_stats[i].tx_done_old += ring->tx_stats.tx_done_old; + } + ngbe_for_each_ring(ring, q_vector->rx) + { + adapter->old_rx_qstats[i] = ring->stats; + adapter->old_rx_stats[i] = ring->rx_stats; + } + } +} + +void ngbe_set_ring_stats(struct ngbe_adapter *adapter) +{ + struct ngbe_ring *ring; + int i = 0; + + for (;i < adapter->num_q_vectors;i++) { + ring = adapter->q_vector[i]->tx.ring; + ring->stats = adapter->old_tx_qstats[i]; + ring->tx_stats = adapter->old_tx_stats[i]; + ring = adapter->q_vector[i]->rx.ring; + ring->stats = adapter->old_rx_qstats[i]; + ring->rx_stats = adapter->old_rx_stats[i]; + } +} + +/** + * ngbe_setup_tc - routine to configure net_device for multiple traffic + * classes. + * + * @dev: net device to configure + * @tc: number of traffic classes to enable + * @save_stats: save the per-ring statistics before the reset and restore them afterwards + */ +int ngbe_setup_tc(struct net_device *dev, u8 tc, bool save_stats) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + + if (save_stats) + ngbe_save_ring_stats(adapter); + /* Hardware has to reinitialize queues and interrupts to + * match packet buffer alignment. Unfortunately, the + * hardware is not flexible enough to do this dynamically. 
+ */ + if (adapter->xdp_prog) { + if (adapter->num_rx_queues > MAX_RX_QUEUES / 2) { + adapter->old_rss_limit = adapter->ring_feature[RING_F_RSS].limit; + adapter->ring_feature[RING_F_RSS].limit = MAX_RX_QUEUES / 2; + e_dev_info("limit tx rx ring to 4 " + "because xdpring take up half of the txring"); + } + else { + adapter->old_rss_limit = 0; + } + } + + if (netif_running(dev)) + ngbe_close(dev); + else + ngbe_reset(adapter); + + ngbe_clear_interrupt_scheme(adapter); + + if (tc) { + netdev_set_num_tc(dev, tc); + } else { + netdev_reset_tc(dev); + } + ngbe_init_interrupt_scheme(adapter); + if (save_stats) + ngbe_set_ring_stats(adapter); + if (netif_running(dev)) + ngbe_open(dev); + + return 0; +} + +#ifdef CONFIG_PCI_IOV +void ngbe_sriov_reinit(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + rtnl_lock(); + ngbe_setup_tc(netdev, netdev_get_num_tc(netdev), 0); + rtnl_unlock(); +} +#endif + +void ngbe_do_reset(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + ngbe_reinit_locked(adapter); + else + ngbe_reset(adapter); +} + +#ifdef HAVE_NDO_SET_FEATURES +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +static u32 ngbe_fix_features(struct net_device *netdev, u32 features) +#else +static netdev_features_t ngbe_fix_features(struct net_device *netdev, + netdev_features_t features) +#endif +{ + /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + +#ifdef NGBE_NO_LRO + /* Turn off LRO if not RSC capable */ + features &= ~NETIF_F_LRO; +#endif + +#if (defined NETIF_F_HW_VLAN_CTAG_RX) && (defined NETIF_F_HW_VLAN_STAG_RX) + if (!(features & NETIF_F_HW_VLAN_CTAG_RX)) + features &= ~NETIF_F_HW_VLAN_STAG_RX; + else + features |= NETIF_F_HW_VLAN_STAG_RX; + if (!(features & NETIF_F_HW_VLAN_CTAG_TX)) + features &= ~NETIF_F_HW_VLAN_STAG_TX; + else + features |= NETIF_F_HW_VLAN_STAG_TX; +#endif + return features; +} + +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +static int ngbe_set_features(struct net_device *netdev, u32 features) +#else +static int ngbe_set_features(struct net_device *netdev, + netdev_features_t features) +#endif +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + bool need_reset = false; + +#if (defined NETIF_F_HW_VLAN_CTAG_RX) && (defined NETIF_F_HW_VLAN_STAG_RX) + if ((features & NETIF_F_HW_VLAN_CTAG_RX) && + (features & NETIF_F_HW_VLAN_STAG_RX)) +#elif (defined NETIF_F_HW_VLAN_CTAG_RX) + if (features & NETIF_F_HW_VLAN_CTAG_RX) +#elif (defined NETIF_F_HW_VLAN_STAG_RX) + if (features & NETIF_F_HW_VLAN_STAG_RX) +#else + if (features & NETIF_F_HW_VLAN_RX) +#endif + ngbe_vlan_strip_enable(adapter); + else + ngbe_vlan_strip_disable(adapter); + + if (features & NETIF_F_RXHASH) { + if (!(adapter->flags2 & NGBE_FLAG2_RSS_ENABLED)) { + wr32m(&adapter->hw, NGBE_RDB_RA_CTL, + NGBE_RDB_RA_CTL_RSS_EN, NGBE_RDB_RA_CTL_RSS_EN); + adapter->flags2 |= NGBE_FLAG2_RSS_ENABLED; + } + } else { + if (adapter->flags2 & NGBE_FLAG2_RSS_ENABLED) { + wr32m(&adapter->hw, NGBE_RDB_RA_CTL, + NGBE_RDB_RA_CTL_RSS_EN, ~NGBE_RDB_RA_CTL_RSS_EN); + adapter->flags2 &= ~NGBE_FLAG2_RSS_ENABLED; + } + } + + + if (need_reset) + ngbe_do_reset(netdev); + + return 0; + +} +#endif /* HAVE_NDO_SET_FEATURES */ + + +#ifdef HAVE_NDO_GSO_CHECK +static bool +ngbe_gso_check(struct sk_buff *skb, __always_unused struct net_device *dev) +{ + return vxlan_gso_check(skb); +} +#endif /* HAVE_NDO_GSO_CHECK */ + +#ifdef HAVE_FDB_OPS +#ifdef USE_CONST_DEV_UC_CHAR +static int 
ngbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, +#ifdef HAVE_NDO_FDB_ADD_VID + u16 vid, +#endif +#ifdef HAVE_NDO_FDB_ADD_EXTACK + u16 flags, + struct netlink_ext_ack __always_unused *extack) +#else + u16 flags) +#endif +#else +static int ngbe_ndo_fdb_add(struct ndmsg *ndm, + struct net_device *dev, + unsigned char *addr, + u16 flags) +#endif /* USE_CONST_DEV_UC_CHAR */ +{ + /* guarantee we can provide a unique filter for the unicast address */ + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { + if (NGBE_MAX_PF_MACVLANS <= netdev_uc_count(dev)) + return -ENOMEM; + } + +#ifdef USE_CONST_DEV_UC_CHAR +#ifdef HAVE_NDO_FDB_ADD_VID + return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); +#else + return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags); +#endif /* HAVE_NDO_FDB_ADD_VID */ +#else + return ndo_dflt_fdb_add(ndm, dev, addr, flags); +#endif /* USE_CONST_DEV_UC_CHAR */ +} + +#ifdef HAVE_BRIDGE_ATTRIBS +#ifdef HAVE_NDO_BRIDGE_SETLINK_EXTACK +static int ngbe_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh, + __always_unused u16 flags, + struct netlink_ext_ack __always_unused *ext) +#elif defined(HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS) +static int ngbe_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh, + __always_unused u16 flags) +#else +static int ngbe_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh) +#endif /* HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS */ +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + struct nlattr *attr, *br_spec; + int rem; + + if (!(adapter->flags & NGBE_FLAG_SRIOV_ENABLED)) + return -EOPNOTSUPP; + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + mode = nla_get_u16(attr); + if (mode == BRIDGE_MODE_VEPA) { + adapter->flags |= NGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } else if (mode == BRIDGE_MODE_VEB) { + adapter->flags &= ~NGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } else { + return -EINVAL; + } + + adapter->bridge_mode = mode; + + /* re-configure settings related to bridge mode */ + ngbe_configure_bridge_mode(adapter); + + e_info(drv, "enabling bridge mode: %s\n", + mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); + } + + return 0; +} + +#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +static int ngbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __maybe_unused filter_mask, + int nlflags) +#elif defined(HAVE_BRIDGE_FILTER) +static int ngbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __always_unused filter_mask) +#else +static int ngbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev) +#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */ +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + u16 mode; + + if (!(adapter->flags & NGBE_FLAG_SRIOV_ENABLED)) + return 0; + + mode = adapter->bridge_mode; +#ifdef HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags, + filter_mask, NULL); +#elif defined(HAVE_NDO_BRIDGE_GETLINK_NLFLAGS) + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags); +#elif defined(HAVE_NDO_FDB_ADD_VID) || \ + defined (NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS) + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0); +#else + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode); +#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */ +} +#endif /* HAVE_BRIDGE_ATTRIBS */ +#endif /* HAVE_FDB_OPS */ + +#ifdef HAVE_NDO_FEATURES_CHECK +#define NGBE_MAX_TUNNEL_HDR_LEN 80 +static netdev_features_t ngbe_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + u32 vlan_num = 0; + u16 vlan_depth = skb->mac_len; + __be16 type = skb->protocol; + struct vlan_hdr *vh; + + if (skb_vlan_tag_present(skb)) { + vlan_num++; + } + + if (vlan_depth) { + vlan_depth -= VLAN_HLEN; + } else { + vlan_depth = ETH_HLEN; + } + + while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { + vlan_num++; + vh = (struct vlan_hdr *)(skb->data + vlan_depth); + type = vh->h_vlan_encapsulated_proto; + vlan_depth += VLAN_HLEN; + + } + + if (vlan_num > 2) + features &= ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX); + + if (skb->encapsulation) { + if (unlikely(skb_inner_mac_header(skb) - + skb_transport_header(skb) > + NGBE_MAX_TUNNEL_HDR_LEN)) + return features & ~NETIF_F_CSUM_MASK; + } + return features; +} +#endif /* HAVE_NDO_FEATURES_CHECK */ + + +#ifdef HAVE_XDP_SUPPORT +static int ngbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) +{ + int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + struct ngbe_adapter *adapter = netdev_priv(dev); + struct bpf_prog *old_prog; + bool need_reset; + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) + return -EINVAL; + + if (adapter->flags & NGBE_FLAG_DCB_ENABLED) + return -EINVAL; + + if (adapter->xdp_prog && prog) { + e_dev_err("XDP can't be active at the same time"); + return -EBUSY; + } + + /* verify ngbe ring attributes are sufficient for XDP */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ngbe_ring *ring = adapter->rx_ring[i]; + + if (frame_size > ngbe_rx_bufsz(ring)) + return -EINVAL; + } + + old_prog = xchg(&adapter->xdp_prog, prog); + need_reset = (!!prog != !!old_prog); + + /* If transitioning XDP modes reconfigure rings */ + if (need_reset) { + int err = 0; + if (!adapter->xdp_prog && adapter->old_rss_limit) + adapter->ring_feature[RING_F_RSS].limit = adapter->old_rss_limit; + + err = ngbe_setup_tc(dev, netdev_get_num_tc(dev), 1); + + if (err) { + rcu_assign_pointer(adapter->xdp_prog, old_prog); + return -EINVAL; + } + } else { + for (i = 0; i < adapter->num_rx_queues; i++) + 
xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog); + } + if (adapter->xdp_prog) + e_dev_info("xdp program is setup"); + else + e_dev_info("xdp program not load"); + if (old_prog) + bpf_prog_put(old_prog); + + return 0; +} + + +#ifdef HAVE_NDO_BPF +static int ngbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) +#else +static int ngbe_xdp(struct net_device *dev, struct netdev_xdp *xdp) +#endif +{ +#ifdef HAVE_XDP_QUERY_PROG + struct ngbe_adapter *adapter = netdev_priv(dev); +#endif + + switch (xdp->command) { + case XDP_SETUP_PROG: + return ngbe_xdp_setup(dev, xdp->prog); +#ifdef HAVE_XDP_QUERY_PROG + case XDP_QUERY_PROG: +#ifndef NO_NETDEV_BPF_PROG_ATTACHED + xdp->prog_attached = !!(adapter->xdp_prog); +#endif /* !NO_NETDEV_BPF_PROG_ATTACHED */ + xdp->prog_id = adapter->xdp_prog ? + adapter->xdp_prog->aux->id : 0; + return 0; +#endif + default: + return -EINVAL; + } +} +#ifdef HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS +static int ngbe_xdp_xmit(struct net_device *dev, int n, + struct xdp_frame **frames, u32 flags) +#else +static int ngbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) +#endif +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + struct ngbe_ring *ring; +#ifdef HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS + int drops = 0; + int i; +#else + int err; +#endif + if (unlikely(test_bit(__NGBE_DOWN, &adapter->state))) + return -ENETDOWN; + +#ifdef HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; +#endif + /* During program transitions its possible adapter->xdp_prog is assigned + * but ring has not been configured yet. In this case simply abort xmit. + */ + ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id() % adapter->num_xdp_queues] : NULL; + if (unlikely(!ring)) + return -ENXIO; +#ifdef HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS + if (static_branch_unlikely(&ngbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + int err; + + err = ngbe_xmit_xdp_ring(ring, xdpf); + if (err != NGBE_XDP_TX) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + + if (unlikely(flags & XDP_XMIT_FLUSH)){ + wmb(); + writel(ring->next_to_use, ring->tail); + } + if (static_branch_unlikely(&ngbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); + return n - drops; +#else /* HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS */ + + if (static_branch_unlikely(&ngbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + err = ngbe_xmit_xdp_ring(ring, xdp); + if (static_branch_unlikely(&ngbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); + if (err != NGBE_XDP_TX) + return -ENOSPC; + + return 0; +#endif +} + +#ifndef NO_NDO_XDP_FLUSH +static void ngbe_xdp_flush(struct net_device *dev) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + struct ngbe_ring *ring; + + /* Its possible the device went down between xdp xmit and flush so + * we need to ensure device is still up. + */ + if (unlikely(test_bit(__NGBE_DOWN, &adapter->state))) + return; + + ring = adapter->xdp_prog ? 
adapter->xdp_ring[smp_processor_id() % adapter->num_xdp_queues] : NULL; + if (unlikely(!ring)) + return; + + wmb(); + writel(ring->next_to_use, ring->tail); + + return; +} +#endif /* !NO_NDO_XDP_FLUSH */ + +#endif /* HAVE_XDP_SUPPORT */ + +#ifdef HAVE_VIRTUAL_STATION +static inline int ngbe_inc_vmdqs(struct ngbe_fwd_adapter *accel) +{ + struct ngbe_adapter *adapter = accel->adapter; + + if (++adapter->num_vmdqs > 1 || adapter->num_vfs > 0) + adapter->flags |= NGBE_FLAG_VMDQ_ENABLED | + NGBE_FLAG_SRIOV_ENABLED; + accel->index = find_first_zero_bit(&adapter->fwd_bitmask, + NGBE_MAX_MACVLANS); + set_bit(accel->index, &adapter->fwd_bitmask); + + return 1 + find_last_bit(&adapter->fwd_bitmask, NGBE_MAX_MACVLANS); +} + +static inline int ngbe_dec_vmdqs(struct ngbe_fwd_adapter *accel) +{ + struct ngbe_adapter *adapter = accel->adapter; + + if (--adapter->num_vmdqs == 1 && adapter->num_vfs == 0) + adapter->flags &= ~(NGBE_FLAG_VMDQ_ENABLED | + NGBE_FLAG_SRIOV_ENABLED); + clear_bit(accel->index, &adapter->fwd_bitmask); + + return 1 + find_last_bit(&adapter->fwd_bitmask, NGBE_MAX_MACVLANS); +} + +static void *ngbe_fwd_add(struct net_device *pdev, struct net_device *vdev) +{ + struct ngbe_fwd_adapter *accel = NULL; + struct ngbe_adapter *adapter = netdev_priv(pdev); + int used_pools = adapter->num_vfs + adapter->num_vmdqs; + int err; + + if (test_bit(__NGBE_DOWN, &adapter->state)) + return ERR_PTR(-EPERM); + + /* Hardware has a limited number of available pools. Each VF, and the + * PF require a pool. Check to ensure we don't attempt to use more + * than the available number of pools. + */ + if (used_pools >= NGBE_MAX_VF_FUNCTIONS) + return ERR_PTR(-EINVAL); + +#ifdef CONFIG_RPS + if (vdev->num_rx_queues != vdev->num_tx_queues) { + netdev_info(pdev, "%s: Only supports a single queue count for " + "TX and RX\n", + vdev->name); + return ERR_PTR(-EINVAL); + } +#endif + /* Check for hardware restriction on number of rx/tx queues */ + if (vdev->num_tx_queues != 2 && vdev->num_tx_queues != 4) { + netdev_info(pdev, + "%s: Supports RX/TX Queue counts 2, and 4\n", + pdev->name); + return ERR_PTR(-EINVAL); + } - netdev = wx->netdev; - rtnl_lock(); - netif_device_detach(netdev); + accel = kzalloc(sizeof(*accel), GFP_KERNEL); + if (!accel) + return ERR_PTR(-ENOMEM); + accel->adapter = adapter; - if (netif_running(netdev)) - ngbe_close(netdev); - wx_clear_interrupt_scheme(wx); - rtnl_unlock(); + /* Enable VMDq flag so device will be set in VM mode */ + adapter->ring_feature[RING_F_VMDQ].limit = ngbe_inc_vmdqs(accel); + adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues; - if (wufc) { - wx_set_rx_mode(netdev); - wx_configure_rx(wx); - wr32(wx, NGBE_PSR_WKUP_CTL, wufc); - } else { - wr32(wx, NGBE_PSR_WKUP_CTL, 0); - } - pci_wake_from_d3(pdev, !!wufc); - *enable_wake = !!wufc; - wx_control_hw(wx, false); + /* Force reinit of ring allocation with VMDQ enabled */ + err = ngbe_setup_tc(pdev, netdev_get_num_tc(pdev), 0); + if (err) + goto fwd_add_err; + + err = ngbe_fwd_ring_up(vdev, accel); + if (err) + goto fwd_add_err; - pci_disable_device(pdev); + netif_tx_start_all_queues(vdev); + return accel; +fwd_add_err: + /* unwind counter and free adapter struct */ + netdev_info(pdev, + "%s: dfwd hardware acceleration failed\n", vdev->name); + ngbe_dec_vmdqs(accel); + kfree(accel); + return ERR_PTR(err); } -static void ngbe_shutdown(struct pci_dev *pdev) +static void ngbe_fwd_del(struct net_device *pdev, void *fwd_priv) { - struct wx *wx = pci_get_drvdata(pdev); - bool wake; - - wake = !!wx->wol; + struct 
ngbe_fwd_adapter *accel = fwd_priv; + struct ngbe_adapter *adapter = accel->adapter; - ngbe_dev_shutdown(pdev, &wake); + if (!accel || adapter->num_vmdqs <= 1) + return; - if (system_state == SYSTEM_POWER_OFF) { - pci_wake_from_d3(pdev, wake); - pci_set_power_state(pdev, PCI_D3hot); - } + adapter->ring_feature[RING_F_VMDQ].limit = ngbe_dec_vmdqs(accel); + ngbe_fwd_ring_down(accel->vdev, accel); + ngbe_setup_tc(pdev, netdev_get_num_tc(pdev), 0); + netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", + accel->index, adapter->num_vmdqs, + accel->rx_base_queue, + accel->rx_base_queue + adapter->queues_per_pool, + adapter->fwd_bitmask); + kfree(accel); } +#endif /*HAVE_VIRTUAL_STATION*/ + +#ifdef HAVE_NET_DEVICE_OPS static const struct net_device_ops ngbe_netdev_ops = { .ndo_open = ngbe_open, .ndo_stop = ngbe_close, - .ndo_change_mtu = wx_change_mtu, - .ndo_start_xmit = wx_xmit_frame, - .ndo_set_rx_mode = wx_set_rx_mode, - .ndo_set_features = wx_set_features, + .ndo_start_xmit = ngbe_xmit_frame, +#if IS_ENABLED(CONFIG_FCOE) + .ndo_select_queue = ngbe_select_queue, +#else +#ifndef HAVE_MQPRIO + .ndo_select_queue = __netdev_pick_tx, +#endif +#endif + .ndo_set_rx_mode = ngbe_set_rx_mode, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = wx_set_mac, - .ndo_get_stats64 = wx_get_stats64, - .ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid, + .ndo_set_mac_address = ngbe_set_mac, +#ifdef CENTOS_MTU_PORT_UPDATE + .ndo_change_mtu_rh74 = ngbe_change_mtu, +#else + .ndo_change_mtu = ngbe_change_mtu, +#endif + .ndo_tx_timeout = ngbe_tx_timeout, +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) + .ndo_vlan_rx_add_vid = ngbe_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ngbe_vlan_rx_kill_vid, +#endif + .ndo_do_ioctl = ngbe_ioctl, +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + /* RHEL7 requires this to be defined to enable extended ops. RHEL7 uses the + * function get_ndo_ext to retrieve offsets for extended fields from with the + * net_device_ops struct and ndo_size is checked to determine whether or not + * the offset is valid. 
+ */ + .ndo_size = sizeof(const struct net_device_ops), +#endif +#ifdef IFLA_VF_MAX + .ndo_set_vf_mac = ngbe_ndo_set_vf_mac, +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN + .extended.ndo_set_vf_vlan = ngbe_ndo_set_vf_vlan, +#else + .ndo_set_vf_vlan = ngbe_ndo_set_vf_vlan, +#endif + +/* not support by emerald */ +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + .ndo_set_vf_rate = ngbe_ndo_set_vf_bw, +#else + .ndo_set_vf_tx_rate = ngbe_ndo_set_vf_bw, +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + .ndo_set_vf_spoofchk = ngbe_ndo_set_vf_spoofchk, +#endif +#ifdef HAVE_NDO_SET_VF_TRUST +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + .extended.ndo_set_vf_trust = ngbe_ndo_set_vf_trust, +#else + .ndo_set_vf_trust = ngbe_ndo_set_vf_trust, +#endif /* HAVE_RHEL7_NET_DEVICE_OPS_EXT */ +#endif /* HAVE_NDO_SET_VF_TRUST */ + + .ndo_get_vf_config = ngbe_ndo_get_vf_config, +#endif +#ifdef HAVE_NDO_GET_STATS64 + .ndo_get_stats64 = ngbe_get_stats64, +#else + .ndo_get_stats = ngbe_get_stats, +#endif /* HAVE_NDO_GET_STATS64 */ +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ngbe_netpoll, +#endif +#ifndef HAVE_RHEL6_NET_DEVICE_EXTENDED +#ifdef HAVE_NDO_BUSY_POLL + .ndo_busy_poll = ngbe_busy_poll_recv, +#endif /* HAVE_NDO_BUSY_POLL */ +#endif /* !HAVE_RHEL6_NET_DEVICE_EXTENDED */ +#ifdef HAVE_VLAN_RX_REGISTER + .ndo_vlan_rx_register = &ngbe_vlan_mode, +#endif +#ifdef HAVE_FDB_OPS + .ndo_fdb_add = ngbe_ndo_fdb_add, +#ifndef USE_DEFAULT_FDB_DEL_DUMP + .ndo_fdb_del = ndo_dflt_fdb_del, + .ndo_fdb_dump = ndo_dflt_fdb_dump, +#endif +#ifdef HAVE_BRIDGE_ATTRIBS + .ndo_bridge_setlink = ngbe_ndo_bridge_setlink, + .ndo_bridge_getlink = ngbe_ndo_bridge_getlink, +#endif /* HAVE_BRIDGE_ATTRIBS */ +#endif +#ifdef HAVE_VIRTUAL_STATION + .ndo_dfwd_add_station = ngbe_fwd_add, + .ndo_dfwd_del_station = ngbe_fwd_del, +#endif +#ifdef HAVE_NDO_GSO_CHECK + .ndo_gso_check = ngbe_gso_check, +#endif /* HAVE_NDO_GSO_CHECK */ +#ifdef HAVE_NDO_FEATURES_CHECK + .ndo_features_check = ngbe_features_check, +#endif /* HAVE_NDO_FEATURES_CHECK */ + +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_NDO_BPF + .ndo_bpf = ngbe_xdp, +#else + .ndo_xdp = ngbe_xdp, +#endif + .ndo_xdp_xmit = ngbe_xdp_xmit, +#ifndef NO_NDO_XDP_FLUSH + .ndo_xdp_flush = ngbe_xdp_flush, +#endif /* !NO_NDO_XDP_FLUSH */ +#endif +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +}; + +/* RHEL6 keeps these operations in a separate structure */ +static const struct net_device_ops_ext ngbe_netdev_ops_ext = { + .size = sizeof(struct net_device_ops_ext), +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ +#ifdef HAVE_NDO_SET_FEATURES + .ndo_set_features = ngbe_set_features, + .ndo_fix_features = ngbe_fix_features, +#endif /* HAVE_NDO_SET_FEATURES */ }; +#endif /* HAVE_NET_DEVICE_OPS */ + +void ngbe_assign_netdev_ops(struct net_device *dev) +{ +#ifdef HAVE_NET_DEVICE_OPS + dev->netdev_ops = &ngbe_netdev_ops; +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + set_netdev_ops_ext(dev, &ngbe_netdev_ops_ext); +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ +#else /* HAVE_NET_DEVICE_OPS */ + dev->open = &ngbe_open; + dev->stop = &ngbe_close; + dev->hard_start_xmit = &ngbe_xmit_frame; + dev->get_stats = &ngbe_get_stats; +#ifdef HAVE_SET_RX_MODE + dev->set_rx_mode = &ngbe_set_rx_mode; +#endif + dev->set_multicast_list = &ngbe_set_rx_mode; + dev->set_mac_address = &ngbe_set_mac; + dev->change_mtu = &ngbe_change_mtu; + dev->do_ioctl = &ngbe_ioctl; +#ifdef HAVE_TX_TIMEOUT + dev->tx_timeout = &ngbe_tx_timeout; +#endif +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + 
defined(NETIF_F_HW_VLAN_STAG_TX) + dev->vlan_rx_register = &ngbe_vlan_mode; + dev->vlan_rx_add_vid = &ngbe_vlan_rx_add_vid; + dev->vlan_rx_kill_vid = &ngbe_vlan_rx_kill_vid; +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = &ngbe_netpoll; +#endif +#ifdef HAVE_NETDEV_SELECT_QUEUE +#if IS_ENABLED(CONFIG_FCOE) + dev->select_queue = &ngbe_select_queue; +#else + dev->select_queue = &__netdev_pick_tx; +#endif +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#endif /* HAVE_NET_DEVICE_OPS */ + +#ifdef HAVE_RHEL6_NET_DEVICE_EXTENDED +#ifdef HAVE_NDO_BUSY_POLL + netdev_extended(dev)->ndo_busy_poll = ngbe_busy_poll_recv; +#endif /* HAVE_NDO_BUSY_POLL */ +#endif /* HAVE_RHEL6_NET_DEVICE_EXTENDED */ + + ngbe_set_ethtool_ops(dev); + dev->watchdog_timeo = 5 * HZ; +} + +/** + * ngbe_wol_supported - Check whether device supports WoL + * @adapter: the adapter private structure + * @device_id: the device ID + * @subdev_id: the subsystem device ID + * + * This function is used by probe and ethtool to determine + * which devices have WoL support + * + **/ +int ngbe_wol_supported(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + + /* check eeprom to see if WOL is enabled */ + if ((hw->bus.func == 0) || + (hw->bus.func == 1) || + (hw->bus.func == 2) || + (hw->bus.func == 3) ) + return true; + else + return false; +} + /** * ngbe_probe - Device Initialization Routine @@ -499,201 +9374,525 @@ static const struct net_device_ops ngbe_netdev_ops = { * * Returns 0 on success, negative on failure * - * ngbe_probe initializes an wx identified by a pci_dev structure. - * The OS initialization, configuring of the wx private structure, + * ngbe_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. 
**/ -static int ngbe_probe(struct pci_dev *pdev, - const struct pci_device_id __always_unused *ent) +static int __devinit ngbe_probe(struct pci_dev *pdev, + const struct pci_device_id __always_unused *ent) { struct net_device *netdev; - u32 e2rom_cksum_cap = 0; - struct wx *wx = NULL; - static int func_nums; - u16 e2rom_ver = 0; + struct ngbe_adapter *adapter = NULL; + struct ngbe_hw *hw = NULL; + static int cards_found; + int err, pci_using_dac, expected_gts; + u32 eeprom_verl = 0; u32 etrack_id = 0; - u32 saved_ver = 0; - int err; + char *info_string, *i_s_var; + u32 eeprom_cksum_devcap = 0; + u32 saved_version = 0; + u32 devcap; + u32 led_conf = 0; + + bool disable_dev = false; +#ifdef HAVE_NDO_SET_FEATURES +#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT + netdev_features_t hw_features; +#else /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ + u32 hw_features; +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ +#endif /* HAVE_NDO_SET_FEATURES */ + u16 pvalue = 0; err = pci_enable_device_mem(pdev); if (err) return err; - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pdev->dev, - "No usable DMA configuration, aborting\n"); - goto err_pci_disable_dev; + if (!dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)) && + !dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(pci_dev_to_dev(pdev), + DMA_BIT_MASK(32)); + if (err) { + dev_err(pci_dev_to_dev(pdev), "No usable DMA " + "configuration, aborting\n"); + goto err_dma; + } + } + pci_using_dac = 0; } err = pci_request_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM), - ngbe_driver_name); + pci_select_bars(pdev, IORESOURCE_MEM), + ngbe_driver_name); if (err) { - dev_err(&pdev->dev, - "pci_request_selected_regions failed %d\n", err); - goto err_pci_disable_dev; + dev_err(pci_dev_to_dev(pdev), + "pci_request_selected_regions failed 0x%x\n", err); + goto err_pci_reg; } pci_set_master(pdev); - netdev = devm_alloc_etherdev_mqs(&pdev->dev, - sizeof(struct wx), - NGBE_MAX_TX_QUEUES, - NGBE_MAX_RX_QUEUES); + /* errata 16 */ + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_READRQ, + 0x1000); + +#ifdef HAVE_TX_MQ + netdev = alloc_etherdev_mq(sizeof(struct ngbe_adapter), NGBE_MAX_TX_QUEUES); +#else /* HAVE_TX_MQ */ + netdev = alloc_etherdev(sizeof(struct ngbe_adapter)); +#endif /* HAVE_TX_MQ */ if (!netdev) { err = -ENOMEM; - goto err_pci_release_regions; + goto err_alloc_etherdev; } - SET_NETDEV_DEV(netdev, &pdev->dev); + SET_MODULE_OWNER(netdev); + SET_NETDEV_DEV(netdev, pci_dev_to_dev(pdev)); + + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; - wx = netdev_priv(netdev); - wx->netdev = netdev; - wx->pdev = pdev; - wx->msg_enable = BIT(3) - 1; + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); - wx->hw_addr = devm_ioremap(&pdev->dev, - pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - if (!wx->hw_addr) { + adapter->io_addr = hw->hw_addr; + if (!hw->hw_addr) { err = -EIO; - goto err_pci_release_regions; + goto err_ioremap; } - wx->driver_name = ngbe_driver_name; - ngbe_set_ethtool_ops(netdev); - netdev->netdev_ops = &ngbe_netdev_ops; - - netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_TSO | NETIF_F_TSO6 | - NETIF_F_RXHASH | NETIF_F_RXCSUM; - netdev->features |= 
NETIF_F_SCTP_CRC | NETIF_F_TSO_MANGLEID; - netdev->vlan_features |= netdev->features; - netdev->features |= NETIF_F_IPV6_CSUM | NETIF_F_VLAN_FEATURES; - /* copy netdev features into list of user selectable features */ - netdev->hw_features |= netdev->features | NETIF_F_RXALL; - netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; - netdev->features |= NETIF_F_HIGHDMA; - netdev->hw_features |= NETIF_F_GRO; - netdev->features |= NETIF_F_GRO; + /* default config: 10/100/1000M autoneg on */ + hw->mac.autoneg = true; + hw->phy.autoneg_advertised = NGBE_LINK_SPEED_AUTONEG; + hw->phy.force_speed = NGBE_LINK_SPEED_UNKNOWN; + /* assign netdev ops and ethtool ops */ + ngbe_assign_netdev_ops(netdev); - netdev->priv_flags |= IFF_UNICAST_FLT; - netdev->priv_flags |= IFF_SUPP_NOFCS; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); - netdev->min_mtu = ETH_MIN_MTU; - netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE - - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + adapter->bd_number = cards_found; - wx->bd_number = func_nums; /* setup the private structure */ - err = ngbe_sw_init(wx); + err = ngbe_sw_init(adapter); if (err) - goto err_pci_release_regions; + goto err_sw_init; + + /* + * check_options must be called before setup_link to set up + * hw->fc completely + */ + ngbe_check_options(adapter); + + hw->mac.ops.set_lan_id(hw); /* check if flash load is done after hw power up */ - err = wx_check_flash_load(wx, NGBE_SPI_ILDR_STATUS_PERST); + err = ngbe_check_flash_load(hw, NGBE_SPI_ILDR_STATUS_PERST); if (err) - goto err_free_mac_table; - err = wx_check_flash_load(wx, NGBE_SPI_ILDR_STATUS_PWRRST); + goto err_sw_init; + err = ngbe_check_flash_load(hw, NGBE_SPI_ILDR_STATUS_PWRRST); if (err) - goto err_free_mac_table; + goto err_sw_init; + + if(ngbe_is_lldp(hw)) + e_dev_err("Can not get lldp flags from flash\n"); + /* reset_hw fills in the perm_addr as well */ - err = wx_mng_present(wx); + hw->phy.reset_if_overtemp = true; + err = hw->mac.ops.reset_hw(hw); + hw->phy.reset_if_overtemp = false; if (err) { - dev_err(&pdev->dev, "Management capability is not present\n"); - goto err_free_mac_table; + e_dev_err("HW reset failed: %d\n", err); + goto err_sw_init; } - err = ngbe_reset_hw(wx); - if (err) { - dev_err(&pdev->dev, "HW Init failed: %d\n", err); - goto err_free_mac_table; + err = hw->phy.ops.init(hw); + if (err) + goto err_sw_init; + + netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; + +#ifdef NETIF_F_IPV6_CSUM + netdev->features |= NETIF_F_IPV6_CSUM; +#endif + +#ifdef NETIF_F_HW_VLAN_CTAG_TX + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; +#endif + +#ifdef NETIF_F_HW_VLAN_STAG_TX + netdev->features |= NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_HW_VLAN_STAG_RX; +#endif + +#ifdef NETIF_F_HW_VLAN_TX + netdev->features |= NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX; +#endif + netdev->features |= ngbe_tso_features(); +#ifdef NETIF_F_RXHASH + if (adapter->flags2 & NGBE_FLAG2_RSS_ENABLED) + netdev->features |= NETIF_F_RXHASH; +#endif + netdev->features |= NETIF_F_RXCSUM; +#ifdef HAVE_VIRTUAL_STATION + netdev->features |= NETIF_F_HW_L2FW_DOFFLOAD; +#endif +#ifdef HAVE_NDO_SET_FEATURES + /* copy netdev features into list of user selectable features */ +#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT + hw_features = netdev->hw_features; +#else + hw_features = get_netdev_hw_features(netdev); +#endif + hw_features |= netdev->features; + +#else /* !HAVE_NDO_SET_FEATURES */ + +#ifdef NETIF_F_GRO + /* this is only needed on kernels prior to 2.6.39 */ + netdev->features |= NETIF_F_GRO; +#endif +#endif /* 
HAVE_NDO_SET_FEATURES */ + +#ifdef NETIF_F_HW_VLAN_CTAG_TX + /* set this bit last since it cannot be part of hw_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; +#endif +#ifdef NETIF_F_HW_VLAN_STAG_TX + netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER; +#endif +#ifdef NETIF_F_HW_VLAN_TX + /* set this bit last since it cannot be part of hw_features */ + netdev->features |= NETIF_F_HW_VLAN_FILTER; +#endif + netdev->features |= NETIF_F_SCTP_CSUM; + netdev->features |= NETIF_F_NTUPLE; +#ifdef HAVE_NDO_SET_FEATURES + hw_features |= NETIF_F_SCTP_CSUM | NETIF_F_NTUPLE; +#endif + +#ifdef HAVE_NDO_SET_FEATURES +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + set_netdev_hw_features(netdev, hw_features); +#else + netdev->hw_features = hw_features; +#endif +#endif + +#ifdef HAVE_NETDEV_VLAN_FEATURES + netdev->vlan_features |= NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6; + +#endif /* HAVE_NETDEV_VLAN_FEATURES */ + +#ifdef IFF_UNICAST_FLT + netdev->priv_flags |= IFF_UNICAST_FLT; +#endif +#ifdef IFF_SUPP_NOFCS + netdev->priv_flags |= IFF_SUPP_NOFCS; +#endif + +#ifdef HAVE_NETDEVICE_MIN_MAX_MTU + /* MTU range: 68 - 9414 */ +#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU + netdev->extended->min_mtu = ETH_MIN_MTU; + netdev->extended->max_mtu = NGBE_MAX_JUMBO_FRAME_SIZE - + (ETH_HLEN + ETH_FCS_LEN); +#else + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = NGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); +#endif +#endif /* HAVE_NETDEVICE_MIN_MAX_MTU */ + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; +#ifdef HAVE_NETDEV_VLAN_FEATURES + netdev->vlan_features |= NETIF_F_HIGHDMA; +#endif /* HAVE_NETDEV_VLAN_FEATURES */ } - if (wx->bus.func == 0) { - wr32(wx, NGBE_CALSUM_CAP_STATUS, 0x0); - wr32(wx, NGBE_EEPROM_VERSION_STORE_REG, 0x0); + if (hw->bus.lan_id == 0) { + wr32(hw, NGBE_CALSUM_CAP_STATUS, 0x0); + wr32(hw, NGBE_EEPROM_VERSION_STORE_REG, 0x0); } else { - e2rom_cksum_cap = rd32(wx, NGBE_CALSUM_CAP_STATUS); - saved_ver = rd32(wx, NGBE_EEPROM_VERSION_STORE_REG); + eeprom_cksum_devcap = rd32(hw, NGBE_CALSUM_CAP_STATUS); + saved_version = rd32(hw, NGBE_EEPROM_VERSION_STORE_REG); } - wx_init_eeprom_params(wx); - if (wx->bus.func == 0 || e2rom_cksum_cap == 0) { - /* make sure the EEPROM is ready */ - err = ngbe_eeprom_chksum_hostif(wx); - if (err) { - dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); + hw->eeprom.ops.init_params(hw); + hw->mac.ops.release_swfw_sync(hw, NGBE_MNG_SWFW_SYNC_SW_MB); + if(hw->bus.lan_id == 0 || eeprom_cksum_devcap == 0) { + /* make sure the EEPROM is good */ + if (hw->eeprom.ops.eeprom_chksum_cap_st(hw, NGBE_CALSUM_COMMAND, &devcap)) { + e_dev_err("The EEPROM Checksum Is Not Valid\n"); err = -EIO; - goto err_free_mac_table; + goto err_sw_init; } } - wx->wol = 0; - if (wx->wol_hw_supported) - wx->wol = NGBE_PSR_WKUP_CTL_MAG; + if (hw->eeprom.ops.phy_led_oem_chk(hw, &led_conf)) { + adapter->led_conf = -1; + } else { + adapter->led_conf = led_conf; + } + + eth_hw_addr_set(netdev, hw->mac.perm_addr); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + e_dev_err("invalid MAC address\n"); + err = -EIO; + goto err_sw_init; + } + + ngbe_mac_set_default_filter(adapter, hw->mac.perm_addr); + + timer_setup(&adapter->service_timer, ngbe_service_timer, 0); + if (NGBE_POLL_LINK_STATUS == 1) + timer_setup(&adapter->link_check_timer, ngbe_link_check_timer, 0); + + if (NGBE_REMOVED(hw->hw_addr)) { + err = -EIO; + goto err_sw_init; + } + INIT_WORK(&adapter->service_task, ngbe_service_task); + set_bit(__NGBE_SERVICE_INITED, 
&adapter->state); + clear_bit(__NGBE_SERVICE_SCHED, &adapter->state); + + err = ngbe_init_interrupt_scheme(adapter); + if (err) + goto err_sw_init; + +#ifdef CONFIG_PCI_IOV +#ifdef HAVE_SRIOV_CONFIGURE + if (adapter->num_vfs > 0) { + e_dev_warn("Enabling SR-IOV VFs using the max_vfs module " + "parameter is deprecated.\n"); + e_dev_warn("Please use the pci sysfs interface instead. Ex:\n"); + e_dev_warn("echo '%d' > /sys/bus/pci/devices/%04x:%02x:%02x.%1x" + "/sriov_numvfs\n", + adapter->num_vfs, + pci_domain_nr(pdev->bus), + pdev->bus->number, + PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + } + +#endif + if (adapter->flags & NGBE_FLAG_SRIOV_CAPABLE) { + pci_sriov_set_totalvfs(pdev, NGBE_MAX_VFS_DRV_LIMIT); + ngbe_enable_sriov(adapter); + } +#endif /* CONFIG_PCI_IOV */ + + /* WOL not supported for all devices */ + adapter->wol = 0; + if (hw->bus.lan_id == 0 || eeprom_cksum_devcap == 0) { + hw->eeprom.ops.read(hw, + hw->eeprom.sw_region_offset + NGBE_DEVICE_CAPS, + &adapter->eeprom_cap); + /*only support in LAN0*/ + adapter->eeprom_cap = NGBE_DEVICE_CAPS_WOL_PORT0; + } else { + adapter->eeprom_cap = eeprom_cksum_devcap & 0xffff; + } + if ( ngbe_wol_supported(adapter) ) + adapter->wol = NGBE_PSR_WKUP_CTL_MAG; + if ( (hw->subsystem_device_id & WOL_SUP_MASK) == WOL_SUP){ + /*enable wol first in shadow ram*/ + ngbe_write_ee_hostif(hw, 0x7FE, 0xa50F); + ngbe_write_ee_hostif(hw, 0x7FF, 0x5a5a); + } + hw->wol_enabled = !!(adapter->wol); + wr32(hw, NGBE_PSR_WKUP_CTL, adapter->wol); - netdev->wol_enabled = !!(wx->wol); - wr32(wx, NGBE_PSR_WKUP_CTL, wx->wol); - device_set_wakeup_enable(&pdev->dev, wx->wol); + device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol); - /* Save off EEPROM version number and Option Rom version which + /* + * Save off EEPROM version number and Option Rom version which * together make a unique identify for the eeprom */ - if (saved_ver) { - etrack_id = saved_ver; - } else { - wx_read_ee_hostif(wx, - wx->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_H, - &e2rom_ver); - etrack_id = e2rom_ver << 16; - wx_read_ee_hostif(wx, - wx->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_L, - &e2rom_ver); - etrack_id |= e2rom_ver; - wr32(wx, NGBE_EEPROM_VERSION_STORE_REG, etrack_id); + if(hw->bus.lan_id == 0 || saved_version == 0){ + hw->eeprom.ops.read32(hw, + hw->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_L, + &eeprom_verl); + etrack_id = eeprom_verl; + wr32(hw, NGBE_EEPROM_VERSION_STORE_REG, etrack_id); + wr32(hw, NGBE_CALSUM_CAP_STATUS, 0x10000 | (u32)adapter->eeprom_cap); + }else if(eeprom_cksum_devcap) { + etrack_id = saved_version; + }else { + hw->eeprom.ops.read32(hw, + hw->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_L, + &eeprom_verl); + etrack_id = eeprom_verl; } - snprintf(wx->eeprom_id, sizeof(wx->eeprom_id), - "0x%08x", etrack_id); - eth_hw_addr_set(netdev, wx->mac.perm_addr); - wx_mac_set_default_filter(wx, wx->mac.perm_addr); + /* Make sure offset to SCSI block is valid */ + snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), + "0x%08x", etrack_id); - err = wx_init_interrupt_scheme(wx); - if (err) - goto err_free_mac_table; - /* phy Interface Configuration */ - err = ngbe_mdio_init(wx); - if (err) - goto err_clear_interrupt_scheme; + /* reset the hardware with the new settings */ + err = hw->mac.ops.start_hw(hw); + if (err == NGBE_ERR_EEPROM_VERSION) { + /* We are running on a pre-production device, log a warning */ + e_dev_warn("This device is a pre-production adapter/LOM. 
" + "Please be aware there may be issues associated " + "with your hardware. If you are experiencing " + "problems please contact your hardware " + "representative who provided you with this " + "hardware.\n"); + } else if (err) { + e_dev_err("HW init failed, err = %d\n", err); + goto err_register; + } + /* pick up the PCI bus settings for reporting later */ + hw->mac.ops.get_bus_info(hw); + + strcpy(netdev->name, "eth%d"); err = register_netdev(netdev); if (err) goto err_register; - pci_set_drvdata(pdev, wx); + pci_set_drvdata(pdev, adapter); + adapter->netdev_registered = true; +#ifdef HAVE_PCI_ERS + /* + * call save state here in standalone driver because it relies on + * adapter struct to exist, and needs to call netdev_priv + */ + pci_save_state(pdev); +#endif + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + /* keep stopping all the transmit queues for older kernels */ + netif_tx_stop_all_queues(netdev); + + + + /* print all messages at the end so that we use our eth%d name */ + + /* calculate the expected PCIe bandwidth required for optimal + * performance. Note that some older parts will never have enough + * bandwidth due to being older generation PCIe parts. We clamp these + * parts to ensure that no warning is displayed, as this could confuse + * users otherwise. */ + + expected_gts = ngbe_enumerate_functions(adapter) * 10; + + + /* don't check link if we failed to enumerate functions */ + if (expected_gts > 0) + ngbe_check_minimum_link(adapter, expected_gts); + + hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF); + + if (hw->ncsi_enabled) + e_info(probe, "NCSI : support"); + else + e_info(probe, "NCSI : unsupported"); + + e_info(probe, "PHY: %s, PBA No: Wang Xun GbE Family Controller\n", + hw->phy.type == ngbe_phy_internal?"Internal":"External"); + + e_info(probe, "%02x:%02x:%02x:%02x:%02x:%02x\n", + netdev->dev_addr[0], netdev->dev_addr[1], + netdev->dev_addr[2], netdev->dev_addr[3], + netdev->dev_addr[4], netdev->dev_addr[5]); + +#define INFO_STRING_LEN 255 + info_string = kzalloc(INFO_STRING_LEN, GFP_KERNEL); + if (!info_string) { + e_err(probe, "allocation for info string failed\n"); + goto no_info_string; + } + i_s_var = info_string; + i_s_var += sprintf(info_string, "Enabled Features: "); + i_s_var += sprintf(i_s_var, "RxQ: %d TxQ: %d ", + adapter->num_rx_queues, adapter->num_tx_queues); + if (adapter->flags & NGBE_FLAG_TPH_ENABLED) + i_s_var += sprintf(i_s_var, "TPH "); +#ifndef NGBE_NO_LRO + else if (netdev->features & NETIF_F_LRO) + i_s_var += sprintf(i_s_var, "LRO "); +#endif + + BUG_ON(i_s_var > (info_string + INFO_STRING_LEN)); + /* end features printing */ + e_info(probe, "%s\n", info_string); + kfree(info_string); +no_info_string: + +#ifdef CONFIG_PCI_IOV + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) { + int i; + for (i = 0; i < adapter->num_vfs; i++) + ngbe_vf_configuration(pdev, (i | 0x10000000)); + } +#endif + + e_info(probe, "WangXun(R) Gigabit Network Connection\n"); + cards_found++; + +#ifdef NGBE_SYSFS + if (ngbe_sysfs_init(adapter)) + e_err(probe, "failed to allocate sysfs resources\n"); +#else +#ifdef NGBE_PROCFS + if (ngbe_procfs_init(adapter)) + e_err(probe, "failed to allocate procfs resources\n"); +#endif /* NGBE_PROCFS */ +#endif /* NGBE_SYSFS */ + - netif_info(wx, probe, netdev, - "PHY: %s, PBA No: Wang Xun GbE Family Controller\n", - wx->mac_type == em_mac_type_mdi ? 
"Internal" : "External"); - netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr); +#ifdef HAVE_NGBE_DEBUG_FS + ngbe_dbg_adapter_init(adapter); +#endif /* HAVE_NGBE_DEBUG_FS */ + + if (NGBE_DIS_COMP_TIMEOUT == 1) { + pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &pvalue); + pvalue = pvalue | 0x10; + pcie_capability_write_word(pdev, PCI_EXP_DEVCTL2, pvalue); + } return 0; err_register: - wx_control_hw(wx, false); -err_clear_interrupt_scheme: - wx_clear_interrupt_scheme(wx); -err_free_mac_table: - kfree(wx->rss_key); - kfree(wx->mac_table); -err_pci_release_regions: + ngbe_clear_interrupt_scheme(adapter); + ngbe_release_hw_control(adapter); +err_sw_init: +#ifdef CONFIG_PCI_IOV + ngbe_disable_sriov(adapter); +#endif /* CONFIG_PCI_IOV */ + adapter->flags2 &= ~NGBE_FLAG2_SEARCH_FOR_SFP; + kfree(adapter->mac_table); + iounmap(adapter->io_addr); +err_ioremap: + disable_dev = !test_and_set_bit(__NGBE_DISABLED, &adapter->state); + free_netdev(netdev); +err_alloc_etherdev: pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); -err_pci_disable_dev: - pci_disable_device(pdev); + pci_select_bars(pdev, IORESOURCE_MEM)); +err_pci_reg: +err_dma: + if (!adapter || disable_dev) + pci_disable_device(pdev); + return err; } @@ -706,74 +9905,420 @@ static int ngbe_probe(struct pci_dev *pdev, * Hot-Plug event, or because the driver is going to be removed from * memory. **/ -static void ngbe_remove(struct pci_dev *pdev) +static void __devexit ngbe_remove(struct pci_dev *pdev) { - struct wx *wx = pci_get_drvdata(pdev); + struct ngbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev; + struct ngbe_hw *hw; + bool disable_dev; + + /* if !adapter then we already cleaned up in probe */ + if (!adapter) + return; + + hw = &adapter->hw; + ngbe_mac_set_default_filter(adapter, hw->mac.perm_addr); + + netdev = adapter->netdev; +#ifdef HAVE_NGBE_DEBUG_FS + ngbe_dbg_adapter_exit(adapter); +#endif + + set_bit(__NGBE_REMOVING, &adapter->state); + cancel_work_sync(&adapter->service_task); + + + +#ifdef NGBE_SYSFS + ngbe_sysfs_exit(adapter); +#else +#ifdef NGBE_PROCFS + ngbe_procfs_exit(adapter); +#endif +#endif /* NGBE-SYSFS */ + if (adapter->netdev_registered) { + unregister_netdev(netdev); + adapter->netdev_registered = false; + } + +#ifdef CONFIG_PCI_IOV + ngbe_disable_sriov(adapter); +#endif - netdev = wx->netdev; - unregister_netdev(netdev); + ngbe_clear_interrupt_scheme(adapter); + ngbe_release_hw_control(adapter); + + iounmap(adapter->io_addr); pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_select_bars(pdev, IORESOURCE_MEM)); - kfree(wx->mac_table); - wx_clear_interrupt_scheme(wx); + kfree(adapter->mac_table); + disable_dev = !test_and_set_bit(__NGBE_DISABLED, &adapter->state); + free_netdev(netdev); - pci_disable_device(pdev); + if (disable_dev) + pci_disable_device(pdev); } -static int ngbe_suspend(struct pci_dev *pdev, pm_message_t state) +static bool ngbe_check_cfg_remove(struct ngbe_hw *hw, struct pci_dev *pdev) { - bool wake; + u16 value; + + pci_read_config_word(pdev, PCI_VENDOR_ID, &value); + if (value == NGBE_FAILED_READ_CFG_WORD) { + ngbe_remove_adapter(hw); + return true; + } + return false; +} - ngbe_dev_shutdown(pdev, &wake); - device_set_wakeup_enable(&pdev->dev, wake); +u16 ngbe_read_pci_cfg_word(struct ngbe_hw *hw, u32 reg) +{ + struct ngbe_adapter *adapter = hw->back; + u16 value; - return 0; + if (NGBE_REMOVED(hw->hw_addr)) + return NGBE_FAILED_READ_CFG_WORD; + pci_read_config_word(adapter->pdev, reg, &value); + if (value 
== NGBE_FAILED_READ_CFG_WORD && + ngbe_check_cfg_remove(hw, adapter->pdev)) + return NGBE_FAILED_READ_CFG_WORD; + return value; } -static int ngbe_resume(struct pci_dev *pdev) +#ifdef HAVE_PCI_ERS +#ifdef CONFIG_PCI_IOV +static u32 ngbe_read_pci_cfg_dword(struct ngbe_hw *hw, u32 reg) { - struct net_device *netdev; - struct wx *wx; - u32 err; + struct ngbe_adapter *adapter = hw->back; + u32 value; - wx = pci_get_drvdata(pdev); - netdev = wx->netdev; + if (NGBE_REMOVED(hw->hw_addr)) + return NGBE_FAILED_READ_CFG_DWORD; + pci_read_config_dword(adapter->pdev, reg, &value); + if (value == NGBE_FAILED_READ_CFG_DWORD && + ngbe_check_cfg_remove(hw, adapter->pdev)) + return NGBE_FAILED_READ_CFG_DWORD; + return value; +} +#endif /* CONFIG_PCI_IOV */ +#endif /* HAVE_PCI_ERS */ - err = pci_enable_device_mem(pdev); - if (err) { - wx_err(wx, "Cannot enable PCI device from suspend\n"); - return err; +void ngbe_write_pci_cfg_word(struct ngbe_hw *hw, u32 reg, u16 value) +{ + struct ngbe_adapter *adapter = hw->back; + + if (NGBE_REMOVED(hw->hw_addr)) + return; + pci_write_config_word(adapter->pdev, reg, value); +} + +#ifdef HAVE_PCI_ERS +/** + * ngbe_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t ngbe_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + +#ifdef CONFIG_PCI_IOV + struct ngbe_hw *hw = &adapter->hw; + struct pci_dev *bdev, *vfdev; + u32 dw0, dw1, dw2, dw3; + int vf, pos; + u16 req_id, pf_func; + + if (adapter->num_vfs == 0) + goto skip_bad_vf_detection; + + bdev = pdev->bus->self; + while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT)) + bdev = bdev->bus->self; + + if (!bdev) + goto skip_bad_vf_detection; + + pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR); + if (!pos) + goto skip_bad_vf_detection; + + dw0 = ngbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG); + dw1 = ngbe_read_pci_cfg_dword(hw, + pos + PCI_ERR_HEADER_LOG + 4); + dw2 = ngbe_read_pci_cfg_dword(hw, + pos + PCI_ERR_HEADER_LOG + 8); + dw3 = ngbe_read_pci_cfg_dword(hw, + pos + PCI_ERR_HEADER_LOG + 12); + if (NGBE_REMOVED(hw->hw_addr)) + goto skip_bad_vf_detection; + + req_id = dw1 >> 16; + /* if bit 7 of the requestor ID is set then it's a VF */ + if (!(req_id & 0x0080)) + goto skip_bad_vf_detection; + + pf_func = req_id & 0x01; + if ((pf_func & 1) == (pdev->devfn & 1)) { + vf = (req_id & 0x7F) >> 1; + e_dev_err("VF %d has caused a PCIe error\n", vf); + e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: " + "%8.8x\tdw3: %8.8x\n", + dw0, dw1, dw2, dw3); + + /* Find the pci device of the offending VF */ + vfdev = pci_get_device(PCI_VENDOR_ID_TRUSTNETIC, + NGBE_VF_DEVICE_ID, NULL); + while (vfdev) { + if (vfdev->devfn == (req_id & 0xFF)) + break; + vfdev = pci_get_device(PCI_VENDOR_ID_TRUSTNETIC, + NGBE_VF_DEVICE_ID, vfdev); + } + /* + * There's a slim chance the VF could have been hot + * plugged, so if it is no longer present we don't need + * to issue the VFLR.Just clean up the AER in that case. 
+ */ + if (vfdev) { + ngbe_issue_vf_flr(adapter, vfdev); + /* Free device reference count */ + pci_dev_put(vfdev); + } + + pci_aer_clear_nonfatal_status(pdev); + } + + /* + * Even though the error may have occurred on the other port + * we still need to increment the vf error reference count for + * both ports because the I/O resume function will be called + * for both of them. + */ + adapter->vferr_refcount++; + + return PCI_ERS_RESULT_RECOVERED; + + skip_bad_vf_detection: +#endif /* CONFIG_PCI_IOV */ + + if (!test_bit(__NGBE_SERVICE_INITED, &adapter->state)) + return PCI_ERS_RESULT_DISCONNECT; + + rtnl_lock(); + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) { + rtnl_unlock(); + return PCI_ERS_RESULT_DISCONNECT; } - pci_set_master(pdev); - device_wakeup_disable(&pdev->dev); - ngbe_reset_hw(wx); + if (netif_running(netdev)) + ngbe_close(netdev); + + if (!test_and_set_bit(__NGBE_DISABLED, &adapter->state)) + pci_disable_device(pdev); + rtnl_unlock(); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * ngbe_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + */ +static pci_ers_result_t ngbe_io_slot_reset(struct pci_dev *pdev) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(pdev); + pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED; + + if (pci_enable_device_mem(pdev)) { + e_err(probe, "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + smp_mb__before_atomic(); + clear_bit(__NGBE_DISABLED, &adapter->state); + adapter->hw.hw_addr = adapter->io_addr; + pci_set_master(pdev); + pci_restore_state(pdev); + /* + * After second error pci->state_saved is false, this + * resets it so EEH doesn't break. + */ + pci_save_state(pdev); + + pci_wake_from_d3(pdev, false); + + ngbe_reset(adapter); + + result = PCI_ERS_RESULT_RECOVERED; + } + + pci_aer_clear_nonfatal_status(pdev); + + return result; +} + +/** + * ngbe_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. 
+ */ +static void ngbe_io_resume(struct pci_dev *pdev) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + +#ifdef CONFIG_PCI_IOV + if (adapter->vferr_refcount) { + e_info(drv, "Resuming after VF err\n"); + adapter->vferr_refcount--; + return; + } + +#endif rtnl_lock(); - err = wx_init_interrupt_scheme(wx); - if (!err && netif_running(netdev)) - err = ngbe_open(netdev); - if (!err) - netif_device_attach(netdev); + if (netif_running(netdev)) + ngbe_open(netdev); + + netif_device_attach(netdev); rtnl_unlock(); +} - return 0; +#ifdef HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS +static const struct pci_error_handlers ngbe_err_handler = { +#else +static struct pci_error_handlers ngbe_err_handler = { +#endif + .error_detected = ngbe_io_error_detected, + .slot_reset = ngbe_io_slot_reset, + .resume = ngbe_io_resume, +}; +#endif /* HAVE_PCI_ERS */ + +struct net_device *ngbe_hw_to_netdev(const struct ngbe_hw *hw) +{ + return ((struct ngbe_adapter *)hw->back)->netdev; +} +struct ngbe_msg *ngbe_hw_to_msg(const struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = + container_of(hw, struct ngbe_adapter, hw); + return (struct ngbe_msg *)&adapter->msg_enable; } +#ifdef CONFIG_PM +#ifndef USE_LEGACY_PM_SUPPORT +static const struct dev_pm_ops ngbe_pm_ops = { + .suspend = ngbe_suspend, + .resume = ngbe_resume, + .freeze = ngbe_freeze, + .thaw = ngbe_thaw, + .poweroff = ngbe_suspend, + .restore = ngbe_resume, +}; +#endif /* USE_LEGACY_PM_SUPPORT */ +#endif + +#ifdef HAVE_RHEL6_SRIOV_CONFIGURE +static struct pci_driver_rh ngbe_driver_rh = { + .sriov_configure = ngbe_pci_sriov_configure, +}; +#endif + static struct pci_driver ngbe_driver = { .name = ngbe_driver_name, .id_table = ngbe_pci_tbl, .probe = ngbe_probe, - .remove = ngbe_remove, + .remove = __devexit_p(ngbe_remove), +#ifdef CONFIG_PM +#ifndef USE_LEGACY_PM_SUPPORT + .driver = { + .pm = &ngbe_pm_ops, + }, +#else .suspend = ngbe_suspend, .resume = ngbe_resume, +#endif /* USE_LEGACY_PM_SUPPORT */ +#endif +#ifndef USE_REBOOT_NOTIFIER .shutdown = ngbe_shutdown, +#endif + +#if defined(HAVE_SRIOV_CONFIGURE) + .sriov_configure = ngbe_pci_sriov_configure, +#elif defined(HAVE_RHEL6_SRIOV_CONFIGURE) + .rh_reserved = &ngbe_driver_rh, +#endif + +#ifdef HAVE_PCI_ERS + .err_handler = &ngbe_err_handler +#endif }; -module_pci_driver(ngbe_driver); +/** + * ngbe_init_module - Driver Registration Routine + * + * ngbe_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init ngbe_init_module(void) +{ + int ret; + pr_info("%s - version %s\n", ngbe_driver_string, ngbe_driver_version); + pr_info("%s\n", ngbe_copyright); + + ngbe_wq = create_singlethread_workqueue(ngbe_driver_name); + if (!ngbe_wq) { + pr_err("%s: Failed to create workqueue\n", ngbe_driver_name); + return -ENOMEM; + } + +#ifdef NGBE_PROCFS + if (ngbe_procfs_topdir_init()) + pr_info("Procfs failed to initialize topdir\n"); +#endif + +#ifdef HAVE_NGBE_DEBUG_FS + ngbe_dbg_init(); +#endif + + ret = pci_register_driver(&ngbe_driver); + return ret; +} + +module_init(ngbe_init_module); + +/** + * ngbe_exit_module - Driver Exit Cleanup Routine + * + * ngbe_exit_module is called just before the driver is removed + * from memory. 
+ **/ +static void __exit ngbe_exit_module(void) +{ + pci_unregister_driver(&ngbe_driver); +#ifdef NGBE_PROCFS + ngbe_procfs_topdir_exit(); +#endif + destroy_workqueue(ngbe_wq); +#ifdef HAVE_NGBE_DEBUG_FS + ngbe_dbg_exit(); +#endif /* HAVE_NGBE_DEBUG_FS */ +} + +module_exit(ngbe_exit_module); + +/* ngbe_main.c */ -MODULE_DEVICE_TABLE(pci, ngbe_pci_tbl); -MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, "); -MODULE_DESCRIPTION("WangXun(R) Gigabit PCI Express Network Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mbx.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mbx.c new file mode 100644 index 0000000000000000000000000000000000000000..e55a5bffbcd23f4cd3f2cfe0fa2afc248ffb435b --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mbx.c @@ -0,0 +1,692 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include "ngbe_type.h" +#include "ngbe.h" +#include "ngbe_mbx.h" + + +/** + * ngbe_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfuly read message from buffer + **/ +int ngbe_read_mbx(struct ngbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + int err = NGBE_ERR_MBX; + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + err = hw->mbx.ops.read(hw, msg, size, mbx_id); + + return err; +} + +/** + * ngbe_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +int ngbe_write_mbx(struct ngbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + int err = 0; + + if (size > mbx->size) { + err = NGBE_ERR_MBX; + ERROR_REPORT2(NGBE_ERROR_ARGUMENT, + "Invalid mailbox message size %d", size); + } else + err = hw->mbx.ops.write(hw, msg, size, mbx_id); + + return err; +} + +/** + * ngbe_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +int ngbe_check_for_msg(struct ngbe_hw *hw, u16 mbx_id) +{ + int err = NGBE_ERR_MBX; + + err = hw->mbx.ops.check_for_msg(hw, mbx_id); + + return err; +} + +/** + * ngbe_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +int ngbe_check_for_ack(struct ngbe_hw *hw, u16 mbx_id) +{ + int err = NGBE_ERR_MBX; + + err = hw->mbx.ops.check_for_ack(hw, mbx_id); + + return err; +} + +/** + * ngbe_check_for_rst - checks to see if other side has reset + * @hw: 
pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +int ngbe_check_for_rst(struct ngbe_hw *hw, u16 mbx_id) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + int err = NGBE_ERR_MBX; + + if (mbx->ops.check_for_rst) + err = mbx->ops.check_for_rst(hw, mbx_id); + + return err; +} + +/** + * ngbe_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +static int ngbe_poll_for_msg(struct ngbe_hw *hw, u16 mbx_id) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && hw->mbx.ops.check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->udelay); + } + + if (countdown == 0) + ERROR_REPORT2(NGBE_ERROR_POLLING, + "Polling for VF%d mailbox message timed out", mbx_id); + +out: + return countdown ? 0 : NGBE_ERR_MBX; +} + +/** + * ngbe_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +static int ngbe_poll_for_ack(struct ngbe_hw *hw, u16 mbx_id) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && hw->mbx.ops.check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->udelay); + } + + if (countdown == 0) + ERROR_REPORT2(NGBE_ERROR_POLLING, + "Polling for VF%d mailbox ack timed out", mbx_id); + +out: + return countdown ? 0 : NGBE_ERR_MBX; +} + +/** + * ngbe_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer.
+ **/ +int ngbe_read_posted_mbx(struct ngbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + int err = NGBE_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + err = ngbe_poll_for_msg(hw, mbx_id); + + /* if ack received read message, otherwise we timed out */ + if (!err) + err = hw->mbx.ops.read(hw, msg, size, mbx_id); +out: + return err; +} + +/** + * ngbe_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +int ngbe_write_posted_mbx(struct ngbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + int err; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->timeout) + return NGBE_ERR_MBX; + + /* send msg */ + err = hw->mbx.ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!err) + err = ngbe_poll_for_ack(hw, mbx_id); + + return err; +} + + +/** + * ngbe_init_mbx_ops - Initialize MB function pointers + * @hw: pointer to the HW structure + * + * Setups up the mailbox read and write message function pointers + **/ +void ngbe_init_mbx_ops(struct ngbe_hw *hw) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + + mbx->ops.read_posted = ngbe_read_posted_mbx; + mbx->ops.write_posted = ngbe_write_posted_mbx; +} + + + +/** + * ngbe_read_v2p_mailbox - read v2p mailbox + * @hw: pointer to the HW structure + * + * This function is used to read the v2p mailbox without losing the read to + * clear status bits. + **/ +static u32 ngbe_read_v2p_mailbox(struct ngbe_hw *hw) +{ + u32 v2p_mailbox = rd32(hw, NGBE_VXMAILBOX); + + v2p_mailbox |= hw->mbx.v2p_mailbox; + hw->mbx.v2p_mailbox |= v2p_mailbox & NGBE_VXMAILBOX_R2C_BITS; + + return v2p_mailbox; +} + +/** + * ngbe_check_for_bit_vf - Determine if a status bit was set + * @hw: pointer to the HW structure + * @mask: bitmask for bits to be tested and cleared + * + * This function is used to check for the read to clear bits within + * the V2P mailbox. + **/ +static int ngbe_check_for_bit_vf(struct ngbe_hw *hw, u32 mask) +{ + u32 mailbox = ngbe_read_v2p_mailbox(hw); + + hw->mbx.v2p_mailbox &= ~mask; + + return (mailbox & mask ? 
0 : NGBE_ERR_MBX); +} + +/** + * ngbe_check_for_msg_vf - checks to see if the PF has sent mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the Status bit or else ERR_MBX + **/ +static int ngbe_check_for_msg_vf(struct ngbe_hw *hw, u16 mbx_id) +{ + int err = NGBE_ERR_MBX; + + UNREFERENCED_PARAMETER(mbx_id); + + /* read clear the pf sts bit */ + if (!ngbe_check_for_bit_vf(hw, NGBE_VXMAILBOX_PFSTS)) { + err = 0; + hw->mbx.stats.reqs++; + } + + return err; +} + +/** + * ngbe_check_for_ack_vf - checks to see if the PF has ACK'd + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX + **/ +static int ngbe_check_for_ack_vf(struct ngbe_hw *hw, u16 mbx_id) +{ + int err = NGBE_ERR_MBX; + + UNREFERENCED_PARAMETER(mbx_id); + + /* read clear the pf ack bit */ + if (!ngbe_check_for_bit_vf(hw, NGBE_VXMAILBOX_PFACK)) { + err = 0; + hw->mbx.stats.acks++; + } + + return err; +} + +/** + * ngbe_check_for_rst_vf - checks to see if the PF has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns true if the PF has set the reset done bit or else false + **/ +static int ngbe_check_for_rst_vf(struct ngbe_hw *hw, u16 mbx_id) +{ + int err = NGBE_ERR_MBX; + + UNREFERENCED_PARAMETER(mbx_id); + if (!ngbe_check_for_bit_vf(hw, (NGBE_VXMAILBOX_RSTD | + NGBE_VXMAILBOX_RSTI))) { + err = 0; + hw->mbx.stats.rsts++; + } + + return err; +} + +/** + * ngbe_obtain_mbx_lock_vf - obtain mailbox lock + * @hw: pointer to the HW structure + * + * return SUCCESS if we obtained the mailbox lock + **/ +static int ngbe_obtain_mbx_lock_vf(struct ngbe_hw *hw) +{ + int err = NGBE_ERR_MBX; + u32 mailbox; + + /* Take ownership of the buffer */ + wr32(hw, NGBE_VXMAILBOX, NGBE_VXMAILBOX_VFU); + + /* reserve mailbox for vf use */ + mailbox = ngbe_read_v2p_mailbox(hw); + if (mailbox & NGBE_VXMAILBOX_VFU) + err = 0; + else + ERROR_REPORT2(NGBE_ERROR_POLLING, + "Failed to obtain mailbox lock for VF"); + + return err; +} + +/** + * ngbe_write_mbx_vf - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static int ngbe_write_mbx_vf(struct ngbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + int err; + u16 i; + + UNREFERENCED_PARAMETER(mbx_id); + + /* lock the mailbox to prevent pf/vf race condition */ + err = ngbe_obtain_mbx_lock_vf(hw); + if (err) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + ngbe_check_for_msg_vf(hw, 0); + ngbe_check_for_ack_vf(hw, 0); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + wr32a(hw, NGBE_VXMBMEM, i, msg[i]); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* Drop VFU and interrupt the PF to tell it a message has been sent */ + wr32(hw, NGBE_VXMAILBOX, NGBE_VXMAILBOX_REQ); + +out_no_write: + return err; +} + +/** + * ngbe_read_mbx_vf - Reads a message from the inbox intended for vf + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfuly read message from buffer + **/ +static int ngbe_read_mbx_vf(struct ngbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + int err = 0; + u16 i; + UNREFERENCED_PARAMETER(mbx_id); + + /* lock 
the mailbox to prevent pf/vf race condition */ + err = ngbe_obtain_mbx_lock_vf(hw); + if (err) + goto out_no_read; + + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = rd32a(hw, NGBE_VXMBMEM, i); + + /* Acknowledge receipt and release mailbox, then we're done */ + wr32(hw, NGBE_VXMAILBOX, NGBE_VXMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return err; +} + + +/** + * ngbe_init_mbx_params_vf - set initial values for vf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for vf mailbox + */ +void ngbe_init_mbx_params_vf(struct ngbe_hw *hw) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + + /* start mailbox as timed out and let the reset_hw call set the timeout + * value to begin communications */ + mbx->timeout = 0; + mbx->udelay = NGBE_VF_MBX_INIT_DELAY; + + mbx->size = NGBE_VXMAILBOX_SIZE; + + mbx->ops.read = ngbe_read_mbx_vf; + mbx->ops.write = ngbe_write_mbx_vf; + mbx->ops.read_posted = ngbe_read_posted_mbx; + mbx->ops.write_posted = ngbe_write_posted_mbx; + mbx->ops.check_for_msg = ngbe_check_for_msg_vf; + mbx->ops.check_for_ack = ngbe_check_for_ack_vf; + mbx->ops.check_for_rst = ngbe_check_for_rst_vf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} + +static int ngbe_check_for_bit_pf(struct ngbe_hw *hw, u32 mask) +{ + u32 mbvficr = rd32(hw, NGBE_MBVFICR); + int err = NGBE_ERR_MBX; + + if (mbvficr & mask) { + err = 0; + wr32(hw, NGBE_MBVFICR, mask); + } + + return err; +} + +/** + * ngbe_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static int ngbe_check_for_msg_pf(struct ngbe_hw *hw, u16 vf) +{ + int err = NGBE_ERR_MBX; + u32 vf_bit = vf; + + if (!ngbe_check_for_bit_pf(hw, NGBE_MBVFICR_VFREQ_VF1 << vf_bit)) { + err = 0; + hw->mbx.stats.reqs++; + } + + return err; +} + +/** + * ngbe_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static int ngbe_check_for_ack_pf(struct ngbe_hw *hw, u16 vf) +{ + int err = NGBE_ERR_MBX; + u32 vf_bit = vf; + + if (!ngbe_check_for_bit_pf(hw, NGBE_MBVFICR_VFACK_VF1 << vf_bit)) { + err = 0; + hw->mbx.stats.acks++; + } + + return err; +} + +/** + * ngbe_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static int ngbe_check_for_rst_pf(struct ngbe_hw *hw, u16 vf) +{ + u32 vflre = 0; + int err = NGBE_ERR_MBX; + + vflre = rd32(hw, NGBE_VFLRE); + + if (vflre & (1 << vf)) { + err = 0; + wr32(hw, NGBE_VFLREC, (1 << vf)); + hw->mbx.stats.rsts++; + } + + return err; +} + +/** + * ngbe_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +static int ngbe_obtain_mbx_lock_pf(struct ngbe_hw *hw, u16 vf) +{ + int err = NGBE_ERR_MBX; + u32 mailbox; + + /* Take ownership of the buffer */ + wr32(hw, NGBE_PXMAILBOX(vf), NGBE_PXMAILBOX_PFU); + + /* reserve mailbox for vf use */ + mailbox = rd32(hw, NGBE_PXMAILBOX(vf)); + if (mailbox & NGBE_PXMAILBOX_PFU) + err = 0; + else + ERROR_REPORT2(NGBE_ERROR_POLLING, + "Failed to obtain mailbox lock 
for PF%d", vf); + + return err; +} + +/** + * ngbe_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static int ngbe_write_mbx_pf(struct ngbe_hw *hw, u32 *msg, u16 size, + u16 vf) +{ + int err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = ngbe_obtain_mbx_lock_pf(hw, vf); + if (err) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + ngbe_check_for_msg_pf(hw, vf); + ngbe_check_for_ack_pf(hw, vf); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + wr32a(hw, NGBE_PXMBMEM(vf), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + wr32(hw, NGBE_PXMAILBOX(vf), NGBE_PXMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out_no_write: + return err; + +} + +/** + * ngbe_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. + **/ +static int ngbe_read_mbx_pf(struct ngbe_hw *hw, u32 *msg, u16 size, + u16 vf) +{ + int err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = ngbe_obtain_mbx_lock_pf(hw, vf); + if (err) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = rd32a(hw, NGBE_PXMBMEM(vf), i); + + /* Acknowledge the message and release buffer */ + wr32(hw, NGBE_PXMAILBOX(vf), NGBE_PXMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return err; +} + +/** + * ngbe_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +void ngbe_init_mbx_params_pf(struct ngbe_hw *hw) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + + mbx->timeout = 0; + mbx->udelay = 0; + + mbx->size = NGBE_VXMAILBOX_SIZE; + + mbx->ops.read = ngbe_read_mbx_pf; + mbx->ops.write = ngbe_write_mbx_pf; + mbx->ops.read_posted = ngbe_read_posted_mbx; + mbx->ops.write_posted = ngbe_write_posted_mbx; + mbx->ops.check_for_msg = ngbe_check_for_msg_pf; + mbx->ops.check_for_ack = ngbe_check_for_ack_pf; + mbx->ops.check_for_rst = ngbe_check_for_rst_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mbx.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_mbx.h new file mode 100644 index 0000000000000000000000000000000000000000..068baa7968966c3492a6c55071b15469975e6206 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mbx.h @@ -0,0 +1,172 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + */ + +#ifndef _NGBE_MBX_H_ +#define _NGBE_MBX_H_ + +#define NGBE_VXMAILBOX_SIZE (16) + +/** + * VF Registers + **/ +#define NGBE_VXMAILBOX 0x00600 +#define NGBE_VXMAILBOX_REQ ((0x1) << 0) /* Request for PF Ready bit */ +#define NGBE_VXMAILBOX_ACK ((0x1) << 1) /* Ack PF message received */ +#define NGBE_VXMAILBOX_VFU ((0x1) << 2) /* VF owns the mailbox buffer */ +#define NGBE_VXMAILBOX_PFU ((0x1) << 3) /* PF owns the mailbox buffer */ +#define NGBE_VXMAILBOX_PFSTS ((0x1) << 4) /* PF wrote a message in the MB */ +#define NGBE_VXMAILBOX_PFACK ((0x1) << 5) /* PF ack the previous VF msg */ +#define NGBE_VXMAILBOX_RSTI ((0x1) << 6) /* PF has reset indication */ +#define NGBE_VXMAILBOX_RSTD ((0x1) << 7) /* PF has indicated reset done */ +#define NGBE_VXMAILBOX_R2C_BITS (NGBE_VXMAILBOX_RSTD | \ + NGBE_VXMAILBOX_PFSTS | NGBE_VXMAILBOX_PFACK) + +#define NGBE_VXMBMEM 0x00C00 /* 16*4B */ + +/** + * PF Registers + **/ +#define NGBE_PXMAILBOX(i) (0x00600 + (4 * (i))) /* i=[0,7] */ +#define NGBE_PXMAILBOX_STS ((0x1) << 0) /* Initiate message send to VF */ +#define NGBE_PXMAILBOX_ACK ((0x1) << 1) /* Ack message recv'd from VF */ +#define NGBE_PXMAILBOX_VFU ((0x1) << 2) /* VF owns the mailbox buffer */ +#define NGBE_PXMAILBOX_PFU ((0x1) << 3) /* PF owns the mailbox buffer */ +#define NGBE_PXMAILBOX_RVFU ((0x1) << 4) /* Reset VFU - used when VF stuck*/ + +#define NGBE_PXMBMEM(i) (0x5000 + (64 * (i))) /* i=[0,7] */ + +#define NGBE_VFLRP(i) (0x00490 + (4 * (i))) /* i=[0,1] */ +#define NGBE_VFLRE 0x004A0 +#define NGBE_VFLREC 0x004A8 + +/* SR-IOV specific macros */ +#define NGBE_MBVFICR 0x00480 + + + +#define NGBE_MBVFICR_INDEX(vf) ((vf) >> 4) +#define NGBE_MBVFICR_VFREQ_MASK (0x0000FFFF) /* bits for VF messages */ +#define NGBE_MBVFICR_VFREQ_VF1 (0x00000001) /* bit for VF 1 message */ +#define NGBE_MBVFICR_VFACK_MASK (0xFFFF0000) /* bits for VF acks */ +#define NGBE_MBVFICR_VFACK_VF1 (0x00010000) /* bit for VF 1 ack */ + +/** + * Messages + **/ +/* If it's a NGBE_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is NGBE_PF_*. 
+ * Message ACK's are the value or'd with 0xF0000000 + */ +#define NGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with + * this are the ACK */ +#define NGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with + * this are the NACK */ +#define NGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still + * clear to send requests */ +#define NGBE_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define NGBE_VT_MSGINFO_MASK (0xFF << NGBE_VT_MSGINFO_SHIFT) + +/* definitions to support mailbox API version negotiation */ + +/* + * each element denotes a version of the API; existing numbers may not + * change; any additions must go at the end + */ +enum ngbe_pfvf_api_rev { + ngbe_mbox_api_null, + ngbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ + ngbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ + ngbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ + ngbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ + ngbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ + ngbe_mbox_api_unknown, /* indicates that API version is not known */ +}; + +/* mailbox API, legacy requests */ +#define NGBE_VF_RESET 0x01 /* VF requests reset */ +#define NGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define NGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define NGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ + +/* mailbox API, version 1.0 VF requests */ +#define NGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define NGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ +#define NGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ + +/* mailbox API, version 1.1 VF requests */ +#define NGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ + +#define NGBE_NOFITY_VF_LINK_STATUS 0x01 + +/* mailbox API, version 1.2 VF requests */ +#define NGBE_VF_GET_RETA 0x0a /* VF request for RETA */ +#define NGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ +#define NGBE_VF_UPDATE_XCAST_MODE 0x0c +#define NGBE_VF_BACKUP 0x8001 /* VF requests backup */ + +#define NGBE_VF_GET_LINK_STATUS 0x20 /* VF get link status from PF */ + +/* mode choices for IXGBE_VF_UPDATE_XCAST_MODE */ +enum ngbevf_xcast_modes { + NGBEVF_XCAST_MODE_NONE = 0, + NGBEVF_XCAST_MODE_MULTI, + NGBEVF_XCAST_MODE_ALLMULTI, + NGBEVF_XCAST_MODE_PROMISC, +}; + +/* GET_QUEUES return data indices within the mailbox */ +#define NGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define NGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define NGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ +#define NGBE_VF_DEF_QUEUE 4 /* Default queue offset */ + +/* length of permanent address message returned from PF */ +#define NGBE_VF_PERMADDR_MSG_LEN 4 +/* word in permanent address message with the current multicast type */ +#define NGBE_VF_MC_TYPE_WORD 3 + +#define NGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ + +/* mailbox API, version 2.0 VF requests */ +#define NGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ +#define NGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ +#define NGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */ +#define NGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */ +#define NGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */ +#define NGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */ +#define NGBE_VF_GET_MTU 0x0E /* get bounds on MTU */ +#define NGBE_VF_SET_MTU 0x0F /* set a specific MTU */ + +/* mailbox API, version 2.0 PF requests */ 
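(Editorial note, not part of the patch: the sketch below illustrates how the message IDs and the NGBE_VT_MSGTYPE_ACK encoding above are intended to be used together by a VF-side caller, based only on the helpers and defines present in this patch. The function name ngbe_negotiate_api_version, the 3-word message length, the mbx_id value 0, and the assumption that the PF echoes the request or'd with NGBE_VT_MSGTYPE_ACK are illustrative assumptions; it also assumes hw->mbx.timeout has been set to a nonzero value after reset, since ngbe_write_posted_mbx otherwise returns NGBE_ERR_MBX.)

/* Illustrative sketch only, assuming the ngbe_mbx.h definitions above */
static int ngbe_negotiate_api_version(struct ngbe_hw *hw, int api)
{
	u32 msg[3];
	int err;

	/* word 0 carries the message ID, word 1 the requested API version */
	msg[0] = NGBE_VF_API_NEGOTIATE;
	msg[1] = api;               /* e.g. ngbe_mbox_api_13 */
	msg[2] = 0;

	err = ngbe_write_posted_mbx(hw, msg, 3, 0);
	if (err)
		return err;

	err = ngbe_read_posted_mbx(hw, msg, 3, 0);
	if (err)
		return err;

	/* on success the PF is expected to echo the request or'd with ACK;
	 * a real caller may also need to handle the NACK and CTS bits
	 */
	if (msg[0] == (NGBE_VF_API_NEGOTIATE | NGBE_VT_MSGTYPE_ACK))
		return 0;

	return NGBE_ERR_MBX;
}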
+#define NGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */ + +#define NGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define NGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +int ngbe_read_mbx(struct ngbe_hw *, u32 *, u16, u16); +int ngbe_write_mbx(struct ngbe_hw *, u32 *, u16, u16); +int ngbe_read_posted_mbx(struct ngbe_hw *, u32 *, u16, u16); +int ngbe_write_posted_mbx(struct ngbe_hw *, u32 *, u16, u16); +int ngbe_check_for_msg(struct ngbe_hw *, u16); +int ngbe_check_for_ack(struct ngbe_hw *, u16); +int ngbe_check_for_rst(struct ngbe_hw *, u16); +void ngbe_init_mbx_ops(struct ngbe_hw *hw); +void ngbe_init_mbx_params_vf(struct ngbe_hw *); +void ngbe_init_mbx_params_pf(struct ngbe_hw *); + +#endif /* _NGBE_MBX_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c deleted file mode 100644 index 5007addd119aa54f0c24ecb45525e88ed333bb66..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c +++ /dev/null @@ -1,290 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ - -#include -#include -#include -#include - -#include "../libwx/wx_type.h" -#include "../libwx/wx_hw.h" -#include "ngbe_type.h" -#include "ngbe_mdio.h" - -static int ngbe_phy_read_reg_internal(struct mii_bus *bus, int phy_addr, int regnum) -{ - struct wx *wx = bus->priv; - - if (phy_addr != 0) - return 0xffff; - return (u16)rd32(wx, NGBE_PHY_CONFIG(regnum)); -} - -static int ngbe_phy_write_reg_internal(struct mii_bus *bus, int phy_addr, int regnum, u16 value) -{ - struct wx *wx = bus->priv; - - if (phy_addr == 0) - wr32(wx, NGBE_PHY_CONFIG(regnum), value); - return 0; -} - -static int ngbe_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum) -{ - u32 command, val, device_type = 0; - struct wx *wx = bus->priv; - int ret; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(device_type); - wr32(wx, WX_MSCA, command); - command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) { - wx_err(wx, "Mdio read c22 command did not complete.\n"); - return ret; - } - - return (u16)rd32(wx, WX_MSCC); -} - -static int ngbe_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value) -{ - u32 command, val, device_type = 0; - struct wx *wx = bus->priv; - int ret; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(device_type); - wr32(wx, WX_MSCA, command); - command = value | - WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) - wx_err(wx, "Mdio write c22 command did not complete.\n"); - - return ret; -} - -static int ngbe_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum) -{ - struct wx *wx = bus->priv; - u32 val, command; - int ret; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - 
WX_MSCA_DA(devnum); - wr32(wx, WX_MSCA, command); - command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) { - wx_err(wx, "Mdio read c45 command did not complete.\n"); - return ret; - } - - return (u16)rd32(wx, WX_MSCC); -} - -static int ngbe_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, - int devnum, int regnum, u16 value) -{ - struct wx *wx = bus->priv; - int ret, command; - u16 val; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(devnum); - wr32(wx, WX_MSCA, command); - command = value | - WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) - wx_err(wx, "Mdio write c45 command did not complete.\n"); - - return ret; -} - -static int ngbe_phy_read_reg_c22(struct mii_bus *bus, int phy_addr, int regnum) -{ - struct wx *wx = bus->priv; - u16 phy_data; - - if (wx->mac_type == em_mac_type_mdi) - phy_data = ngbe_phy_read_reg_internal(bus, phy_addr, regnum); - else - phy_data = ngbe_phy_read_reg_mdi_c22(bus, phy_addr, regnum); - - return phy_data; -} - -static int ngbe_phy_write_reg_c22(struct mii_bus *bus, int phy_addr, - int regnum, u16 value) -{ - struct wx *wx = bus->priv; - int ret; - - if (wx->mac_type == em_mac_type_mdi) - ret = ngbe_phy_write_reg_internal(bus, phy_addr, regnum, value); - else - ret = ngbe_phy_write_reg_mdi_c22(bus, phy_addr, regnum, value); - - return ret; -} - -static void ngbe_handle_link_change(struct net_device *dev) -{ - struct wx *wx = netdev_priv(dev); - struct phy_device *phydev; - u32 lan_speed, reg; - - phydev = wx->phydev; - if (!(wx->link != phydev->link || - wx->speed != phydev->speed || - wx->duplex != phydev->duplex)) - return; - - wx->link = phydev->link; - wx->speed = phydev->speed; - wx->duplex = phydev->duplex; - switch (phydev->speed) { - case SPEED_10: - lan_speed = 0; - break; - case SPEED_100: - lan_speed = 1; - break; - case SPEED_1000: - default: - lan_speed = 2; - break; - } - wr32m(wx, NGBE_CFG_LAN_SPEED, 0x3, lan_speed); - - if (phydev->link) { - reg = rd32(wx, WX_MAC_TX_CFG); - reg &= ~WX_MAC_TX_CFG_SPEED_MASK; - reg |= WX_MAC_TX_CFG_SPEED_1G | WX_MAC_TX_CFG_TE; - wr32(wx, WX_MAC_TX_CFG, reg); - /* Re configure MAC RX */ - reg = rd32(wx, WX_MAC_RX_CFG); - wr32(wx, WX_MAC_RX_CFG, reg); - wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); - reg = rd32(wx, WX_MAC_WDG_TIMEOUT); - wr32(wx, WX_MAC_WDG_TIMEOUT, reg); - } - phy_print_status(phydev); -} - -int ngbe_phy_connect(struct wx *wx) -{ - int ret; - - /* The MAC only has add the Tx delay and it can not be modified. - * So just disable TX delay in PHY, and it is does not matter to - * internal phy. 
- */ - ret = phy_connect_direct(wx->netdev, - wx->phydev, - ngbe_handle_link_change, - PHY_INTERFACE_MODE_RGMII_RXID); - if (ret) { - wx_err(wx, "PHY connect failed.\n"); - return ret; - } - - return 0; -} - -static void ngbe_phy_fixup(struct wx *wx) -{ - struct phy_device *phydev = wx->phydev; - struct ethtool_eee eee; - - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); - - phydev->mac_managed_pm = true; - if (wx->mac_type != em_mac_type_mdi) - return; - /* disable EEE, internal phy does not support eee */ - memset(&eee, 0, sizeof(eee)); - phy_ethtool_set_eee(phydev, &eee); -} - -int ngbe_mdio_init(struct wx *wx) -{ - struct pci_dev *pdev = wx->pdev; - struct mii_bus *mii_bus; - int ret; - - mii_bus = devm_mdiobus_alloc(&pdev->dev); - if (!mii_bus) - return -ENOMEM; - - mii_bus->name = "ngbe_mii_bus"; - mii_bus->read = ngbe_phy_read_reg_c22; - mii_bus->write = ngbe_phy_write_reg_c22; - mii_bus->phy_mask = GENMASK(31, 4); - mii_bus->parent = &pdev->dev; - mii_bus->priv = wx; - - if (wx->mac_type == em_mac_type_rgmii) { - mii_bus->read_c45 = ngbe_phy_read_reg_mdi_c45; - mii_bus->write_c45 = ngbe_phy_write_reg_mdi_c45; - } - - snprintf(mii_bus->id, MII_BUS_ID_SIZE, "ngbe-%x", pci_dev_id(pdev)); - ret = devm_mdiobus_register(&pdev->dev, mii_bus); - if (ret) - return ret; - - wx->phydev = phy_find_first(mii_bus); - if (!wx->phydev) - return -ENODEV; - - phy_attached_info(wx->phydev); - ngbe_phy_fixup(wx); - - wx->link = 0; - wx->speed = 0; - wx->duplex = 0; - - return 0; -} diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h deleted file mode 100644 index 0a6400dd89c4c08da275e04bc6a25577045c77a3..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * WangXun Gigabit PCI Express Linux driver - * Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. - */ - -#ifndef _NGBE_MDIO_H_ -#define _NGBE_MDIO_H_ - -int ngbe_phy_connect(struct wx *wx); -int ngbe_mdio_init(struct wx *wx); -#endif /* _NGBE_MDIO_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_osdep.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_osdep.h new file mode 100644 index 0000000000000000000000000000000000000000..26bff72d103bd744df0af7a7c9d7b5261bbc32df --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_osdep.h @@ -0,0 +1,219 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ */ + +/* glue for the OS independent part of ngbe + * includes register access macros + */ + +#ifndef _NGBE_OSDEP_H_ +#define _NGBE_OSDEP_H_ + +#include +#include +#include +#include +#include +#include "ngbe_kcompat.h" + +#define NGBE_CPU_TO_BE16(_x) cpu_to_be16(_x) +#define NGBE_BE16_TO_CPU(_x) be16_to_cpu(_x) +#define NGBE_CPU_TO_BE32(_x) cpu_to_be32(_x) +#define NGBE_BE32_TO_CPU(_x) be32_to_cpu(_x) + +#define msec_delay(_x) msleep(_x) + +#define usec_delay(_x) udelay(_x) + +#define IOMEM __iomem + +#define NGBE_NAME "ngbe" + +/* #define DBG 1 */ + +#define DPRINTK(nlevel, klevel, fmt, args...) \ + ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ + printk(KERN_##klevel NGBE_NAME ": %s: %s: " fmt, \ + adapter->netdev->name, \ + __func__, ## args))) + +#ifndef _WIN32 +#define ngbe_emerg(fmt, ...) printk(KERN_EMERG fmt, ## __VA_ARGS__) +#define ngbe_alert(fmt, ...) printk(KERN_ALERT fmt, ## __VA_ARGS__) +#define ngbe_crit(fmt, ...) printk(KERN_CRIT fmt, ## __VA_ARGS__) +#define ngbe_error(fmt, ...) printk(KERN_ERR fmt, ## __VA_ARGS__) +#define ngbe_warn(fmt, ...) printk(KERN_WARNING fmt, ## __VA_ARGS__) +#define ngbe_notice(fmt, ...) printk(KERN_NOTICE fmt, ## __VA_ARGS__) +#define ngbe_info(fmt, ...) printk(KERN_INFO fmt, ## __VA_ARGS__) +#define ngbe_print(fmt, ...) printk(KERN_DEBUG fmt, ## __VA_ARGS__) +#define ngbe_trace(fmt, ...) printk(KERN_INFO fmt, ## __VA_ARGS__) +#else /* _WIN32 */ +#define ngbe_error(lvl, fmt, ...) \ + DbgPrintEx(DPFLTR_IHVNETWORK_ID, DPFLTR_ERROR_LEVEL, \ + "%s-error: %s@%d, " fmt, \ + "ngbe", __FUNCTION__, __LINE__, ## __VA_ARGS__) +#endif /* !_WIN32 */ + +#ifdef DBG +#ifndef _WIN32 +#define ngbe_debug(fmt, ...) \ + printk(KERN_DEBUG \ + "%s-debug: %s@%d, " fmt, \ + "ngbe", __FUNCTION__, __LINE__, ## __VA_ARGS__) +#else /* _WIN32 */ +#define ngbe_debug(fmt, ...) \ + DbgPrintEx(DPFLTR_IHVNETWORK_ID, DPFLTR_ERROR_LEVEL, \ + "%s-debug: %s@%d, " fmt, \ + "ngbe", __FUNCTION__, __LINE__, ## __VA_ARGS__) +#endif /* _WIN32 */ +#else /* DBG */ +#define ngbe_debug(fmt, ...) do {} while (0) +#endif /* DBG */ + + +#ifdef DBG +#define ASSERT(_x) BUG_ON(!(_x)) +#define DEBUGOUT(S) printk(KERN_DEBUG S) +#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT2(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT3(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT4(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT5(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT6(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGFUNC(fmt, ...) ngbe_debug(fmt, ## __VA_ARGS__) +#else +#define ASSERT(_x) do {} while (0) +#define DEBUGOUT(S) do {} while (0) +#define DEBUGOUT1(S, A...) do {} while (0) +#define DEBUGOUT2(S, A...) do {} while (0) +#define DEBUGOUT3(S, A...) do {} while (0) +#define DEBUGOUT4(S, A...) do {} while (0) +#define DEBUGOUT5(S, A...) do {} while (0) +#define DEBUGOUT6(S, A...) do {} while (0) +#define DEBUGFUNC(fmt, ...) do {} while (0) +#endif + +#define NGBE_SFP_DETECT_RETRIES 2 + +struct ngbe_hw; +struct ngbe_msg { + u16 msg_enable; +}; +struct net_device *ngbe_hw_to_netdev(const struct ngbe_hw *hw); +struct ngbe_msg *ngbe_hw_to_msg(const struct ngbe_hw *hw); + +#define hw_dbg(hw, format, arg...) \ + netdev_dbg(ngbe_hw_to_netdev(hw), format, ## arg) +#define hw_err(hw, format, arg...) \ + netdev_err(ngbe_hw_to_netdev(hw), format, ## arg) +#define e_dev_info(format, arg...) \ + dev_info(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_warn(format, arg...) 
\ + dev_warn(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_notice(format, arg...) \ + dev_notice(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dbg(msglvl, format, arg...) \ + netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_crit(msglvl, format, arg...) \ + netif_crit(adapter, msglvl, adapter->netdev, format, ## arg) + +#define NGBE_FAILED_READ_CFG_DWORD 0xffffffffU +#define NGBE_FAILED_READ_CFG_WORD 0xffffU +#define NGBE_FAILED_READ_CFG_BYTE 0xffU + +extern u32 ngbe_read_reg(struct ngbe_hw *hw, u32 reg, bool quiet); +extern u16 ngbe_read_pci_cfg_word(struct ngbe_hw *hw, u32 reg); +extern void ngbe_write_pci_cfg_word(struct ngbe_hw *hw, u32 reg, u16 value); + +#define NGBE_READ_PCIE_WORD ngbe_read_pci_cfg_word +#define NGBE_WRITE_PCIE_WORD ngbe_write_pci_cfg_word +#define NGBE_R32_Q(h, r) ngbe_read_reg(h, r, true) + +#ifndef writeq +#define writeq(val, addr) do { writel((u32) (val), addr); \ + writel((u32) (val >> 32), (addr + 4)); \ + } while (0); +#endif + +#define NGBE_EEPROM_GRANT_ATTEMPS 100 +#define NGBE_HTONL(_i) htonl(_i) +#define NGBE_NTOHL(_i) ntohl(_i) +#define NGBE_NTOHS(_i) ntohs(_i) +#define NGBE_CPU_TO_LE32(_i) cpu_to_le32(_i) +#define NGBE_LE32_TO_CPUS(_i) le32_to_cpus(_i) + +enum { + NGBE_ERROR_SOFTWARE, + NGBE_ERROR_POLLING, + NGBE_ERROR_INVALID_STATE, + NGBE_ERROR_UNSUPPORTED, + NGBE_ERROR_ARGUMENT, + NGBE_ERROR_CAUTION, +}; + +#define ERROR_REPORT(level, format, arg...) do { \ + switch (level) { \ + case NGBE_ERROR_SOFTWARE: \ + case NGBE_ERROR_CAUTION: \ + case NGBE_ERROR_POLLING: \ + netif_warn(ngbe_hw_to_msg(hw), drv, ngbe_hw_to_netdev(hw), \ + format, ## arg); \ + break; \ + case NGBE_ERROR_INVALID_STATE: \ + case NGBE_ERROR_UNSUPPORTED: \ + case NGBE_ERROR_ARGUMENT: \ + netif_err(ngbe_hw_to_msg(hw), hw, ngbe_hw_to_netdev(hw), \ + format, ## arg); \ + break; \ + default: \ + break; \ + } \ +} while (0) + +#define ERROR_REPORT1 ERROR_REPORT +#define ERROR_REPORT2 ERROR_REPORT +#define ERROR_REPORT3 ERROR_REPORT + +#define UNREFERENCED_XPARAMETER +#define UNREFERENCED_1PARAMETER(_p) do { \ + uninitialized_var(_p); \ +} while (0) +#define UNREFERENCED_2PARAMETER(_p, _q) do { \ + uninitialized_var(_p); \ + uninitialized_var(_q); \ +} while (0) +#define UNREFERENCED_3PARAMETER(_p, _q, _r) do { \ + uninitialized_var(_p); \ + uninitialized_var(_q); \ + uninitialized_var(_r); \ +} while (0) +#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) do { \ + uninitialized_var(_p); \ + uninitialized_var(_q); \ + uninitialized_var(_r); \ + uninitialized_var(_s); \ +} while (0) +#define UNREFERENCED_PARAMETER(_p) UNREFERENCED_1PARAMETER(_p) + +#endif /* _NGBE_OSDEP_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_param.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_param.c new file mode 100644 index 0000000000000000000000000000000000000000..9a8cc589f497ad558ab16d774cde4a02e2488d0b --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_param.c @@ -0,0 +1,932 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + + +#include +#include + +#include "ngbe.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ +#define NGBE_MAX_NIC 32 +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define STRINGIFY(foo) #foo /* magic for getting defines into strings */ +#define XSTRINGIFY(bar) STRINGIFY(bar) + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define NGBE_PARAM_INIT { [0 ... NGBE_MAX_NIC] = OPTION_UNSET } +#ifndef module_param_array +/* Module Parameters are always initialized to -1, so that the driver + * can tell the difference between no user specified value or the + * user asking for the default value. + * The true default values are loaded in when ngbe_check_options is called. + * + * This is a GCC extension to ANSI C. + * See the item "Labelled Elements in Initializers" in the section + * "Extensions to the C Language Family" of the GCC documentation. + */ + +#define NGBE_PARAM(X, desc) \ + static const int __devinitconst X[NGBE_MAX_NIC+1] = NGBE_PARAM_INIT; \ + MODULE_PARM(X, "1-" __MODULE_STRING(NGBE_MAX_NIC) "i"); \ + MODULE_PARM_DESC(X, desc); +#else /* !module_param_array */ +#define NGBE_PARAM(X, desc) \ + static int __devinitdata X[NGBE_MAX_NIC+1] = NGBE_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); +#endif /* module_param_array */ + +/* IntMode (Interrupt Mode) + * + * Valid Range: 0-2 + * - 0 - Legacy Interrupt + * - 1 - MSI Interrupt + * - 2 - MSI-X Interrupt(s) + * + * Default Value: 2 + */ +NGBE_PARAM(InterruptType, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " + "default IntMode (deprecated)"); +NGBE_PARAM(IntMode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " + "default 2"); +#define NGBE_INT_LEGACY 0 +#define NGBE_INT_MSI 1 +#define NGBE_INT_MSIX 2 +#define NGBE_DEFAULT_INT NGBE_INT_MSIX + +/* MQ - Multiple Queue enable/disable + * + * Valid Range: 0, 1 + * - 0 - disables MQ + * - 1 - enables MQ + * + * Default Value: 1 + */ + +NGBE_PARAM(MQ, "Disable or enable Multiple Queues, default 1"); + + +/* RSS - Receive-Side Scaling (RSS) Descriptor Queues + * + * Valid Range: 0-64 + * - 0 - enables RSS and sets the Desc. Q's to min(64, num_online_cpus()). + * - 1-64 - enables RSS and sets the Desc. Q's to the specified value. + * + * Default Value: 0 + */ + +NGBE_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues, " + "default 0=number of cpus"); + +/* VMDQ - Virtual Machine Device Queues (VMDQ) + * + * Valid Range: 1-16 + * - 1 Disables VMDQ by allocating only a single queue. + * - 2-16 - enables VMDQ and sets the Desc. Q's to the specified value. 
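/* Editorial aside, not part of this patch: a minimal standalone sketch of the
 * per-board parameter pattern used above. Each option is an array indexed by
 * board number, pre-filled with OPTION_UNSET (-1) via the GCC ranged
 * initializer so ngbe_check_options() can tell "not supplied" apart from
 * "supplied as 0". All names below (ex_*/EX_*) are illustrative, not driver
 * symbols, and the modprobe line is only an example invocation.
 */
#include <stdio.h>

#define EX_MAX_NIC      32
#define EX_UNSET        (-1)

static int ex_int_mode[EX_MAX_NIC + 1] = { [0 ... EX_MAX_NIC] = EX_UNSET };

int main(void)
{
        int bd;

        /* e.g. "modprobe ngbe IntMode=1,2" would populate entries 0 and 1 */
        ex_int_mode[0] = 1;     /* board 0: MSI */
        ex_int_mode[1] = 2;     /* board 1: MSI-X */

        for (bd = 0; bd < 3; bd++) {
                if (ex_int_mode[bd] == EX_UNSET)
                        printf("board %d: use driver default\n", bd);
                else
                        printf("board %d: user requested %d\n", bd, ex_int_mode[bd]);
        }
        return 0;
}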
+ * + * Default Value: 1 + */ + +#define NGBE_DEFAULT_NUM_VMDQ 8 + +NGBE_PARAM(VMDQ, "Number of Virtual Machine Device Queues: 0/1 = disable, " + "2-16 enable (default=" XSTRINGIFY(NGBE_DEFAULT_NUM_VMDQ) ")"); + +#ifdef CONFIG_PCI_IOV +/* max_vfs - SR I/O Virtualization + * + * Valid Range: 0-63 + * - 0 Disables SR-IOV + * - 1-63 - enables SR-IOV and sets the number of VFs enabled + * + * Default Value: 0 + */ + +#define MAX_SRIOV_VFS 8 + +NGBE_PARAM(max_vfs, "Number of Virtual Functions: 0 = disable (default), " + "1-" XSTRINGIFY(MAX_SRIOV_VFS) " = enable " + "this many VFs"); + +/* VEPA - Set internal bridge to VEPA mode + * + * Valid Range: 0-1 + * - 0 Set bridge to VEB mode + * - 1 Set bridge to VEPA mode + * + * Default Value: 0 + */ +/* + *Note: + *===== + * This provides ability to ensure VEPA mode on the internal bridge even if + * the kernel does not support the netdev bridge setting operations. +*/ +NGBE_PARAM(VEPA, "VEPA Bridge Mode: 0 = VEB (default), 1 = VEPA"); +#endif + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 980-500000 (0=off, 1=dynamic) + * + * Default Value: 1 + */ +#define DEFAULT_ITR (NGBE_STATIC_ITR == 0) || \ + (NGBE_STATIC_ITR == 1)?NGBE_STATIC_ITR:(u16)((1000000/NGBE_STATIC_ITR) << 2) + +NGBE_PARAM(InterruptThrottleRate, "Maximum interrupts per second, per vector, " + "(0,1,980-500000), default 1"); +#define MAX_ITR NGBE_MAX_INT_RATE +#define MIN_ITR NGBE_MIN_INT_RATE + +#ifndef NGBE_NO_LLI + +/* LLIPort (Low Latency Interrupt TCP Port) + * + * Valid Range: 0 - 65535 + * + * Default Value: 0 (disabled) + */ +NGBE_PARAM(LLIPort, "Low Latency Interrupt TCP Port (0-65535)"); + +#define DEFAULT_LLIPORT 0 +#define MAX_LLIPORT 0xFFFF +#define MIN_LLIPORT 0 + + +/* LLISize (Low Latency Interrupt on Packet Size) + * + * Valid Range: 0 - 1500 + * + * Default Value: 0 (disabled) + */ +NGBE_PARAM(LLISize, "Low Latency Interrupt on Packet Size (0-1500)"); + +#define DEFAULT_LLISIZE 0 +#define MAX_LLISIZE 1500 +#define MIN_LLISIZE 0 + +/* LLIEType (Low Latency Interrupt Ethernet Type) + * + * Valid Range: 0 - 0x8fff + * + * Default Value: 0 (disabled) + */ +NGBE_PARAM(LLIEType, "Low Latency Interrupt Ethernet Protocol Type"); + +#define DEFAULT_LLIETYPE 0 +#define MAX_LLIETYPE 0x8fff +#define MIN_LLIETYPE 0 + +/* LLIVLANP (Low Latency Interrupt on VLAN priority threshold) + * + * Valid Range: 0 - 7 + * + * Default Value: 0 (disabled) + */ +NGBE_PARAM(LLIVLANP, "Low Latency Interrupt on VLAN priority threshold"); + +#define DEFAULT_LLIVLANP 0 +#define MAX_LLIVLANP 7 +#define MIN_LLIVLANP 0 + +#endif /* NGBE_NO_LLI */ +#ifdef HAVE_TX_MQ +/* Software ATR packet sample rate + * + * Valid Range: 0-255 0 = off, 1-255 = rate of Tx packet inspection + * + * Default Value: 20 + */ +NGBE_PARAM(AtrSampleRate, "Software ATR Tx packet sample rate"); + +#define NGBE_MAX_ATR_SAMPLE_RATE 255 +#define NGBE_MIN_ATR_SAMPLE_RATE 1 +#define NGBE_ATR_SAMPLE_RATE_OFF 0 +#define NGBE_DEFAULT_ATR_SAMPLE_RATE 20 +#endif /* HAVE_TX_MQ */ + +/* Enable/disable Large Receive Offload + * + * Valid Values: 0(off), 1(on) + * + * Default Value: 1 + */ +NGBE_PARAM(LRO, "Large Receive Offload (0,1), default 1 = on"); + +/* Enable/disable support for DMA coalescing + * + * Valid Values: 0(off), 41 - 10000(on) + * + * Default Value: 0 + */ +NGBE_PARAM(dmac_watchdog, + "DMA coalescing watchdog in microseconds (0,41-10000)," + "default 0 = off"); + +/* Rx buffer mode + * + * Valid Range: 0-1 0 = no header split, 1 = hdr split + * + * Default Value: 0 + */ +NGBE_PARAM(RxBufferMode, 
"0=(default)no header split\n" + "\t\t\t1=hdr split for recognized packet\n"); + +#define NGBE_RXBUFMODE_NO_HEADER_SPLIT 0 +#define NGBE_RXBUFMODE_HEADER_SPLIT 1 +#define NGBE_DEFAULT_RXBUFMODE NGBE_RXBUFMODE_NO_HEADER_SPLIT + + +struct ngbe_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + const char *msg; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct ngbe_opt_list { + int i; + char *str; + } *p; + } l; + } arg; +}; + +static int __devinit ngbe_validate_option(u32 *value, + struct ngbe_option *opt) +{ + int val = (int)*value; + + if (val == OPTION_UNSET) { + ngbe_info("ngbe: Invalid %s specified (%d), %s\n", + opt->name, val, opt->err); + *value = (u32)opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (val) { + case OPTION_ENABLED: + ngbe_info("ngbe: %s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + ngbe_info("ngbe: %s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if ((val >= opt->arg.r.min && val <= opt->arg.r.max) || + val == opt->def) { + if (opt->msg) + ngbe_info("ngbe: %s set to %d, %s\n", + opt->name, val, opt->msg); + else + ngbe_info("ngbe: %s set to %d\n", + opt->name, val); + return 0; + } + break; + case list_option: { + int i; + const struct ngbe_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (val == ent->i) { + if (ent->str[0] != '\0') + ngbe_info("%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG_ON(1); + } + + ngbe_info("ngbe: Invalid %s specified (%d), %s\n", + opt->name, val, opt->err); + *value = (u32)opt->def; + return -1; +} + +/** + * ngbe_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. 
+ **/ +void __devinit ngbe_check_options(struct ngbe_adapter *adapter) +{ + u32 bd = adapter->bd_number; + u32 *aflags = &adapter->flags; + struct ngbe_ring_feature *feature = adapter->ring_feature; + u32 vmdq; + + if (bd >= NGBE_MAX_NIC) { + ngbe_notice("Warning: no configuration for board #%d\n", bd); + ngbe_notice("Using defaults for all values\n"); +#ifndef module_param_array + bd = NGBE_MAX_NIC; +#endif + } + + { /* Interrupt Mode */ + u32 int_mode; + static struct ngbe_option opt = { + .type = range_option, + .name = "Interrupt Mode", + .err = + "using default of "__MODULE_STRING(NGBE_DEFAULT_INT), + .def = NGBE_DEFAULT_INT, + .arg = { .r = { .min = NGBE_INT_LEGACY, + .max = NGBE_INT_MSIX} } + }; + +#ifdef module_param_array + if (num_IntMode > bd || num_InterruptType > bd) { +#endif + int_mode = IntMode[bd]; + if (int_mode == OPTION_UNSET) + int_mode = InterruptType[bd]; + ngbe_validate_option(&int_mode, &opt); + switch (int_mode) { + case NGBE_INT_MSIX: + if (!(*aflags & NGBE_FLAG_MSIX_CAPABLE)) + ngbe_info( + "Ignoring MSI-X setting; " + "support unavailable\n"); + break; + case NGBE_INT_MSI: + if (!(*aflags & NGBE_FLAG_MSI_CAPABLE)) { + ngbe_info( + "Ignoring MSI setting; " + "support unavailable\n"); + } else { + *aflags &= ~NGBE_FLAG_MSIX_CAPABLE; + } + break; + case NGBE_INT_LEGACY: + default: + *aflags &= ~NGBE_FLAG_MSIX_CAPABLE; + *aflags &= ~NGBE_FLAG_MSI_CAPABLE; + break; + } +#ifdef module_param_array + } else { + /* default settings */ + if (opt.def == NGBE_INT_MSIX && + *aflags & NGBE_FLAG_MSIX_CAPABLE) { + *aflags |= NGBE_FLAG_MSIX_CAPABLE; + *aflags |= NGBE_FLAG_MSI_CAPABLE; + } else if (opt.def == NGBE_INT_MSI && + *aflags & NGBE_FLAG_MSI_CAPABLE) { + *aflags &= ~NGBE_FLAG_MSIX_CAPABLE; + *aflags |= NGBE_FLAG_MSI_CAPABLE; + } else { + *aflags &= ~NGBE_FLAG_MSIX_CAPABLE; + *aflags &= ~NGBE_FLAG_MSI_CAPABLE; + } + } +#endif + } + { /* Multiple Queue Support */ + static struct ngbe_option opt = { + .type = enable_option, + .name = "Multiple Queue Support", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + +#ifdef module_param_array + if (num_MQ > bd) { +#endif + u32 mq = MQ[bd]; + ngbe_validate_option(&mq, &opt); + if (mq) + *aflags |= NGBE_FLAG_MQ_CAPABLE; + else + *aflags &= ~NGBE_FLAG_MQ_CAPABLE; +#ifdef module_param_array + } else { + if (opt.def == OPTION_ENABLED) + *aflags |= NGBE_FLAG_MQ_CAPABLE; + else + *aflags &= ~NGBE_FLAG_MQ_CAPABLE; + } +#endif + /* Check Interoperability */ + if ((*aflags & NGBE_FLAG_MQ_CAPABLE) && + !(*aflags & NGBE_FLAG_MSIX_CAPABLE)) { + DPRINTK(PROBE, INFO, + "Multiple queues are not supported while MSI-X " + "is disabled. 
Disabling Multiple Queues.\n"); + *aflags &= ~NGBE_FLAG_MQ_CAPABLE; + } + } + + { /* Receive-Side Scaling (RSS) */ + static struct ngbe_option opt = { + .type = range_option, + .name = "Receive-Side Scaling (RSS)", + .err = "using default.", + .def = 0, + .arg = { .r = { .min = 0, + .max = 1} } + }; + u32 rss = RSS[bd]; + /* adjust Max allowed RSS queues based on MAC type */ + opt.arg.r.max = min_t(int, ngbe_max_rss_indices(adapter), + num_online_cpus()); + +#ifdef module_param_array + if (num_RSS > bd) { +#endif + ngbe_validate_option(&rss, &opt); + /* base it off num_online_cpus() with hardware limit */ + if (!rss) + rss = min_t(int, opt.arg.r.max, + num_online_cpus()); + + feature[RING_F_RSS].limit = (u16)rss; +#ifdef module_param_array + } else if (opt.def == 0) { + rss = min_t(int, ngbe_max_rss_indices(adapter), + num_online_cpus()); + feature[RING_F_RSS].limit = rss; + } +#endif + /* Check Interoperability */ + if (rss > 1) { + if (!(*aflags & NGBE_FLAG_MQ_CAPABLE)) { + DPRINTK(PROBE, INFO, + "Multiqueue is disabled. " + "Limiting RSS.\n"); + feature[RING_F_RSS].limit = 1; + } + } + adapter->flags2 |= NGBE_FLAG2_RSS_ENABLED; + } + { /* Virtual Machine Device Queues (VMDQ) */ + static struct ngbe_option opt = { + .type = range_option, + .name = "Virtual Machine Device Queues (VMDQ)", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = NGBE_MAX_VMDQ_INDICES + } } + }; + +#ifdef module_param_array + if (num_VMDQ > bd) { +#endif + vmdq = VMDQ[bd]; + + ngbe_validate_option(&vmdq, &opt); + + /* zero or one both mean disabled from our driver's + * perspective */ + if (vmdq > 1) { + *aflags |= NGBE_FLAG_VMDQ_ENABLED; + } else + *aflags &= ~NGBE_FLAG_VMDQ_ENABLED; + + feature[RING_F_VMDQ].limit = (u16)vmdq; +#ifdef module_param_array + } else { + if (opt.def == OPTION_DISABLED) + *aflags &= ~NGBE_FLAG_VMDQ_ENABLED; + else + *aflags |= NGBE_FLAG_VMDQ_ENABLED; + + feature[RING_F_VMDQ].limit = opt.def; + } +#endif + /* Check Interoperability */ + if (*aflags & NGBE_FLAG_VMDQ_ENABLED) { + if (!(*aflags & NGBE_FLAG_MQ_CAPABLE)) { + DPRINTK(PROBE, INFO, + "VMDQ is not supported while multiple " + "queues are disabled. " + "Disabling VMDQ.\n"); + *aflags &= ~NGBE_FLAG_VMDQ_ENABLED; + feature[RING_F_VMDQ].limit = 0; + } + } + } +#ifdef CONFIG_PCI_IOV + { /* Single Root I/O Virtualization (SR-IOV) */ + static struct ngbe_option opt = { + .type = range_option, + .name = "I/O Virtualization (IOV)", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = MAX_SRIOV_VFS} } + }; + +#ifdef module_param_array + if (num_max_vfs > bd) { +#endif + u32 vfs = max_vfs[bd]; + if (ngbe_validate_option(&vfs, &opt)) { + vfs = 0; + DPRINTK(PROBE, INFO, + "max_vfs out of range " + "Disabling SR-IOV.\n"); + } + + adapter->num_vfs = vfs; + + if (vfs) + *aflags |= NGBE_FLAG_SRIOV_ENABLED; + else + *aflags &= ~NGBE_FLAG_SRIOV_ENABLED; +#ifdef module_param_array + } else { + if (opt.def == OPTION_DISABLED) { + adapter->num_vfs = 0; + *aflags &= ~NGBE_FLAG_SRIOV_ENABLED; + } else { + adapter->num_vfs = opt.def; + *aflags |= NGBE_FLAG_SRIOV_ENABLED; + } + } +#endif + + /* Check Interoperability */ + if (*aflags & NGBE_FLAG_SRIOV_ENABLED) { + if (!(*aflags & NGBE_FLAG_SRIOV_CAPABLE)) { + DPRINTK(PROBE, INFO, + "IOV is not supported on this " + "hardware. 
Disabling IOV.\n"); + *aflags &= ~NGBE_FLAG_SRIOV_ENABLED; + adapter->num_vfs = 0; + } else if (!(*aflags & NGBE_FLAG_MQ_CAPABLE)) { + DPRINTK(PROBE, INFO, + "IOV is not supported while multiple " + "queues are disabled. " + "Disabling IOV.\n"); + *aflags &= ~NGBE_FLAG_SRIOV_ENABLED; + adapter->num_vfs = 0; + } + } + } + { /* VEPA Bridge Mode enable for SR-IOV mode */ + static struct ngbe_option opt = { + .type = range_option, + .name = "VEPA Bridge Mode Enable", + .err = "defaulting to disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = OPTION_ENABLED} } + }; + +#ifdef module_param_array + if (num_VEPA > bd) { +#endif + u32 vepa = VEPA[bd]; + ngbe_validate_option(&vepa, &opt); + if (vepa) + adapter->flags |= + NGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; +#ifdef module_param_array + } else { + if (opt.def == OPTION_ENABLED) + adapter->flags |= + NGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } +#endif + } +#endif /* CONFIG_PCI_IOV */ + { /* Interrupt Throttling Rate */ + static struct ngbe_option opt = { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of "__MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR } } + }; + +#ifdef module_param_array + if (num_InterruptThrottleRate > bd) { +#endif + u32 itr = InterruptThrottleRate[bd]; + switch (itr) { + case 0: + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + adapter->rx_itr_setting = 0; + break; + case 1: + DPRINTK(PROBE, INFO, "dynamic interrupt " + "throttling enabled\n"); + adapter->rx_itr_setting = 1; + break; + default: + ngbe_validate_option(&itr, &opt); + /* the first bit is used as control */ + adapter->rx_itr_setting = (u16)((1000000/itr) << 2); + break; + } + adapter->tx_itr_setting = adapter->rx_itr_setting; +#ifdef module_param_array + } else { + adapter->rx_itr_setting = opt.def; + adapter->tx_itr_setting = opt.def; + } +#endif + } +#ifndef NGBE_NO_LLI + { /* Low Latency Interrupt TCP Port*/ + static struct ngbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt TCP Port", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIPORT), + .def = DEFAULT_LLIPORT, + .arg = { .r = { .min = MIN_LLIPORT, + .max = MAX_LLIPORT } } + }; + +#ifdef module_param_array + if (num_LLIPort > bd) { +#endif + adapter->lli_port = LLIPort[bd]; + if (adapter->lli_port) { + ngbe_validate_option(&adapter->lli_port, &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } +#ifdef module_param_array + } else { + adapter->lli_port = opt.def; + } +#endif + } + { /* Low Latency Interrupt on Packet Size */ + static struct ngbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on Packet Size", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLISIZE), + .def = DEFAULT_LLISIZE, + .arg = { .r = { .min = MIN_LLISIZE, + .max = MAX_LLISIZE } } + }; + +#ifdef module_param_array + if (num_LLISize > bd) { +#endif + adapter->lli_size = LLISize[bd]; + if (adapter->lli_size) { + ngbe_validate_option(&adapter->lli_size, &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } +#ifdef module_param_array + } else { + adapter->lli_size = opt.def; + } +#endif + } + { /* Low Latency Interrupt EtherType*/ + static struct ngbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on Ethernet Protocol " + "Type", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIETYPE), + .def = DEFAULT_LLIETYPE, + .arg = { .r = { .min = MIN_LLIETYPE, + .max = 
MAX_LLIETYPE } } + }; + +#ifdef module_param_array + if (num_LLIEType > bd) { +#endif + adapter->lli_etype = LLIEType[bd]; + if (adapter->lli_etype) { + ngbe_validate_option(&adapter->lli_etype, + &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } +#ifdef module_param_array + } else { + adapter->lli_etype = opt.def; + } +#endif + } + { /* LLI VLAN Priority */ + static struct ngbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on VLAN priority " + "threshold", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIVLANP), + .def = DEFAULT_LLIVLANP, + .arg = { .r = { .min = MIN_LLIVLANP, + .max = MAX_LLIVLANP } } + }; + +#ifdef module_param_array + if (num_LLIVLANP > bd) { +#endif + adapter->lli_vlan_pri = LLIVLANP[bd]; + if (adapter->lli_vlan_pri) { + ngbe_validate_option(&adapter->lli_vlan_pri, + &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } +#ifdef module_param_array + } else { + adapter->lli_vlan_pri = opt.def; + } +#endif + } +#endif /* NGBE_NO_LLI */ +#ifdef HAVE_TX_MQ + { /* Flow Director ATR Tx sample packet rate */ + static struct ngbe_option opt = { + .type = range_option, + .name = "Software ATR Tx packet sample rate", + .err = "using default of " + __MODULE_STRING(NGBE_DEFAULT_ATR_SAMPLE_RATE), + .def = NGBE_DEFAULT_ATR_SAMPLE_RATE, + .arg = {.r = {.min = NGBE_ATR_SAMPLE_RATE_OFF, + .max = NGBE_MAX_ATR_SAMPLE_RATE} } + }; + static const char atr_string[] = + "ATR Tx Packet sample rate set to"; + + if (num_AtrSampleRate > bd) { + adapter->atr_sample_rate = AtrSampleRate[bd]; + + if (adapter->atr_sample_rate) { + ngbe_validate_option(&adapter->atr_sample_rate, + &opt); + DPRINTK(PROBE, INFO, "%s %d\n", atr_string, + adapter->atr_sample_rate); + } + } else { + adapter->atr_sample_rate = opt.def; + } + } +#endif /* HAVE_TX_MQ */ + { /* LRO - Set Large Receive Offload */ + struct ngbe_option opt = { + .type = enable_option, + .name = "LRO - Large Receive Offload", + .err = "defaulting to Disabled", +/* lro switch to ON when run on SW and FT platform */ +/* emerald temp setting */ +#if defined(TXGBE_SUPPORT_DEEPIN_SW) || \ + defined(TXGBE_SUPPORT_KYLIN_SW) || \ + defined(TXGBE_SUPPORT_KYLIN_FT) + .def = OPTION_ENABLED +#else + .def = OPTION_DISABLED +#endif + }; + struct net_device *netdev = adapter->netdev; + +#ifdef NGBE_NO_LRO + opt.def = OPTION_DISABLED; + +#endif +#ifdef module_param_array + if (num_LRO > bd) { +#endif + u32 lro = LRO[bd]; + ngbe_validate_option(&lro, &opt); + if (lro) + netdev->features |= NETIF_F_LRO; + else + netdev->features &= ~NETIF_F_LRO; +#ifdef module_param_array + } else if (opt.def == OPTION_ENABLED) { + netdev->features |= NETIF_F_LRO; + } else { + netdev->features &= ~NETIF_F_LRO; + } +#endif +#ifdef NGBE_NO_LRO + if ((netdev->features & NETIF_F_LRO)) { + DPRINTK(PROBE, INFO, + "RSC is not supported on this " + "hardware. 
Disabling RSC.\n"); + netdev->features &= ~NETIF_F_LRO; + } +#endif /* NGBE_NO_LRO */ + } + { /* DMA Coalescing */ + struct ngbe_option opt = { + .type = range_option, + .name = "dmac_watchdog", + .err = "defaulting to 0 (disabled)", + .def = 0, + .arg = { .r = { .min = 41, .max = 10000 } }, + }; + const char *cmsg = "DMA coalescing not supported on this " + "hardware"; + + opt.err = cmsg; + opt.msg = cmsg; + opt.arg.r.min = 0; + opt.arg.r.max = 0; + +#ifdef module_param_array + if (num_dmac_watchdog > bd) { +#endif + u32 dmac_wd = dmac_watchdog[bd]; + + ngbe_validate_option(&dmac_wd, &opt); + adapter->hw.mac.dmac_config.watchdog_timer = (u16)dmac_wd; +#ifdef module_param_array + } else { + adapter->hw.mac.dmac_config.watchdog_timer = opt.def; + } +#endif + } + + { /* Rx buffer mode */ + u32 rx_buf_mode; + static struct ngbe_option opt = { + .type = range_option, + .name = "Rx buffer mode", + .err = "using default of " + __MODULE_STRING(NGBE_DEFAULT_RXBUFMODE), + .def = NGBE_DEFAULT_RXBUFMODE, + .arg = {.r = {.min = NGBE_RXBUFMODE_NO_HEADER_SPLIT, + .max = NGBE_RXBUFMODE_HEADER_SPLIT} } + + }; + +#ifdef module_param_array + if (num_RxBufferMode > bd) { +#endif + rx_buf_mode = RxBufferMode[bd]; + ngbe_validate_option(&rx_buf_mode, &opt); + switch (rx_buf_mode) { + case NGBE_RXBUFMODE_NO_HEADER_SPLIT: + *aflags &= ~NGBE_FLAG_RX_HS_ENABLED; + break; + case NGBE_RXBUFMODE_HEADER_SPLIT: + *aflags |= NGBE_FLAG_RX_HS_ENABLED; + break; + default: + break; + } +#ifdef module_param_array + } else { + *aflags &= ~NGBE_FLAG_RX_HS_ENABLED; + } +#endif + + } +} diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_pcierr.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_pcierr.c new file mode 100644 index 0000000000000000000000000000000000000000..aa2528cb1c7c326e624483327c764992360d1307 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_pcierr.c @@ -0,0 +1,293 @@ +#include +#include +#include "ngbe_pcierr.h" +#include "ngbe.h" +#define NGBE_ROOT_PORT_INTR_ON_MESG_MASK (PCI_ERR_ROOT_CMD_COR_EN| \ + PCI_ERR_ROOT_CMD_NONFATAL_EN| \ + PCI_ERR_ROOT_CMD_FATAL_EN) + +#ifndef PCI_ERS_RESULT_NO_AER_DRIVER +/* No AER capabilities registered for the driver */ +#define PCI_ERS_RESULT_NO_AER_DRIVER ((__force pci_ers_result_t) 6) +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +/* redefinition because centos 6 can't use pci_walk_bus in pci.h*/ + +struct rw_semaphore pci_bus_sem; + +/** pci_walk_bus - walk devices on/under bus, calling callback. + * @top bus whose devices should be walked + * @cb callback to be called for each device found + * @userdata arbitrary pointer to be passed to callback. + * + * Walk the given bus, including any bridged devices + * on buses under this bus. Call the provided callback + * on each device found. + * + * We check the return of @cb each time. If it returns anything + * other than 0, we break out. 
+ * + */ +void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), + void *userdata) +{ + struct pci_dev *dev; + struct pci_bus *bus; + struct list_head *next; + int retval; + + bus = top; + down_read(&pci_bus_sem); + next = top->devices.next; + for (;;) { + if (next == &bus->devices) { + /* end of this bus, go up or finish */ + if (bus == top) + break; + next = bus->self->bus_list.next; + bus = bus->self->bus; + continue; + } + dev = list_entry(next, struct pci_dev, bus_list); + if (dev->subordinate) { + /* this is a pci-pci bridge, do its devices next */ + next = dev->subordinate->devices.next; + bus = dev->subordinate; + } else + next = dev->bus_list.next; + + retval = cb(dev, userdata); + if (retval) + break; + } + up_read(&pci_bus_sem); +} +#endif + +static pci_ers_result_t merge_result(enum pci_ers_result orig, + enum pci_ers_result new) +{ + if (new == PCI_ERS_RESULT_NO_AER_DRIVER) + return PCI_ERS_RESULT_NO_AER_DRIVER; + if (new == PCI_ERS_RESULT_NONE) + return orig; + switch (orig) { + case PCI_ERS_RESULT_CAN_RECOVER: + case PCI_ERS_RESULT_RECOVERED: + orig = new; + break; + case PCI_ERS_RESULT_DISCONNECT: + if (new == PCI_ERS_RESULT_NEED_RESET) + orig = PCI_ERS_RESULT_NEED_RESET; + break; + default: + break; + } + return orig; +} + +static int ngbe_report_error_detected(struct pci_dev *dev, + pci_channel_state_t state, + enum pci_ers_result *result) +{ + pci_ers_result_t vote; + const struct pci_error_handlers *err_handler; + + device_lock(&dev->dev); + if ( + !dev->driver || + !dev->driver->err_handler || + !dev->driver->err_handler->error_detected) { + /* + * If any device in the subtree does not have an error_detected + * callback, PCI_ERS_RESULT_NO_AER_DRIVER prevents subsequent + * error callbacks of "any" device in the subtree, and will + * exit in the disconnected error state. 
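/* Editorial aside, not part of this patch: the ngbe_report_*() walkers in this
 * file invoke the standard struct pci_error_handlers callbacks that an
 * endpoint driver registers through its struct pci_driver. A hedged
 * kernel-context sketch of such a handler set; the ex_* names are
 * illustrative, not taken from this driver.
 */
#include <linux/pci.h>

static pci_ers_result_t ex_error_detected(struct pci_dev *pdev,
                                          pci_channel_state_t state)
{
        /* stop I/O; request a reset unless the device is permanently gone */
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;
        return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t ex_slot_reset(struct pci_dev *pdev)
{
        /* bring the device back up after the bus/slot reset */
        if (pci_enable_device_mem(pdev))
                return PCI_ERS_RESULT_DISCONNECT;
        pci_set_master(pdev);
        return PCI_ERS_RESULT_RECOVERED;
}

static void ex_resume(struct pci_dev *pdev)
{
        /* restart normal traffic once recovery has succeeded */
}

static const struct pci_error_handlers ex_err_handler = {
        .error_detected = ex_error_detected,
        .slot_reset     = ex_slot_reset,
        .resume         = ex_resume,
};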
+ */ + if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) + vote = PCI_ERS_RESULT_NO_AER_DRIVER; + else + vote = PCI_ERS_RESULT_NONE; + } else { + err_handler = dev->driver->err_handler; + vote = err_handler->error_detected(dev, state); + } + + *result = merge_result(*result, vote); + device_unlock(&dev->dev); + return 0; +} + +static int ngbe_report_frozen_detected(struct pci_dev *dev, void *data) +{ + return ngbe_report_error_detected(dev, pci_channel_io_frozen, data); +} + +static int ngbe_report_mmio_enabled(struct pci_dev *dev, void *data) +{ + pci_ers_result_t vote, *result = data; + const struct pci_error_handlers *err_handler; + + device_lock(&dev->dev); + if (!dev->driver || + !dev->driver->err_handler || + !dev->driver->err_handler->mmio_enabled) + goto out; + + err_handler = dev->driver->err_handler; + vote = err_handler->mmio_enabled(dev); + *result = merge_result(*result, vote); +out: + device_unlock(&dev->dev); + return 0; +} + +static int ngbe_report_slot_reset(struct pci_dev *dev, void *data) +{ + pci_ers_result_t vote, *result = data; + const struct pci_error_handlers *err_handler; + + device_lock(&dev->dev); + if (!dev->driver || + !dev->driver->err_handler || + !dev->driver->err_handler->slot_reset) + goto out; + + err_handler = dev->driver->err_handler; + vote = err_handler->slot_reset(dev); + *result = merge_result(*result, vote); +out: + device_unlock(&dev->dev); + return 0; +} + +static int ngbe_report_resume(struct pci_dev *dev, void *data) +{ + const struct pci_error_handlers *err_handler; + + device_lock(&dev->dev); + dev->error_state = pci_channel_io_normal; + if ( + !dev->driver || + !dev->driver->err_handler || + !dev->driver->err_handler->resume) + goto out; + + err_handler = dev->driver->err_handler; + err_handler->resume(dev); +out: + device_unlock(&dev->dev); + return 0; +} + +void ngbe_pcie_do_recovery(struct pci_dev *dev) +{ + pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER; + struct pci_bus *bus; + u32 reg32; + int pos; + int delay = 1; + u32 id; + u16 ctrl; + /* + * Error recovery runs on all subordinates of the first downstream port. + * If the downstream port detected the error, it is cleared at the end. + */ + if (!(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)) + dev = dev->bus->self; + bus = dev->subordinate; + + pci_walk_bus(bus, ngbe_report_frozen_detected, &status); + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); + if (pos) { + /* Disable Root's interrupt in response to error messages */ + pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, ®32); + reg32 &= ~NGBE_ROOT_PORT_INTR_ON_MESG_MASK; + pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32); + } + + pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl); + ctrl |= PCI_BRIDGE_CTL_BUS_RESET; + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); + + /* + * * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double + * * this to 2ms to ensure that we meet the minimum requirement. + * */ + + msleep(2); + ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); + + /* + * * Trhfa for conventional PCI is 2^25 clock cycles. + * * Assuming a minimum 33MHz clock this results in a 1s + * * delay before we can consider subordinate devices to + * * be re-initialized. PCIe has some ways to shorten this, + * * but we don't make use of them yet. 
+ * */ + ssleep(1); + + pci_read_config_dword(dev, PCI_COMMAND, &id); + while (id == ~0) { + if (delay > 60000) { + pci_warn(dev, "not ready %dms after %s; giving up\n", + delay - 1, "bus_reset"); + return; + } + + if (delay > 1000) + pci_info(dev, "not ready %dms after %s; waiting\n", + delay - 1, "bus_reset"); + + msleep(delay); + delay *= 2; + pci_read_config_dword(dev, PCI_COMMAND, &id); + } + + if (delay > 1000) + pci_info(dev, "ready %dms after %s\n", delay - 1, + "bus_reset"); + + pci_info(dev, "Root Port link has been reset\n"); + + if (pos) { + /* Clear Root Error Status */ + pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, ®32); + pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, reg32); + + /* Enable Root Port's interrupt in response to error messages */ + pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, ®32); + reg32 |= NGBE_ROOT_PORT_INTR_ON_MESG_MASK; + pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32); + } + + if (status == PCI_ERS_RESULT_CAN_RECOVER) { + status = PCI_ERS_RESULT_RECOVERED; + pci_dbg(dev, "broadcast mmio_enabled message\n"); + pci_walk_bus(bus, ngbe_report_mmio_enabled, &status); + } + + if (status == PCI_ERS_RESULT_NEED_RESET) { + /* + * TODO: Should call platform-specific + * functions to reset slot before calling + * drivers' slot_reset callbacks? + */ + status = PCI_ERS_RESULT_RECOVERED; + pci_dbg(dev, "broadcast slot_reset message\n"); + pci_walk_bus(bus, ngbe_report_slot_reset, &status); + } + + if (status != PCI_ERS_RESULT_RECOVERED) + goto failed; + + pci_dbg(dev, "broadcast resume message\n"); + pci_walk_bus(bus, ngbe_report_resume, &status); + +failed: + return; +} + diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_pcierr.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_pcierr.h new file mode 100644 index 0000000000000000000000000000000000000000..81b41ec65338175281912c4b8f3731abff2628b0 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_pcierr.h @@ -0,0 +1,6 @@ +#ifndef _NGBE_PCIERR_H_ +#define _NGBE_PCIERR_H_ + +void ngbe_pcie_do_recovery(struct pci_dev *dev); +#endif + diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_phy.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_phy.c new file mode 100644 index 0000000000000000000000000000000000000000..5a094701fb420cb24b3ee406e51bfe708bd7c786 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_phy.c @@ -0,0 +1,1777 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include "ngbe_phy.h" +#include "ngbe_hw.h" + +/** + * ngbe_check_reset_blocked - check status of MNG FW veto bit + * @hw: pointer to the hardware structure + * + * This function checks the MMNGC.MNG_VETO bit to see if there are + * any constraints on link from manageability. For MAC's that don't + * have this bit just return faluse since the link can not be blocked + * via this method. 
+ **/ +bool ngbe_check_reset_blocked(struct ngbe_hw *hw) +{ + u32 mmngc; + + mmngc = rd32(hw, NGBE_MIS_ST); + if (mmngc & NGBE_MIS_ST_MNG_VETO) { + return true; + } + + return false; +} + +/* For internal phy only */ +static int ngbe_phy_read_reg(struct ngbe_hw *hw, u32 reg_offset, + u32 page, u16 *phy_data) +{ + /* clear input */ + *phy_data = 0; + + if (!((page == 0xa43) && ((reg_offset == 0x1a) || (reg_offset == 0x1d)))) + wr32(hw, NGBE_PHY_CONFIG(NGBE_INTERNAL_PHY_PAGE_SELECT_OFFSET), page); + + *phy_data = 0xFFFF & rd32(hw, NGBE_PHY_CONFIG(reg_offset)); + + return NGBE_OK; +} + +/* For internal phy only */ +static int ngbe_phy_write_reg(struct ngbe_hw *hw, u32 reg_offset, + u32 page, u16 phy_data) +{ + + if (!((page == 0xa43) && ((reg_offset == 0x1a) || (reg_offset == 0x1d)))) + wr32(hw, NGBE_PHY_CONFIG(NGBE_INTERNAL_PHY_PAGE_SELECT_OFFSET), page); + wr32(hw, NGBE_PHY_CONFIG(reg_offset), phy_data); + + return NGBE_OK; +} + +static int ngbe_check_internal_phy_id(struct ngbe_hw *hw) +{ + u16 phy_id_high = 0; + u16 phy_id_low = 0; + u16 phy_id = 0; + + ngbe_gphy_wait_mdio_access_on(hw); + + hw->phy.ops.read_reg(hw, NGBE_MDI_PHY_ID1_OFFSET, 0, &phy_id_high); + phy_id = phy_id_high << 6; + hw->phy.ops.read_reg(hw, NGBE_MDI_PHY_ID2_OFFSET, 0, &phy_id_low); + phy_id |= (phy_id_low & NGBE_MDI_PHY_ID_MASK) >> 10; + + if (phy_id != NGBE_INTERNAL_PHY_ID) { + ERROR_REPORT1(NGBE_ERROR_UNSUPPORTED, + "internal phy id 0x%x not supported.\n", phy_id); + return NGBE_ERR_DEVICE_NOT_SUPPORTED; + } + hw->phy.id = (u32)phy_id; + + return NGBE_OK; +} + + +/** + * ngbe_read_phy_mdi - Reads a value from a specified PHY register without + * the SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + **/ +int ngbe_phy_read_reg_mdi(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data) +{ + u32 command; + int status = 0; + + /* setup and write the address cycle command */ + command = NGBE_MSCA_RA(reg_addr) | + NGBE_MSCA_PA(hw->phy.addr) | + NGBE_MSCA_DA(device_type); + wr32(hw, NGBE_MSCA, command); + + command = NGBE_MSCC_CMD(NGBE_MSCA_CMD_READ) | + NGBE_MSCC_BUSY | + NGBE_MDIO_CLK(6); + wr32(hw, NGBE_MSCC, command); + + /* wait to complete */ + status = po32m(hw, NGBE_MSCC, + NGBE_MSCC_BUSY, ~NGBE_MSCC_BUSY, + NGBE_MDIO_TIMEOUT, 10); + if (status != 0) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "PHY address command did not complete.\n"); + return NGBE_ERR_PHY; + } + + /* read data from MSCC */ + *phy_data = 0xFFFF & rd32(hw, NGBE_MSCC); + + return 0; +} + +/** + * ngbe_write_phy_reg_mdi - Writes a value to specified PHY register + * without SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +int ngbe_phy_write_reg_mdi(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data) +{ + u32 command; + int status = 0; + + /* setup and write the address cycle command */ + command = NGBE_MSCA_RA(reg_addr) | + NGBE_MSCA_PA(hw->phy.addr) | + NGBE_MSCA_DA(device_type); + wr32(hw, NGBE_MSCA, command); + + command = phy_data | NGBE_MSCC_CMD(NGBE_MSCA_CMD_WRITE) | + NGBE_MSCC_BUSY | NGBE_MDIO_CLK(6); + wr32(hw, NGBE_MSCC, command); + + /* wait to complete */ + status = po32m(hw, NGBE_MSCC, + NGBE_MSCC_BUSY, ~NGBE_MSCC_BUSY, + NGBE_MDIO_TIMEOUT, 10); + if (status != 0) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "PHY address command did not complete.\n"); + return 
NGBE_ERR_PHY; + } + + return 0; +} + +int ngbe_phy_read_reg_ext_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data) +{ + int status = 0; + + status = ngbe_phy_write_reg_mdi(hw, 0x1e, device_type, reg_addr); + if (!status) + status = ngbe_phy_read_reg_mdi(hw, 0x1f, device_type, phy_data); + + return status; +} + +int ngbe_phy_write_reg_ext_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data) +{ + int status = 0; + + status = ngbe_phy_write_reg_mdi(hw, 0x1e, device_type, reg_addr); + if (!status) + status = ngbe_phy_write_reg_mdi(hw, 0x1f, device_type, phy_data); + + return status; +} + +int ngbe_phy_read_reg_sds_ext_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data) +{ + int status = 0; + + status = ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x02); + if (!status) + status = ngbe_phy_read_reg_ext_yt8521s(hw, reg_addr, device_type, phy_data); + ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x00); + + return status; +} + +int ngbe_phy_write_reg_sds_ext_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data) +{ + int status = 0; + + status = ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x02); + if (!status) + status = ngbe_phy_write_reg_ext_yt8521s(hw, reg_addr, device_type, phy_data); + ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x00); + + return status; +} + + +int ngbe_phy_read_reg_sds_mii_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data) +{ + int status = 0; + + status = ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x02); + if (!status) + status = ngbe_phy_read_reg_mdi(hw, reg_addr, device_type, phy_data); + ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x00); + + return status; +} + +int ngbe_phy_write_reg_sds_mii_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data) +{ + int status = 0; + + status = ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x02); + if (!status) + status = ngbe_phy_write_reg_mdi(hw, reg_addr, device_type, phy_data); + ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x00); + + return status; +} + +static int ngbe_check_mdi_phy_id(struct ngbe_hw *hw) +{ + u16 phy_id_high = 0; + u16 phy_id_low = 0; + u32 phy_id = 0; + u8 value = 0; + u32 phy_mode = 0; + + if (hw->phy.type == ngbe_phy_m88e1512) { + /* select page 0 */ + ngbe_phy_write_reg_mdi(hw, 22, 0, 0); + } else { + /* select page 1 */ + ngbe_phy_write_reg_mdi(hw, 22, 0, 1); + } + + ngbe_phy_read_reg_mdi(hw, NGBE_MDI_PHY_ID1_OFFSET, 0, &phy_id_high); + phy_id = phy_id_high << 6; + ngbe_phy_read_reg_mdi(hw, NGBE_MDI_PHY_ID2_OFFSET, 0, &phy_id_low); + phy_id |= (phy_id_low & NGBE_MDI_PHY_ID_MASK) >> 10; + + if (phy_id != NGBE_M88E1512_PHY_ID) { + ERROR_REPORT1(NGBE_ERROR_UNSUPPORTED, + "MDI phy id 0x%x not supported.\n", phy_id); + return NGBE_ERR_DEVICE_NOT_SUPPORTED; + } + hw->phy.id = phy_id; + + if (hw->phy.type == ngbe_phy_m88e1512_unknown) { + ngbe_flash_read_dword(hw, 0xff010, &phy_mode); + switch (hw->bus.lan_id) { + case 0: + value = (u8)phy_mode; + break; + case 1: + value = (u8)(phy_mode >> 8); + break; + case 2: + value = (u8)(phy_mode >> 16); + break; + case 3: + value = (u8)(phy_mode >> 24); + break; + default: + break; + } + if ((value & 0x7) == 0) + /* mode select to RGMII-to-copper */ + hw->phy.type = ngbe_phy_m88e1512; + else if ((value & 0x7) == 0x2) + /* mode select to RGMII-to-sfi */ + hw->phy.type = ngbe_phy_m88e1512_sfi; + else { + 
ERROR_REPORT1(NGBE_ERROR_UNSUPPORTED, + "marvell 88E1512 mode %x is not supported.\n", value); + return NGBE_ERR_DEVICE_NOT_SUPPORTED; + } + } + + return NGBE_OK; +} + +static bool ngbe_validate_phy_addr(struct ngbe_hw *hw, u32 phy_addr) +{ + u16 phy_id = 0; + bool valid = false; + unsigned long flags; + + hw->phy.addr = phy_addr; + + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x3, 0, &phy_id); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + if (phy_id != 0xFFFF && phy_id != 0x0) + valid = true; + + return valid; +} + +static int ngbe_check_yt_phy_id(struct ngbe_hw *hw) +{ + u16 phy_id = 0; + bool valid = false; + u32 phy_addr; + unsigned long flags; + + for (phy_addr = 0; phy_addr < 32; phy_addr++) { + valid = ngbe_validate_phy_addr(hw, phy_addr); + if (valid) { + hw->phy.addr = phy_addr; + break; + } + } + if (!valid) + return NGBE_ERR_DEVICE_NOT_SUPPORTED; + + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x3, 0, &phy_id); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + if ((phy_id != NGBE_YT8521S_PHY_ID) && (phy_id != NGBE_YT8531S_PHY_ID)) { + ERROR_REPORT1(NGBE_ERROR_UNSUPPORTED, + "MDI phy id 0x%x not supported.\n", phy_id); + return NGBE_ERR_DEVICE_NOT_SUPPORTED; + } + hw->phy.id = phy_id; + + return NGBE_OK; +} + +/** + * ngbe_init_phy_ops - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. + * +**/ +int ngbe_phy_init(struct ngbe_hw *hw) +{ + int ret_val = 0; + u16 value = 0; + int i; + u8 lan_id = hw->bus.lan_id; + struct ngbe_adapter *adapter = hw->back; + unsigned long flags; + + /* set fwsw semaphore mask for phy first */ + if (!hw->phy.phy_semaphore_mask) { + hw->phy.phy_semaphore_mask = NGBE_MNG_SWFW_SYNC_SW_PHY; + } + + if ((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA) + return 0; + /* init phy.addr according to HW design */ + + hw->phy.addr = 0; + spin_lock_init(&hw->phy_lock); + + /* Identify the PHY or SFP module */ + ret_val = hw->phy.ops.identify(hw); + if (ret_val == NGBE_ERR_SFP_NOT_SUPPORTED) + return ret_val; + + /* enable interrupts, only link status change and an done is allowed */ + if (hw->phy.type == ngbe_phy_internal || hw->phy.type == ngbe_phy_internal_yt8521s_sfi) { + value = NGBE_INTPHY_INT_LSC | NGBE_INTPHY_INT_ANC; + hw->phy.ops.write_reg(hw, 0x12, 0xa42, value); + ngbe_flash_read_dword(hw , 0xfe010 + lan_id * 8, &adapter->gphy_efuse[0]); + ngbe_flash_read_dword(hw , 0xfe010 + lan_id * 8 + 4, &adapter->gphy_efuse[1]); + } else if (hw->phy.type == ngbe_phy_m88e1512 || + hw->phy.type == ngbe_phy_m88e1512_sfi) { + hw->phy.ops.write_reg_mdi(hw, 22, 0, 2); + hw->phy.ops.read_reg_mdi(hw, 21, 0, &value); + value &= ~NGBE_M88E1512_RGM_TTC; + value |= NGBE_M88E1512_RGM_RTC; + hw->phy.ops.write_reg_mdi(hw, 21, 0, value); + if (hw->phy.type == ngbe_phy_m88e1512) + hw->phy.ops.write_reg_mdi(hw, 22, 0, 0); + else + hw->phy.ops.write_reg_mdi(hw, 22, 0, 1); + + hw->phy.ops.write_reg_mdi(hw, 0, 0, NGBE_MDI_PHY_RESET); + for (i = 0; i < 15; i++) { + hw->phy.ops.read_reg_mdi(hw, 0, 0, &value); + if (value & NGBE_MDI_PHY_RESET) + msleep(1); + else + break; + } + + if (i == 15) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "phy reset exceeds maximum waiting period.\n"); + return NGBE_ERR_PHY_TIMEOUT; + } + + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + return ret_val; + } + + /* set LED2 to interrupt 
output and INTn active low */ + hw->phy.ops.write_reg_mdi(hw, 22, 0, 3); + hw->phy.ops.read_reg_mdi(hw, 18, 0, &value); + value |= NGBE_M88E1512_INT_EN; + value &= ~(NGBE_M88E1512_INT_POL); + hw->phy.ops.write_reg_mdi(hw, 18, 0, value); + + if (hw->phy.type == ngbe_phy_m88e1512_sfi) { + hw->phy.ops.write_reg_mdi(hw, 22, 0, 1); + hw->phy.ops.read_reg_mdi(hw, 16, 0, &value); + value &= ~0x4; + hw->phy.ops.write_reg_mdi(hw, 16, 0, value); + } + + /* enable link status change and AN complete interrupts */ + value = NGBE_M88E1512_INT_ANC | NGBE_M88E1512_INT_LSC; + if (hw->phy.type == ngbe_phy_m88e1512) + hw->phy.ops.write_reg_mdi(hw, 22, 0, 0); + else + hw->phy.ops.write_reg_mdi(hw, 22, 0, 1); + hw->phy.ops.write_reg_mdi(hw, 18, 0, value); + + hw->phy.ops.read_reg_mdi(hw, 0, 0, &value); + value |= 0x800; + hw->phy.ops.write_reg_mdi(hw, 0, 0, value); + } else if (hw->phy.type == ngbe_phy_yt8521s_sfi) { + if (NGBE_POLL_LINK_STATUS != 1) { + /*enable yt8521s interrupt*/ + /* select sds area register */ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, 0, 0x00); + + /* enable interrupt */ + value = 0x0C0C; + hw->phy.ops.write_reg_mdi(hw, 0x12, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + } + if (!hw->ncsi_enabled) { + /* power down in Fiber mode */ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x0, 0, &value); + value |= 0x800; + ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x0, 0, value); + + /* power down in UTP mode */ + ngbe_phy_read_reg_mdi(hw, 0x0, 0, &value); + value |= 0x800; + ngbe_phy_write_reg_mdi(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + } + } + + return ret_val; +} + + +/** + * ngbe_identify_module - Identifies module type + * @hw: pointer to hardware structure + * + * Determines HW type and calls appropriate function. + **/ +int ngbe_phy_identify(struct ngbe_hw *hw) +{ + int status = 0; + + switch(hw->phy.type) { + case ngbe_phy_internal: + case ngbe_phy_internal_yt8521s_sfi: + status = ngbe_check_internal_phy_id(hw); + break; + case ngbe_phy_m88e1512: + case ngbe_phy_m88e1512_sfi: + case ngbe_phy_m88e1512_unknown: + status = ngbe_check_mdi_phy_id(hw); + break; + case ngbe_phy_yt8521s_sfi: + status = ngbe_check_yt_phy_id(hw); + break; + default: + status = NGBE_ERR_PHY_TYPE; + } + + return status; +} + +static int ngbe_gphy_reset(struct ngbe_hw *hw, bool need_restart_AN) +{ + int status, i; + u16 val; + + if (!need_restart_AN) + return 0; + + val = NGBE_MDI_PHY_RESET; + status = hw->phy.ops.write_reg(hw, 0, 0, val); + for (i = 0; i < NGBE_PHY_RST_WAIT_PERIOD; i++) { + status = hw->phy.ops.read_reg(hw, 0, 0, &val); + if (!(val & NGBE_MDI_PHY_RESET)) + break; + msleep(1); + } + + if (i == NGBE_PHY_RST_WAIT_PERIOD) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "PHY MODE RESET did not complete.\n"); + return NGBE_ERR_RESET_FAILED; + } + + return status; +} + +int ngbe_phy_reset(struct ngbe_hw *hw) +{ + int status = 0; + + u16 value = 0; + int i; + + /* only support internal phy */ + if (hw->phy.type != ngbe_phy_internal && + hw->phy.type != ngbe_phy_internal_yt8521s_sfi) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "ngbe_phy_reset: operation not supported.\n"); + return NGBE_ERR_PHY_TYPE; + } + + /* Don't reset PHY if it's shut down due to overtemp. */ + if (!hw->phy.reset_if_overtemp && + NGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)) { + ERROR_REPORT1(NGBE_ERROR_CAUTION, + "OVERTEMP! 
Skip PHY reset.\n"); + return NGBE_ERR_OVERTEMP; + } + + /* Blocked by MNG FW so bail */ + if (ngbe_check_reset_blocked(hw)) + return status; + + value |= NGBE_MDI_PHY_RESET; + status = hw->phy.ops.write_reg(hw, 0, 0, value); + for (i = 0; i < NGBE_PHY_RST_WAIT_PERIOD; i++) { + status = hw->phy.ops.read_reg(hw, 0, 0, &value); + if (!(value & NGBE_MDI_PHY_RESET)) + break; + msleep(1); + } + + if (i == NGBE_PHY_RST_WAIT_PERIOD) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "PHY MODE RESET did not complete.\n"); + return NGBE_ERR_RESET_FAILED; + } + + return status; +} + +u32 ngbe_phy_setup_link(struct ngbe_hw *hw, + u32 speed, + bool need_restart_AN) +{ + u16 value = 0; + int status = 0; + + status = ngbe_gphy_reset(hw, need_restart_AN); + if (!hw->mac.autoneg) { + if (status) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "call phy reset return %d.\n", status); + return NGBE_ERR_PHY; + } + + switch (speed) { + case NGBE_LINK_SPEED_1GB_FULL: + value = NGBE_MDI_PHY_SPEED_SELECT1; + break; + case NGBE_LINK_SPEED_100_FULL: + value = NGBE_MDI_PHY_SPEED_SELECT0; + break; + case NGBE_LINK_SPEED_10_FULL: + value = 0; + break; + default: + value = NGBE_MDI_PHY_SPEED_SELECT0 | NGBE_MDI_PHY_SPEED_SELECT1; + ERROR_REPORT1(NGBE_ERROR_CAUTION, + "unknown speed = 0x%x.\n", speed); + break; + } + /* duplex full */ + value |= NGBE_MDI_PHY_DUPLEX; + hw->phy.ops.write_reg(hw, 0, 0, value); + + goto skip_an; + } + + /* disable 10/100M Half Duplex */ + hw->phy.ops.read_reg(hw, 4, 0, &value); + value &= 0xFF5F; + hw->phy.ops.write_reg(hw, 4, 0, value); + + /* set advertise enable according to input speed */ + if (!(speed & NGBE_LINK_SPEED_1GB_FULL)) { + hw->phy.ops.read_reg(hw, 9, 0, &value); + value &= 0xFDFF; + hw->phy.ops.write_reg(hw, 9, 0, value); + } else { + hw->phy.ops.read_reg(hw, 9, 0, &value); + value |= 0x200; + hw->phy.ops.write_reg(hw, 9, 0, value); + } + + if (!(speed & NGBE_LINK_SPEED_100_FULL)) { + hw->phy.ops.read_reg(hw, 4, 0, &value); + value &= 0xFEFF; + hw->phy.ops.write_reg(hw, 4, 0, value); + } else { + hw->phy.ops.read_reg(hw, 4, 0, &value); + value |= 0x100; + hw->phy.ops.write_reg(hw, 4, 0, value); + } + + if (!(speed & NGBE_LINK_SPEED_10_FULL)) { + hw->phy.ops.read_reg(hw, 4, 0, &value); + value &= 0xFFBF; + hw->phy.ops.write_reg(hw, 4, 0, value); + } else { + hw->phy.ops.read_reg(hw, 4, 0, &value); + value |= 0x40; + hw->phy.ops.write_reg(hw, 4, 0, value); + } + + /* restart AN and wait AN done interrupt */ + if (hw->ncsi_enabled) { + if (need_restart_AN) + value = NGBE_MDI_PHY_RESTART_AN | NGBE_MDI_PHY_ANE; + else + value = NGBE_MDI_PHY_ANE; + } else { + value = NGBE_MDI_PHY_RESTART_AN | NGBE_MDI_PHY_ANE; + } + + hw->phy.ops.write_reg(hw, 0, 0, value); +skip_an: + hw->phy.ops.phy_led_ctrl(hw); + + hw->phy.ops.check_event(hw); + + return NGBE_OK; +} + +u32 ngbe_phy_led_ctrl(struct ngbe_hw *hw) +{ + u16 value = 0; + struct ngbe_adapter *adapter = hw->back; + + if (adapter->led_conf != -1) + value = adapter->led_conf & 0xffff; + else + value =0x205B; + hw->phy.ops.write_reg(hw, 16, 0xd04, value); + hw->phy.ops.write_reg(hw, 17, 0xd04, 0); + + hw->phy.ops.read_reg(hw, 18, 0xd04, &value); + if (adapter->led_conf != -1) { + value &= ~0x73; + value |= adapter->led_conf >> 16; + } else { + value = value & 0xFFFC; + /*act led blinking mode set to 60ms*/ + value |= 0x2; + } + hw->phy.ops.write_reg(hw, 18, 0xd04, value); + + return 0; +} + +int ngbe_phy_reset_m88e1512(struct ngbe_hw *hw) +{ + int status = 0; + + u16 value = 0; + int i; + + if (hw->phy.type != ngbe_phy_m88e1512 && + hw->phy.type != 
ngbe_phy_m88e1512_sfi) + return NGBE_ERR_PHY_TYPE; + + /* Don't reset PHY if it's shut down due to overtemp. */ + if (!hw->phy.reset_if_overtemp && + NGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)) { + ERROR_REPORT1(NGBE_ERROR_CAUTION, + "OVERTEMP! Skip PHY reset.\n"); + return NGBE_ERR_OVERTEMP; + } + + /* Blocked by MNG FW so bail */ + if (ngbe_check_reset_blocked(hw)) + return status; + + /* select page 18 reg 20 */ + status = hw->phy.ops.write_reg_mdi(hw, 22, 0, 18); + + if (hw->phy.type == ngbe_phy_m88e1512) + /* mode select to RGMII-to-copper */ + value = 0; + else + /* mode select to RGMII-to-sfi */ + value = 2; + status = hw->phy.ops.write_reg_mdi(hw, 20, 0, value); + /* mode reset */ + value |= NGBE_MDI_PHY_RESET; + status = hw->phy.ops.write_reg_mdi(hw, 20, 0, value); + + for (i = 0; i < NGBE_PHY_RST_WAIT_PERIOD; i++) { + status = hw->phy.ops.read_reg_mdi(hw, 20, 0, &value); + if (!(value & NGBE_MDI_PHY_RESET)) + break; + msleep(1); + } + + if (i == NGBE_PHY_RST_WAIT_PERIOD) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "M88E1512 MODE RESET did not complete.\n"); + return NGBE_ERR_RESET_FAILED; + } + + return status; +} + +int ngbe_phy_reset_yt8521s(struct ngbe_hw *hw) +{ + int status = 0; + + u16 value = 0; + int i; + unsigned long flags; + + if (hw->phy.type != ngbe_phy_yt8521s_sfi) + return NGBE_ERR_PHY_TYPE; + + if (hw->ncsi_enabled) + return status; + + /* Don't reset PHY if it's shut down due to overtemp. */ + if (!hw->phy.reset_if_overtemp && + NGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)) { + ERROR_REPORT1(NGBE_ERROR_CAUTION, + "OVERTEMP! Skip PHY reset.\n"); + return NGBE_ERR_OVERTEMP; + } + + /* Blocked by MNG FW so bail */ + if (ngbe_check_reset_blocked(hw)) + return status; + + + /* check chip_mode first */ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_ext_yt8521s(hw, 0xa001, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + if ((value & 7) != 0) {/* fiber_to_rgmii */ + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0, 0, &value); + /* sds software reset */ + value |= 0x8000; + status = ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + for (i = 0; i < NGBE_PHY_RST_WAIT_PERIOD; i++) { + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + if (!(value & 0x8000)) + break; + msleep(1); + } + } else {/* utp_to_rgmii */ + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_mdi(hw, 0, 0, &value); + /* software reset */ + value |= 0x8000; + status = ngbe_phy_write_reg_mdi(hw, 0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + for (i = 0; i < NGBE_PHY_RST_WAIT_PERIOD; i++) { + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_mdi(hw, 0, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + if (!(value & 0x8000)) + break; + msleep(1); + } + } + + if (i == NGBE_PHY_RST_WAIT_PERIOD) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "YT8521S Software RESET did not complete.\n"); + return NGBE_ERR_RESET_FAILED; + } + + return status; +} + + +u32 ngbe_phy_setup_link_m88e1512(struct ngbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + u16 value_r4 = 0; + u16 value_r9 = 0; + u16 value = 0; + struct ngbe_adapter *adapter = hw->back; + + UNREFERENCED_PARAMETER(autoneg_wait_to_complete); + + if (adapter->led_conf == -1) { + /* LED control */ + hw->phy.ops.write_reg_mdi(hw, 22, 0, 3); + 
hw->phy.ops.read_reg_mdi(hw, 16, 0, &value); + value &= ~0x00FF; + value |= (NGBE_M88E1512_LED1_CONF << 4) | NGBE_M88E1512_LED0_CONF; + hw->phy.ops.write_reg_mdi(hw, 16, 0, value); + hw->phy.ops.read_reg_mdi(hw, 17, 0, &value); + value &= ~0x000F; + value |= (NGBE_M88E1512_LED1_POL << 2) | NGBE_M88E1512_LED0_POL; + hw->phy.ops.write_reg_mdi(hw, 17, 0, value); + } + + hw->phy.autoneg_advertised = 0; + if (hw->phy.type == ngbe_phy_m88e1512) { + if (!hw->mac.autoneg) { + switch (speed) { + case NGBE_LINK_SPEED_1GB_FULL: + value = NGBE_MDI_PHY_SPEED_SELECT1; + break; + case NGBE_LINK_SPEED_100_FULL: + value = NGBE_MDI_PHY_SPEED_SELECT0; + break; + case NGBE_LINK_SPEED_10_FULL: + value = 0; + break; + default: + value = NGBE_MDI_PHY_SPEED_SELECT0 | NGBE_MDI_PHY_SPEED_SELECT1; + ERROR_REPORT1(NGBE_ERROR_CAUTION, + "unknown speed = 0x%x.\n", speed); + break; + } + /* duplex full */ + value |= NGBE_MDI_PHY_DUPLEX | 0x8000; + ngbe_phy_write_reg_mdi(hw, 0x0, 0, value); + + goto skip_an; + } + if (speed & NGBE_LINK_SPEED_1GB_FULL) { + value_r9 |=NGBE_M88E1512_1000BASET_FULL; + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + } + + if (speed & NGBE_LINK_SPEED_100_FULL) { + value_r4 |= NGBE_M88E1512_100BASET_FULL; + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_100_FULL; + } + + if (speed & NGBE_LINK_SPEED_10_FULL) { + value_r4 |= NGBE_M88E1512_10BASET_FULL; + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_10_FULL; + } + + hw->phy.ops.write_reg_mdi(hw, 22, 0, 0); + hw->phy.ops.read_reg_mdi(hw, 4, 0, &value); + value &= ~(NGBE_M88E1512_100BASET_FULL | + NGBE_M88E1512_100BASET_HALF | + NGBE_M88E1512_10BASET_FULL | + NGBE_M88E1512_10BASET_HALF); + value_r4 |= value; + hw->phy.ops.write_reg_mdi(hw, 4, 0, value_r4); + + hw->phy.ops.write_reg_mdi(hw, 22, 0, 0); + hw->phy.ops.read_reg_mdi(hw, 9, 0, &value); + value &= ~(NGBE_M88E1512_1000BASET_FULL | + NGBE_M88E1512_1000BASET_HALF); + value_r9 |= value; + hw->phy.ops.write_reg_mdi(hw, 9, 0, value_r9); + + value = NGBE_MDI_PHY_RESTART_AN | + NGBE_MDI_PHY_ANE | + NGBE_MDI_PHY_RESET | + NGBE_MDI_PHY_DUPLEX; + hw->phy.ops.write_reg_mdi(hw, 0, 0, value); + } else { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + hw->phy.ops.write_reg_mdi(hw, 22, 0, 1); + hw->phy.ops.read_reg_mdi(hw, 4, 0, &value); + value &= ~0x60; + value |= 0x20; + hw->phy.ops.write_reg_mdi(hw, 4, 0, value); + + if (hw->mac.autoneg) + value = NGBE_MDI_PHY_RESTART_AN | + NGBE_MDI_PHY_ANE | + NGBE_MDI_PHY_RESET | + NGBE_MDI_PHY_DUPLEX | + NGBE_MDI_PHY_SPEED_SELECT1; + else + value = NGBE_MDI_PHY_RESET | + NGBE_MDI_PHY_DUPLEX | + NGBE_MDI_PHY_SPEED_SELECT1; + hw->phy.ops.write_reg_mdi(hw, 0, 0, value); + } + hw->phy.ops.read_reg_mdi(hw, 0, 0, &value); +skip_an: + hw->phy.ops.read_reg_mdi(hw, 0, 0, &value); + value &= ~0x800; + hw->phy.ops.write_reg_mdi(hw, 0, 0, value); + msleep(5); + + hw->phy.ops.check_event(hw); + + + return NGBE_OK; +} + +u32 ngbe_phy_setup_link_yt8521s(struct ngbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + int ret_val = 0; + u16 value = 0; + u16 value_r4 = 0; + u16 value_r9 = 0; + unsigned long flags; + + if (hw->ncsi_enabled) + return ret_val; + hw->phy.autoneg_advertised = 0; + + /* check chip_mode first */ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_ext_yt8521s(hw, 0xA001, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + if ((value & 7) == 0) {/* utp_to_rgmii */ + if (!hw->mac.autoneg) { + switch (speed) { + case NGBE_LINK_SPEED_1GB_FULL: + value = NGBE_MDI_PHY_SPEED_SELECT1; + break; + case 
NGBE_LINK_SPEED_100_FULL: + value = NGBE_MDI_PHY_SPEED_SELECT0; + break; + case NGBE_LINK_SPEED_10_FULL: + value = 0; + break; + default: + value = NGBE_MDI_PHY_SPEED_SELECT0 | NGBE_MDI_PHY_SPEED_SELECT1; + ERROR_REPORT1(NGBE_ERROR_CAUTION, + "unknown speed = 0x%x.\n", speed); + break; + } + /* duplex full */ + value |= NGBE_MDI_PHY_DUPLEX | 0x8000; + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_write_reg_mdi(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + goto skip_an; + } + + value_r4 = 0x1E0; + value_r9 = 0x300; + /*disable 100/10base-T Self-negotiation ability*/ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_mdi(hw, 0x4, 0, &value); + value &=~value_r4; + ngbe_phy_write_reg_mdi(hw, 0x4, 0, value); + + /*disable 1000base-T Self-negotiation ability*/ + ngbe_phy_read_reg_mdi(hw, 0x9, 0, &value); + value &=~value_r9; + ngbe_phy_write_reg_mdi(hw, 0x9, 0, value); + + value_r4 = 0x0; + value_r9 = 0x0; + + if (speed & NGBE_LINK_SPEED_1GB_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + value_r9 |= 0x200; + } + if (speed & NGBE_LINK_SPEED_100_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_100_FULL; + value_r4 |= 0x100; + } + if (speed & NGBE_LINK_SPEED_10_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_10_FULL; + value_r4 |= 0x40; + } + + /* enable 1000base-T Self-negotiation ability */ + ngbe_phy_read_reg_mdi(hw, 0x9, 0, &value); + value |=value_r9; + ngbe_phy_write_reg_mdi(hw, 0x9, 0, value); + + /* enable 100/10base-T Self-negotiation ability */ + ngbe_phy_read_reg_mdi(hw, 0x4, 0, &value); + value |=value_r4; + ngbe_phy_write_reg_mdi(hw, 0x4, 0, value); + + /* software reset to make the above configuration take effect*/ + ngbe_phy_read_reg_mdi(hw, 0x0, 0, &value); + value |= 0x9200; + ngbe_phy_write_reg_mdi(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); +skip_an: + /* power on in UTP mode */ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_mdi(hw, 0x0, 0, &value); + value &= ~0x800; + ngbe_phy_write_reg_mdi(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + } else if ((value & 7) == 1) {/* fiber_to_rgmii */ + if (!hw->mac.autoneg) { + switch (speed) { + case NGBE_LINK_SPEED_1GB_FULL: + value = NGBE_LINK_SPEED_1GB_FULL; + break; + case NGBE_LINK_SPEED_100_FULL: + value = NGBE_LINK_SPEED_100_FULL; + break; + default: + value = NGBE_LINK_SPEED_1GB_FULL; + break; + } + hw->phy.autoneg_advertised |= value; + goto skip_an_fiber; + } + + value = 0; + if (speed & NGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + if (speed & NGBE_LINK_SPEED_100_FULL) + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_100_FULL; +skip_an_fiber: + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_ext_yt8521s(hw, 0xA006, 0, &value); + if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_1GB_FULL) + value |= 0x1; + else if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_100_FULL) + value &= ~0x1; + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA006, 0, value); + + /* close auto sensing */ + ngbe_phy_read_reg_sds_ext_yt8521s(hw, 0xA5, 0, &value); + value &= ~0x8000; + ngbe_phy_write_reg_sds_ext_yt8521s(hw, 0xA5, 0, value); + + ngbe_phy_read_reg_ext_yt8521s(hw, 0xA001, 0, &value); + value &= ~0x8000; + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA001, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + /* RGMII_Config1 : Config rx and tx training delay */ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA003, 0, 0x3cf1); + 
ngbe_phy_write_reg_ext_yt8521s(hw, 0xA001, 0, 0x8041); + + /* software reset */ + if (hw->mac.autoneg) { + ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x0, 0,0x9340); + } else { + value = NGBE_YT8521S_PHY_RESET | NGBE_YT8521S_PHY_DUPLEX; + if (speed & NGBE_LINK_SPEED_1GB_FULL) + value |= NGBE_YT8521S_PHY_SPEED_SELECT1; + if (speed & NGBE_LINK_SPEED_100_FULL) + value |= NGBE_YT8521S_PHY_SPEED_SELECT0; + ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x0, 0, value); + } + spin_unlock_irqrestore(&hw->phy_lock, flags); + + } else if ((value & 7) == 2) { + /* power on in UTP mode */ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_mdi(hw, 0x0, 0, &value); + value &= ~0x800; + ngbe_phy_write_reg_mdi(hw, 0x0, 0, value); + + /* power on in Fiber mode */ + ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x0, 0, &value); + value &= ~0x800; + ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x0, 0, value); + + ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x11, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + if (value & 0x400) { /* fiber up */ + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + } else { /* utp up */ + value_r4 = 0x1E0; + value_r9 = 0x300; + /*disable 100/10base-T Self-negotiation ability*/ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_mdi(hw, 0x4, 0, &value); + value &=~value_r4; + ngbe_phy_write_reg_mdi(hw, 0x4, 0, value); + + /*disable 1000base-T Self-negotiation ability*/ + ngbe_phy_read_reg_mdi(hw, 0x9, 0, &value); + value &=~value_r9; + ngbe_phy_write_reg_mdi(hw, 0x9, 0, value); + + value_r4 = 0x0; + value_r9 = 0x0; + + if (speed & NGBE_LINK_SPEED_1GB_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + value_r9 |= 0x200; + } + if (speed & NGBE_LINK_SPEED_100_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_100_FULL; + value_r4 |= 0x100; + } + if (speed & NGBE_LINK_SPEED_10_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_10_FULL; + value_r4 |= 0x40; + } + + /* enable 1000base-T Self-negotiation ability */ + ngbe_phy_read_reg_mdi(hw, 0x9, 0, &value); + value |=value_r9; + ngbe_phy_write_reg_mdi(hw, 0x9, 0, value); + + /* enable 100/10base-T Self-negotiation ability */ + ngbe_phy_read_reg_mdi(hw, 0x4, 0, &value); + value |=value_r4; + ngbe_phy_write_reg_mdi(hw, 0x4, 0, value); + + /* software reset to make the above configuration take effect*/ + ngbe_phy_read_reg_mdi(hw, 0x0, 0, &value); + value |= 0x8000; + ngbe_phy_write_reg_mdi(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + } + } else if ((value & 7) == 4) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_ext_yt8521s(hw, 0xA003, 0, &value); + value |= 0x8000; + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA003, 0, value); + + ngbe_phy_read_reg_ext_yt8521s(hw, 0xA004, 0, &value); + value &= ~0xf0; + value |= 0xb0; + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA004, 0, value); + + ngbe_phy_read_reg_ext_yt8521s(hw, 0xA001, 0, &value); + value &= ~0x8000; + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA001, 0, value); + + /* power on phy */ + ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x0, 0, &value); + value &= ~0x800; + ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + } else if ((value & 7) == 5) {/* sgmii_to_rgmii */ + if (!hw->mac.autoneg) { + switch (speed) { + case NGBE_LINK_SPEED_1GB_FULL: + value = NGBE_MDI_PHY_SPEED_SELECT1; + break; + case NGBE_LINK_SPEED_100_FULL: + value = NGBE_MDI_PHY_SPEED_SELECT0; + break; + case NGBE_LINK_SPEED_10_FULL: + value = 0; 
+ break; + default: + value = NGBE_MDI_PHY_SPEED_SELECT0 | NGBE_MDI_PHY_SPEED_SELECT1; + ERROR_REPORT1(NGBE_ERROR_CAUTION, + "unknown speed = 0x%x.\n", speed); + break; + } + /* duplex full */ + value |= NGBE_MDI_PHY_DUPLEX | 0x8000; + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + goto skip_an_sr; + } + + value = 0; + if (speed & NGBE_LINK_SPEED_1GB_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + value |= 0x40; + } + if (speed & NGBE_LINK_SPEED_100_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_100_FULL; + value |= 0x2000; + } + if (speed & NGBE_LINK_SPEED_10_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_10_FULL; + value |= 0x0; + } + + /* duplex full */ + value |= NGBE_MDI_PHY_DUPLEX | 0x8000; + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x0, 0, value); + + /* software reset to make the above configuration take effect */ + ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x0, 0, &value); + value |= 0x9200; + ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); +skip_an_sr: + /* power on in UTP mode */ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x0, 0, &value); + value &= ~0x800; + ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + } + hw->phy.ops.check_event(hw); + + return ret_val; +} + +/** + * ngbe_tn_check_overtemp - Checks if an overtemp occurred. + * @hw: pointer to hardware structure + * + * Checks if the LASI temp alarm status was triggered due to overtemp + **/ +int ngbe_phy_check_overtemp(struct ngbe_hw *hw) +{ + int status = 0; + u32 ts_state; + + /* Check that the LASI temp alarm status was triggered */ + ts_state = rd32(hw, NGBE_TS_ALARM_ST); + + if (ts_state & NGBE_TS_ALARM_ST_DALARM) + status = NGBE_ERR_UNDERTEMP; + else if (ts_state & NGBE_TS_ALARM_ST_ALARM) + status = NGBE_ERR_OVERTEMP; + + return status; +} + +int ngbe_phy_check_event(struct ngbe_hw *hw) +{ + u16 value = 0; + struct ngbe_adapter *adapter = hw->back; + + hw->phy.ops.read_reg(hw, 0x1d, 0xa43, &value); + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; + if (value & BIT(4)) + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; + else if (value & BIT(3)) + adapter->flags |= NGBE_FLAG_NEED_ANC_CHECK; + + return NGBE_OK; +} + +int ngbe_phy_check_event_m88e1512(struct ngbe_hw *hw) +{ + u16 value = 0; + struct ngbe_adapter *adapter = hw->back; + + if (hw->phy.type == ngbe_phy_m88e1512) + hw->phy.ops.write_reg_mdi(hw, 22, 0, 0); + else + hw->phy.ops.write_reg_mdi(hw, 22, 0, 1); + hw->phy.ops.read_reg_mdi(hw, 19, 0, &value); + + if (value & NGBE_M88E1512_LSC) { + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; + } + + if (value & NGBE_M88E1512_ANC) { + adapter->flags |= NGBE_FLAG_NEED_ANC_CHECK; + } + + return NGBE_OK; +} + +int ngbe_phy_check_event_yt8521s(struct ngbe_hw *hw) +{ + u16 value = 0; + struct ngbe_adapter *adapter = hw->back; + unsigned long flags; + + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000,0,0x0); + hw->phy.ops.read_reg_mdi(hw, 0x13, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + if ((value & (NGBE_YT8521S_SDS_LINK_UP | NGBE_YT8521S_SDS_LINK_DOWN)) || + (value & (NGBE_YT8521S_UTP_LINK_UP | NGBE_YT8521S_UTP_LINK_DOWN))) { + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; + } + + return NGBE_OK; +} + +static int 
ngbe_phy_get_advertised_pause(struct ngbe_hw *hw, u8 *pause_bit) +{ + u16 value = 0; + int status = 0; + + status = hw->phy.ops.read_reg(hw, 4, 0, &value); + *pause_bit = (u8)((value >> 10) & 0x3); + return status; +} + +int ngbe_phy_get_advertised_pause_m88e1512(struct ngbe_hw *hw, u8 *pause_bit) +{ + u16 value = 0; + int status = 0; + + if (hw->phy.type == ngbe_phy_m88e1512) { + status = hw->phy.ops.write_reg_mdi(hw, 22, 0, 0); + status = hw->phy.ops.read_reg_mdi(hw, 4, 0, &value); + *pause_bit = (u8)((value >> 10) & 0x3); + } else { + status = hw->phy.ops.write_reg_mdi(hw, 22, 0, 1); + status = hw->phy.ops.read_reg_mdi(hw, 4, 0, &value); + *pause_bit = (u8)((value >> 7) & 0x3); + } + return status; +} + +int ngbe_phy_get_advertised_pause_yt8521s(struct ngbe_hw *hw, u8 *pause_bit) +{ + u16 value = 0; + int status = 0; + unsigned long flags; + + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x04, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + *pause_bit = (u8)((value >> 7) & 0x3); + return status; +} + +static int ngbe_phy_get_lp_advertised_pause(struct ngbe_hw *hw, u8 *pause_bit) +{ + u16 value = 0; + int status = 0; + + status = hw->phy.ops.read_reg(hw, 0x1d, 0xa43, &value); + + status = hw->phy.ops.read_reg(hw, 0x1, 0, &value); + value = (value >> 5) & 0x1; + + /* if AN complete then check lp adv pause */ + status = hw->phy.ops.read_reg(hw, 5, 0, &value); + *pause_bit = (u8)((value >> 10) & 0x3); + return status; +} + +int ngbe_phy_get_lp_advertised_pause_m88e1512(struct ngbe_hw *hw, u8 *pause_bit) +{ + u16 value = 0; + int status = 0; + + if (hw->phy.type == ngbe_phy_m88e1512) { + status = hw->phy.ops.write_reg_mdi(hw, 22, 0, 0); + status = hw->phy.ops.read_reg_mdi(hw, 5, 0, &value); + *pause_bit = (u8)((value >> 10) & 0x3); + } else { + status = hw->phy.ops.write_reg_mdi(hw, 22, 0, 1); + status = hw->phy.ops.read_reg_mdi(hw, 5, 0, &value); + *pause_bit = (u8)((value >> 7) & 0x3); + } + return status; +} + +int ngbe_phy_get_lp_advertised_pause_yt8521s(struct ngbe_hw *hw, u8 *pause_bit) +{ + u16 value = 0; + int status = 0; + unsigned long flags; + + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x05, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + *pause_bit = (u8)((value >> 7) & 0x3); + return status; + +} + +static int ngbe_phy_set_pause_advertisement(struct ngbe_hw *hw, u16 pause_bit) +{ + u16 value = 0; + int status = 0; + + status = hw->phy.ops.read_reg(hw, 4, 0, &value); + value &= ~0xC00; + value |= pause_bit; + status = hw->phy.ops.write_reg(hw, 4, 0, value); + return status; +} + +int ngbe_phy_set_pause_advertisement_m88e1512(struct ngbe_hw *hw, + u16 pause_bit) +{ + u16 value = 0; + int status = 0; + if (hw->phy.type == ngbe_phy_m88e1512) { + status = hw->phy.ops.write_reg_mdi(hw, 22, 0, 0); + status = hw->phy.ops.read_reg_mdi(hw, 4, 0, &value); + value &= ~0xC00; + value |= pause_bit; + status = hw->phy.ops.write_reg_mdi(hw, 4, 0, value); + } else { + status = hw->phy.ops.write_reg_mdi(hw, 22, 0, 1); + status = hw->phy.ops.read_reg_mdi(hw, 4, 0, &value); + value &= ~0x180; + value |= pause_bit; + status = hw->phy.ops.write_reg_mdi(hw, 4, 0, value); + } + + return status; +} + +int ngbe_phy_set_pause_advertisement_yt8521s(struct ngbe_hw *hw, + u16 pause_bit) +{ + u16 value = 0; + int status = 0; + unsigned long flags; + + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x04, 0, &value); + value &= ~0x180; + value |= 
pause_bit; + status = ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x04, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + return status; +} +int ngbe_gphy_dis_eee(struct ngbe_hw *hw) +{ + u16 val = 0; + + hw->phy.ops.write_reg(hw, 0x11, 0xa4b, 0x1110); + hw->phy.ops.write_reg(hw, 0xd, 0x0, 0x7); + hw->phy.ops.write_reg(hw, 0xe, 0x0, 0x003c); + hw->phy.ops.write_reg(hw, 0xd, 0x0, 0x4007); + hw->phy.ops.write_reg(hw, 0xe, 0x0, 0); + + /* disable 10/100M Half Duplex */ + msleep(100); + hw->phy.ops.read_reg(hw, 4, 0, &val); + val &= 0xff5f; + hw->phy.ops.write_reg(hw, 0x4, 0x0, val); + + return 0; +} + +int ngbe_gphy_wait_mdio_access_on(struct ngbe_hw *hw) +{ + int i; + u16 val = 0; + struct ngbe_adapter *adapter = hw->back; + + for (i = 0; i < 100; i++) { + hw->phy.ops.read_reg(hw, 29, 0xa43, &val); + if (val & 0x20) { + e_info(hw, "mdio_access ready\n"); + break; + } + usec_delay(1000); + } + + if (i == 100) + e_info(hw, "ngbe_gphy_wait_mdio_access_on timeout\n"); + + return 0; +} + +int ngbe_gphy_efuse_calibration(struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = hw->back; + u32 efuse[2]; + u16 val; + + ngbe_gphy_wait_mdio_access_on(hw); + efuse[0] = adapter->gphy_efuse[0]; + efuse[1] = adapter->gphy_efuse[1]; + + e_info(hw, "=1=port %d efuse[0] = %08x, efuse[1] = %08x\n", hw->bus.lan_id, efuse[0], efuse[1]); + + if (!efuse[0] && !efuse[1]) { + efuse[0] = 0xFFFFFFFF; + efuse[1] = 0xFFFFFFFF; + } + + /* calibration */ + efuse[0] |= 0xF0000100; + efuse[1] |= 0xFF807FFF; + e_info(hw, "=2=port %d efuse[0] = %08x, efuse[1] = %08x\n", hw->bus.lan_id, efuse[0], efuse[1]); + + /* EODR, Efuse Output Data Register */ + hw->phy.ops.write_reg(hw, 16, 0xa46, (efuse[0] >> 0) & 0xFFFF); + hw->phy.ops.write_reg(hw, 17, 0xa46, (efuse[0] >> 16) & 0xFFFF); + hw->phy.ops.write_reg(hw, 18, 0xa46, (efuse[1] >> 0) & 0xFFFF); + hw->phy.ops.write_reg(hw, 19, 0xa46, (efuse[1] >> 16) & 0xFFFF); + + hw->phy.ops.write_reg(hw, 20, 0xa46, 0x01); /* set efuse ready */ + ngbe_gphy_wait_mdio_access_on(hw); + hw->phy.ops.write_reg(hw, 27, 0xa43, 0x8011); + hw->phy.ops.write_reg(hw, 28, 0xa43, 0x5737); + /* dis fall to 100m */ + hw->phy.ops.read_reg(hw, 17, 0xa44, &val); + val &= ~0x8; + hw->phy.ops.write_reg(hw, 17, 0xa44, val); + ngbe_gphy_dis_eee(hw); + + return 0; +} + +static int ngbe_phy_setup(struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = hw->back; + u16 value = 0; + int i; + + if (test_bit(__NGBE_NO_PHY_SET, &adapter->state)) + return 0; + ngbe_gphy_efuse_calibration(hw); + hw->phy.ops.write_reg(hw, 20, 0xa46, 2); + ngbe_gphy_wait_mdio_access_on(hw); + + for (i = 0; i < 100;i++) { + hw->phy.ops.read_reg(hw, 16, 0xa42, &value); + if ((value & 0x7) == 3) + break; + usec_delay(1000); + } + + if (i == 100) + return NGBE_ERR_PHY_TIMEOUT; + + return 0; +} + +static int ngbe_phy_read_reg_internal(struct ngbe_hw *hw, int phy_addr, int regnum) +{ + if (phy_addr != 0) + return 0xffff; + return (u16)rd32(hw, NGBE_PHY_CONFIG(regnum)); +} + +static int ngbe_phy_write_reg_internal(struct ngbe_hw *hw, int phy_addr, int regnum, u16 value) +{ + if (phy_addr == 0) + wr32(hw, NGBE_PHY_CONFIG(regnum), value); + return 0; +} + +static int ngbe_phy_read_reg_mdi_c22(struct ngbe_hw *hw, int phy_addr, int regnum) +{ + u32 command, device_type = 0; + int ret; + + wr32(hw, NGBE_MDIO_CLAUSE_SELECT, 0xF); + /* setup and write the address cycle command */ + command = NGBE_MSCA_RA(regnum) | + NGBE_MSCA_PA(phy_addr) | + NGBE_MSCA_DA(device_type); + wr32(hw, NGBE_MSCA, command); + command = NGBE_MSCC_CMD(NGBE_MSCA_CMD_READ) | + 
NGBE_MSCC_BUSY | + NGBE_MDIO_CLK(6); + wr32(hw, NGBE_MSCC, command); + + /* wait to complete */ + ret = po32m(hw, NGBE_MSCC, NGBE_MSCC_BUSY, ~NGBE_MSCC_BUSY, + NGBE_MDIO_TIMEOUT, 10); + if (ret) + return ret; + + return (u16)rd32(hw, NGBE_MSCC); +} + +static int ngbe_phy_write_reg_mdi_c22(struct ngbe_hw *hw, int phy_addr, int regnum, u16 value) +{ + u32 command, device_type = 0; + int ret; + + wr32(hw, NGBE_MDIO_CLAUSE_SELECT, 0xF); + /* setup and write the address cycle command */ + command = NGBE_MSCA_RA(regnum) | + NGBE_MSCA_PA(phy_addr) | + NGBE_MSCA_DA(device_type); + wr32(hw, NGBE_MSCA, command); + command = value | + NGBE_MSCC_CMD(NGBE_MSCA_CMD_WRITE) | + NGBE_MSCC_BUSY | + NGBE_MDIO_CLK(6); + wr32(hw, NGBE_MSCC, command); + + /* wait to complete */ + ret = po32m(hw, NGBE_MSCC, NGBE_MSCC_BUSY, ~NGBE_MSCC_BUSY, + NGBE_MDIO_TIMEOUT, 10); + + return ret; +} + +static int ngbe_phy_read_reg_c22(struct ngbe_hw *hw, int phy_addr, int regnum) +{ + u16 phy_data; + + if (hw->mac_type == em_mac_type_mdi) + phy_data = ngbe_phy_read_reg_internal(hw, phy_addr, regnum); + else + phy_data = ngbe_phy_read_reg_mdi_c22(hw, phy_addr, regnum); + + return phy_data; +} + +static int ngbe_phy_write_reg_c22(struct ngbe_hw *hw, int phy_addr, + int regnum, u16 value) +{ + int ret; + + if (hw->mac_type == em_mac_type_mdi) + ret = ngbe_phy_write_reg_internal(hw, phy_addr, regnum, value); + else + ret = ngbe_phy_write_reg_mdi_c22(hw, phy_addr, regnum, value); + + return ret; +} + +static int ngbe_genphy_suspend(struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = hw->back; + u16 val; + + if (adapter->eth_priv_flags & NGBE_ETH_PRIV_FLAG_LLDP || + hw->ncsi_enabled) + return 0; + hw->phy.ops.read_reg(hw, 0x0, 0x0, &val); + + return hw->phy.ops.write_reg(hw, 0x0, 0x0, val | 0x800); +} + +int ngbe_mv_suspend(struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = hw->back; + u16 val; + + if (adapter->eth_priv_flags & NGBE_ETH_PRIV_FLAG_LLDP || + hw->ncsi_enabled) + return 0; + + if (hw->phy.type == ngbe_phy_m88e1512) { + hw->phy.ops.write_reg_mdi(hw, 22, 0, 0); + hw->phy.ops.read_reg_mdi(hw, 0, 0, &val); + + return hw->phy.ops.write_reg_mdi(hw, 0x0, 0x0, val | 0x800); + } else { + hw->phy.ops.write_reg_mdi(hw, 22, 0, 1); + hw->phy.ops.read_reg_mdi(hw, 0, 0, &val); + + return hw->phy.ops.write_reg_mdi(hw, 0x0, 0x0, val | 0x800); + } +} + +int ngbe_yt_suspend(struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = hw->back; + u16 val; + unsigned long flags; + + if (adapter->eth_priv_flags & NGBE_ETH_PRIV_FLAG_LLDP || + hw->ncsi_enabled) + return 0; + + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x0, 0, &val); + val |= 0x800; + ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x0, 0, val); + + /* power down in UTP mode */ + ngbe_phy_read_reg_mdi(hw, 0x0, 0, &val); + val |= 0x800; + ngbe_phy_write_reg_mdi(hw, 0x0, 0, val); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + return 0; +} + +static int ngbe_genphy_resume(struct ngbe_hw *hw) +{ + u16 val; + + hw->phy.ops.read_reg(hw, 0x0, 0x0, &val); + + return hw->phy.ops.write_reg(hw, 0x0, 0x0, val & (~0x800)); +} + +void ngbe_init_phy_ops_common(struct ngbe_hw *hw) +{ + struct ngbe_phy_info *phy = &hw->phy; + + phy->ops.reset = ngbe_phy_reset; + phy->ops.read = ngbe_phy_read_reg_c22; + phy->ops.write = ngbe_phy_write_reg_c22; + phy->ops.read_reg = ngbe_phy_read_reg; + phy->ops.write_reg = ngbe_phy_write_reg; + phy->ops.setup_link = ngbe_phy_setup_link; + phy->ops.phy_suspend = ngbe_genphy_suspend; + phy->ops.phy_resume = ngbe_genphy_resume; 
+ phy->ops.phy_led_ctrl = ngbe_phy_led_ctrl; + phy->ops.check_overtemp = ngbe_phy_check_overtemp; + phy->ops.identify = ngbe_phy_identify; + phy->ops.init = ngbe_phy_init; + phy->ops.check_event = ngbe_phy_check_event; + phy->ops.get_adv_pause = ngbe_phy_get_advertised_pause; + phy->ops.get_lp_adv_pause = ngbe_phy_get_lp_advertised_pause; + phy->ops.set_adv_pause = ngbe_phy_set_pause_advertisement; + phy->ops.setup_once = ngbe_phy_setup; +} + diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_phy.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_phy.h new file mode 100644 index 0000000000000000000000000000000000000000..cc7651996bf54bc1be04d5473f806c577c8fa6a2 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_phy.h @@ -0,0 +1,203 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + */ + + +#ifndef _NGBE_PHY_H_ +#define _NGBE_PHY_H_ + +#include "ngbe_type.h" +#include "ngbe.h" + +/* EEPROM byte offsets */ +#define NGBE_SFF_IDENTIFIER 0x0 +#define NGBE_SFF_IDENTIFIER_SFP 0x3 +#define NGBE_SFF_VENDOR_OUI_BYTE0 0x25 +#define NGBE_SFF_VENDOR_OUI_BYTE1 0x26 +#define NGBE_SFF_VENDOR_OUI_BYTE2 0x27 +#define NGBE_SFF_1GBE_COMP_CODES 0x6 +#define NGBE_SFF_10GBE_COMP_CODES 0x3 +#define NGBE_SFF_CABLE_TECHNOLOGY 0x8 +#define NGBE_SFF_CABLE_SPEC_COMP 0x3C +#define NGBE_SFF_SFF_8472_SWAP 0x5C +#define NGBE_SFF_SFF_8472_COMP 0x5E +#define NGBE_SFF_SFF_8472_OSCB 0x6E +#define NGBE_SFF_SFF_8472_ESCB 0x76 +#define NGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD +#define NGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5 +#define NGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6 +#define NGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7 +#define NGBE_SFF_QSFP_CONNECTOR 0x82 +#define NGBE_SFF_QSFP_10GBE_COMP 0x83 +#define NGBE_SFF_QSFP_1GBE_COMP 0x86 +#define NGBE_SFF_QSFP_CABLE_LENGTH 0x92 +#define NGBE_SFF_QSFP_DEVICE_TECH 0x93 + +/* Bitmasks */ +#define NGBE_SFF_DA_PASSIVE_CABLE 0x4 +#define NGBE_SFF_DA_ACTIVE_CABLE 0x8 +#define NGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 +#define NGBE_SFF_1GBASESX_CAPABLE 0x1 +#define NGBE_SFF_1GBASELX_CAPABLE 0x2 +#define NGBE_SFF_1GBASET_CAPABLE 0x8 +#define NGBE_SFF_10GBASESR_CAPABLE 0x10 +#define NGBE_SFF_10GBASELR_CAPABLE 0x20 +#define NGBE_SFF_SOFT_RS_SELECT_MASK 0x8 +#define NGBE_SFF_SOFT_RS_SELECT_10G 0x8 +#define NGBE_SFF_SOFT_RS_SELECT_1G 0x0 +#define NGBE_SFF_ADDRESSING_MODE 0x4 +#define NGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 +#define NGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 +#define NGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23 +#define NGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0 +#define NGBE_I2C_EEPROM_READ_MASK 0x100 +#define NGBE_I2C_EEPROM_STATUS_MASK 0x3 +#define NGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 +#define NGBE_I2C_EEPROM_STATUS_PASS 0x1 +#define NGBE_I2C_EEPROM_STATUS_FAIL 0x2 +#define NGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 + +#define NGBE_CS4227 0xBE /* CS4227 address */ +#define NGBE_CS4227_GLOBAL_ID_LSB 0 +#define NGBE_CS4227_SCRATCH 2 +#define NGBE_CS4227_GLOBAL_ID_VALUE 0x03E5 +#define 
NGBE_CS4227_SCRATCH_VALUE 0x5aa5 +#define NGBE_CS4227_RETRIES 5 +#define NGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to program speed */ +#define NGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to program EDC */ +#define NGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to program speed */ +#define NGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */ +#define NGBE_CS4227_EDC_MODE_CX1 0x0002 +#define NGBE_CS4227_EDC_MODE_SR 0x0004 +#define NGBE_CS4227_RESET_HOLD 500 /* microseconds */ +#define NGBE_CS4227_RESET_DELAY 500 /* milliseconds */ +#define NGBE_CS4227_CHECK_DELAY 30 /* milliseconds */ +#define NGBE_PE 0xE0 /* Port expander address */ +#define NGBE_PE_OUTPUT 1 /* Output register offset */ +#define NGBE_PE_CONFIG 3 /* Config register offset */ +#define NGBE_PE_BIT1 (1 << 1) + +/* Flow control defines */ +#define NGBE_TAF_SYM_PAUSE (0x1) +#define NGBE_TAF_ASM_PAUSE (0x2) + +/* Bit-shift macros */ +#define NGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 +#define NGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 +#define NGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8 + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define NGBE_SFF_VENDOR_OUI_TYCO 0x00407600 +#define NGBE_SFF_VENDOR_OUI_FTL 0x00906500 +#define NGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define NGBE_SFF_VENDOR_OUI_INTEL 0x001B2100 + +/* I2C SDA and SCL timing parameters for standard mode */ +#define NGBE_I2C_T_HD_STA 4 +#define NGBE_I2C_T_LOW 5 +#define NGBE_I2C_T_HIGH 4 +#define NGBE_I2C_T_SU_STA 5 +#define NGBE_I2C_T_HD_DATA 5 +#define NGBE_I2C_T_SU_DATA 1 +#define NGBE_I2C_T_RISE 1 +#define NGBE_I2C_T_FALL 1 +#define NGBE_I2C_T_SU_STO 4 +#define NGBE_I2C_T_BUF 5 + +#ifndef NGBE_SFP_DETECT_RETRIES +#define NGBE_SFP_DETECT_RETRIES 10 +#endif /* NGBE_SFP_DETECT_RETRIES */ + +/* SFP+ SFF-8472 Compliance */ +#define NGBE_SFF_SFF_8472_UNSUP 0x00 + +bool ngbe_check_reset_blocked(struct ngbe_hw *hw); +enum ngbe_phy_type ngbe_get_phy_type_from_id(struct ngbe_hw *hw); +void ngbe_init_phy_ops_common(struct ngbe_hw *hw); +int ngbe_phy_read_reg_mdi( struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data); +int ngbe_phy_write_reg_mdi( struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data); + +int ngbe_phy_read_reg_sds_mii_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data); +int ngbe_phy_write_reg_sds_mii_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data); + +int ngbe_phy_read_reg_ext_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data); +int ngbe_phy_write_reg_ext_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data); + +int ngbe_phy_read_reg_sds_ext_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data); +int ngbe_phy_write_reg_sds_ext_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data); + +int ngbe_phy_init(struct ngbe_hw *hw); +int ngbe_phy_identify(struct ngbe_hw *hw); +int ngbe_phy_reset(struct ngbe_hw *hw); +u32 ngbe_phy_setup_link(struct ngbe_hw *hw, + u32 speed, + bool need_restart_AN); +u32 ngbe_phy_led_ctrl(struct ngbe_hw *hw); +int ngbe_phy_reset_m88e1512(struct ngbe_hw *hw); +u32 ngbe_phy_setup_link_m88e1512( struct ngbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete); +int ngbe_phy_check_overtemp(struct ngbe_hw *hw); + +int ngbe_mv_suspend(struct ngbe_hw *hw); +int ngbe_yt_suspend(struct ngbe_hw *hw); + +int ngbe_phy_check_event(struct ngbe_hw *hw); +int ngbe_phy_check_event_m88e1512(struct ngbe_hw *hw); +int 
ngbe_phy_set_pause_advertisement_m88e1512(struct ngbe_hw *hw, + u16 pause_bit); +int ngbe_phy_get_advertised_pause_m88e1512(struct ngbe_hw *hw, u8 *pause_bit); +int ngbe_phy_get_lp_advertised_pause_m88e1512(struct ngbe_hw *hw, u8 *pause_bit); +int ngbe_phy_check_event_yt8521s(struct ngbe_hw *hw); +int ngbe_phy_get_advertised_pause_yt8521s(struct ngbe_hw *hw, u8 *pause_bit); +int ngbe_phy_get_lp_advertised_pause_yt8521s(struct ngbe_hw *hw, u8 *pause_bit); +int ngbe_phy_reset_yt8521s(struct ngbe_hw *hw); +u32 ngbe_phy_setup_link_yt8521s( struct ngbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete); +int ngbe_phy_set_pause_advertisement_yt8521s(struct ngbe_hw *hw, + u16 pause_bit); +int ngbe_gphy_wait_mdio_access_on(struct ngbe_hw *hw); +int ngbe_gphy_efuse_calibration(struct ngbe_hw *hw); + +int ngbe_gphy_dis_eee(struct ngbe_hw *hw); + + +#endif /* _NGBE_PHY_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_procfs.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_procfs.c new file mode 100644 index 0000000000000000000000000000000000000000..604eaff7283d3e81368a4448f3de9caed63d4fe4 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_procfs.c @@ -0,0 +1,924 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + */ + + +#include "ngbe.h" +#include "ngbe_hw.h" +#include "ngbe_type.h" + +#ifdef NGBE_PROCFS +#ifndef NGBE_SYSFS + +#include +#include +#include +#include +#include + +static struct proc_dir_entry *ngbe_top_dir; + +static struct net_device_stats *procfs_get_stats(struct net_device *netdev) +{ +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + struct ngbe_adapter *adapter; +#endif + if (netdev == NULL) + return NULL; + +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + /* only return the current stats */ + return &netdev->stats; +#else + adapter = netdev_priv(netdev); + + /* only return the current stats */ + return &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ +} + +static int ngbe_fwbanner(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%s\n", adapter->eeprom_id); +} + +static int ngbe_porttype(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + return snprintf(page, count, "%d\n", + test_bit(__NGBE_DOWN, &adapter->state)); +} + +static int ngbe_portspeed(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + int speed = 0; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + switch (adapter->link_speed) { + case NGBE_LINK_SPEED_100_FULL: + speed = 1; + break; + case NGBE_LINK_SPEED_1GB_FULL: + speed = 10; + break; + case NGBE_LINK_SPEED_10GB_FULL: + speed = 100; + break; + default: + break; + } + return snprintf(page, count, "%d\n", speed); +} + +static int ngbe_wqlflag(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->wol); +} + +static int ngbe_xflowctl(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct ngbe_hw *hw; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", hw->fc.current_mode); +} + +static int ngbe_rxdrops(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_dropped); +} + +static int ngbe_rxerrors(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = 
(struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", net_stats->rx_errors); +} + +static int ngbe_rxupacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", rd32(hw, NGBE_TPR)); +} + +static int ngbe_rxmpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + int i, mprc = 0; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + for (i = 0; i < 8; i++) + mprc += rd32(hw, NGBE_PX_MPRC(i)); + return snprintf(page, count, "%d\n", mprc); +} + +static int ngbe_rxbpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + rd32(hw, NGBE_RX_BC_FRAMES_GOOD_LOW)); +} + +static int ngbe_txupacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + rd32(hw, NGBE_TX_FRAME_CNT_GOOD_BAD_LOW)); +} + +static int ngbe_txmpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + rd32(hw, NGBE_TX_MC_FRAMES_GOOD_LOW)); +} + +static int ngbe_txbpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + rd32(hw, NGBE_TX_BC_FRAMES_GOOD_LOW)); +} + +static int ngbe_txerrors(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct 
ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_errors); +} + +static int ngbe_txdrops(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_dropped); +} + +static int ngbe_rxframes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_packets); +} + +static int ngbe_rxbytes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_bytes); +} + +static int ngbe_txframes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_packets); +} + +static int ngbe_txbytes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_bytes); +} + +static int ngbe_linkstat(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + int bitmask = 0; + u32 link_speed; + bool link_up = false; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + if (!test_bit(__NGBE_DOWN, &adapter->state)) + bitmask |= 1; + + /* 
always assume link is up, if no check link function */ + link_up = true; + if (link_up) + bitmask |= 2; + + if (adapter->old_lsc != adapter->lsc_int) { + bitmask |= 4; + adapter->old_lsc = adapter->lsc_int; + } + + return snprintf(page, count, "0x%X\n", bitmask); +} + +static int ngbe_funcid(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct ngbe_hw *hw; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "0x%X\n", hw->bus.func); +} + +static int ngbe_funcvers(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void __always_unused *data) +{ + return snprintf(page, count, "%s\n", ngbe_driver_version); +} + +static int ngbe_macburn(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n", + (unsigned int)hw->mac.perm_addr[0], + (unsigned int)hw->mac.perm_addr[1], + (unsigned int)hw->mac.perm_addr[2], + (unsigned int)hw->mac.perm_addr[3], + (unsigned int)hw->mac.perm_addr[4], + (unsigned int)hw->mac.perm_addr[5]); +} + +static int ngbe_macadmn(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n", + (unsigned int)hw->mac.addr[0], + (unsigned int)hw->mac.addr[1], + (unsigned int)hw->mac.addr[2], + (unsigned int)hw->mac.addr[3], + (unsigned int)hw->mac.addr[4], + (unsigned int)hw->mac.addr[5]); +} + +static int ngbe_maclla1(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct ngbe_hw *hw; + int rc; + u16 eeprom_buff[6]; + u16 first_word = 0x37; + const u16 word_count = ARRAY_SIZE(eeprom_buff); + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + rc = hw->eeprom.ops.read_buffer(hw, first_word, 1, &first_word); + if (rc != 0) + return snprintf(page, count, + "error: reading pointer to the EEPROM\n"); + + if (first_word != 0x0000 && first_word != 0xFFFF) { + rc = hw->eeprom.ops.read_buffer(hw, first_word, word_count, + eeprom_buff); + if (rc != 0) + return snprintf(page, count, "error: reading buffer\n"); + } else { + memset(eeprom_buff, 0, sizeof(eeprom_buff)); + } + + switch (hw->bus.func) { + case 0: + return snprintf(page, count, "0x%04X%04X%04X\n", + eeprom_buff[0], + eeprom_buff[1], + eeprom_buff[2]); + case 1: + return snprintf(page, count, "0x%04X%04X%04X\n", + eeprom_buff[3], + eeprom_buff[4], + 
eeprom_buff[5]); + default: + return snprintf(page, count, "unexpected port %d\n", hw->bus.func); + } +} + +static int ngbe_mtusize(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device *netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + + return snprintf(page, count, "%d\n", netdev->mtu); +} + +static int ngbe_featflag(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + int bitmask = 0; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device *netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + if (adapter->netdev->features & NETIF_F_RXCSUM) + bitmask |= 1; + return snprintf(page, count, "%d\n", bitmask); +} + +static int ngbe_lsominct(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void __always_unused *data) +{ + return snprintf(page, count, "%d\n", 1); +} + +static int ngbe_prommode(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device *netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + + return snprintf(page, count, "%d\n", + netdev->flags & IFF_PROMISC); +} + +static int ngbe_txdscqsz(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->tx_ring[0]->count); +} + +static int ngbe_rxdscqsz(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->rx_ring[0]->count); +} + +static int ngbe_rxqavg(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + int index; + int diff = 0; + u16 ntc; + u16 ntu; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + for (index = 0; index < adapter->num_rx_queues; index++) { + ntc = adapter->rx_ring[index]->next_to_clean; + ntu = adapter->rx_ring[index]->next_to_use; + + if (ntc >= ntu) + diff += (ntc - ntu); + else + diff += (adapter->rx_ring[index]->count - ntu + ntc); + } + if (adapter->num_rx_queues <= 0) + return snprintf(page, count, + "can't calculate, number of queues %d\n", + adapter->num_rx_queues); + return snprintf(page, count, "%d\n", diff/adapter->num_rx_queues); +} + +static int ngbe_txqavg(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int 
__always_unused *eof, void *data) +{ + int index; + int diff = 0; + u16 ntc; + u16 ntu; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + for (index = 0; index < adapter->num_tx_queues; index++) { + ntc = adapter->tx_ring[index]->next_to_clean; + ntu = adapter->tx_ring[index]->next_to_use; + + if (ntc >= ntu) + diff += (ntc - ntu); + else + diff += (adapter->tx_ring[index]->count - ntu + ntc); + } + if (adapter->num_tx_queues <= 0) + return snprintf(page, count, + "can't calculate, number of queues %d\n", + adapter->num_tx_queues); + return snprintf(page, count, "%d\n", + diff/adapter->num_tx_queues); +} + +static int ngbe_iovotype(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void __always_unused *data) +{ + return snprintf(page, count, "2\n"); +} + +static int ngbe_funcnbr(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->num_vfs); +} + +static int ngbe_pciebnbr(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->pdev->bus->number); +} + +static int ngbe_therm_dealarmthresh(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_therm_proc_data *therm_data = + (struct ngbe_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + return snprintf(page, count, "%d\n", + therm_data->sensor_data->dalarm_thresh); +} + + +static int ngbe_therm_alarmthresh(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_therm_proc_data *therm_data = + (struct ngbe_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + return snprintf(page, count, "%d\n", + therm_data->sensor_data->alarm_thresh); +} + +static int ngbe_therm_temp(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + int status; + struct ngbe_therm_proc_data *therm_data = + (struct ngbe_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + status = ngbe_get_thermal_sensor_data(therm_data->hw); + if (status != 0) + snprintf(page, count, "error: status %d returned\n", status); + + return snprintf(page, count, "%d\n", therm_data->sensor_data->temp); +} + + +struct ngbe_proc_type { + char name[32]; + int (*read)(char*, char**, off_t, int, int*, void*); +}; + +struct ngbe_proc_type ngbe_proc_entries[] = { + {"fwbanner", &ngbe_fwbanner}, + {"porttype", &ngbe_porttype}, + {"portspeed", &ngbe_portspeed}, + {"wqlflag", &ngbe_wqlflag}, + {"xflowctl", &ngbe_xflowctl}, + {"rxdrops", &ngbe_rxdrops}, + {"rxerrors", &ngbe_rxerrors}, + {"rxupacks", &ngbe_rxupacks}, + {"rxmpacks", &ngbe_rxmpacks}, + {"rxbpacks", &ngbe_rxbpacks}, + {"txdrops", &ngbe_txdrops}, + {"txerrors", 
&ngbe_txerrors}, + {"txupacks", &ngbe_txupacks}, + {"txmpacks", &ngbe_txmpacks}, + {"txbpacks", &ngbe_txbpacks}, + {"rxframes", &ngbe_rxframes}, + {"rxbytes", &ngbe_rxbytes}, + {"txframes", &ngbe_txframes}, + {"txbytes", &ngbe_txbytes}, + {"linkstat", &ngbe_linkstat}, + {"funcid", &ngbe_funcid}, + {"funcvers", &ngbe_funcvers}, + {"macburn", &ngbe_macburn}, + {"macadmn", &ngbe_macadmn}, + {"maclla1", &ngbe_maclla1}, + {"mtusize", &ngbe_mtusize}, + {"featflag", &ngbe_featflag}, + {"lsominct", &ngbe_lsominct}, + {"prommode", &ngbe_prommode}, + {"txdscqsz", &ngbe_txdscqsz}, + {"rxdscqsz", &ngbe_rxdscqsz}, + {"txqavg", &ngbe_txqavg}, + {"rxqavg", &ngbe_rxqavg}, + {"iovotype", &ngbe_iovotype}, + {"funcnbr", &ngbe_funcnbr}, + {"pciebnbr", &ngbe_pciebnbr}, + {"", NULL} +}; + +struct ngbe_proc_type ngbe_internal_entries[] = { + {"temp", &ngbe_therm_temp}, + {"alarmthresh", &ngbe_therm_alarmthresh}, + {"dealarmthresh", &ngbe_therm_dealarmthresh}, + {"", NULL} +}; + +void ngbe_del_proc_entries(struct ngbe_adapter *adapter) +{ + int index; + int i; + char buf[16]; /* much larger than the sensor number will ever be */ + + if (ngbe_top_dir == NULL) + return; + + for (i = 0; i < NGBE_MAX_SENSORS; i++) { + if (adapter->therm_dir[i] == NULL) + continue; + + for (index = 0; ; index++) { + if (ngbe_internal_entries[index].read == NULL) + break; + + remove_proc_entry(ngbe_internal_entries[index].name, + adapter->therm_dir[i]); + } + snprintf(buf, sizeof(buf), "sensor_%d", i); + remove_proc_entry(buf, adapter->info_dir); + } + + if (adapter->info_dir != NULL) { + for (index = 0; ; index++) { + if (ngbe_proc_entries[index].read == NULL) + break; + remove_proc_entry(ngbe_proc_entries[index].name, + adapter->info_dir); + } + remove_proc_entry("info", adapter->eth_dir); + } + + if (adapter->eth_dir != NULL) + remove_proc_entry(pci_name(adapter->pdev), ngbe_top_dir); +} + +/* called from ngbe_main.c */ +void ngbe_procfs_exit(struct ngbe_adapter *adapter) +{ + ngbe_del_proc_entries(adapter); +} + +int ngbe_procfs_topdir_init(void) +{ + ngbe_top_dir = proc_mkdir("driver/ngbe", NULL); + if (ngbe_top_dir == NULL) + return -ENOMEM; + + return 0; +} + +void ngbe_procfs_topdir_exit(void) +{ + remove_proc_entry("driver/ngbe", NULL); +} + +/* called from ngbe_main.c */ +int ngbe_procfs_init(struct ngbe_adapter *adapter) +{ + int rc = 0; + int index; + int i; + char buf[16]; /* much larger than the sensor number will ever be */ + + adapter->eth_dir = NULL; + adapter->info_dir = NULL; + adapter->therm_dir = NULL; + + if (ngbe_top_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + + adapter->eth_dir = proc_mkdir(pci_name(adapter->pdev), ngbe_top_dir); + if (adapter->eth_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + + adapter->info_dir = proc_mkdir("info", adapter->eth_dir); + if (adapter->info_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + for (index = 0; ; index++) { + if (ngbe_proc_entries[index].read == NULL) + break; + if (!(create_proc_read_entry(ngbe_proc_entries[index].name, + 0444, + adapter->info_dir, + ngbe_proc_entries[index].read, + adapter))) { + + rc = -ENOMEM; + goto fail; + } + } + if (!adapter->hw->ops.init_thermal_sensor_thresh(hw)) + goto exit; + + + snprintf(buf, sizeof(buf), "sensor"); + adapter->therm_dir = proc_mkdir(buf, adapter->info_dir); + if (adapter->therm_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + for (index = 0; ; index++) { + if (ngbe_internal_entries[index].read == NULL) + break; + /* + * therm_data struct contains pointer the read func + * will be needing + */ + 
adapter->therm_data.hw = &adapter->hw;
+		adapter->therm_data.sensor_data =
+			&adapter->hw.mac.thermal_sensor_data.sensor;
+
+		if (!(create_proc_read_entry(
+				ngbe_internal_entries[index].name,
+				0444,
+				adapter->therm_dir,
+				ngbe_internal_entries[index].read,
+				&adapter->therm_data))) {
+			rc = -ENOMEM;
+			goto fail;
+		}
+	}
+
+	goto exit;
+
+fail:
+	ngbe_del_proc_entries(adapter);
+exit:
+	return rc;
+}
+
+#endif /* !NGBE_SYSFS */
+#endif /* NGBE_PROCFS */
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ptp.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ptp.c
new file mode 100644
index 0000000000000000000000000000000000000000..e6581c229a9d420198cecf0ebac8d21810bf97e6
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ptp.c
@@ -0,0 +1,887 @@
+/*
+ * WangXun Gigabit PCI Express Linux driver
+ * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ */
+
+
+#include "ngbe.h"
+#include
+
+/*
+ * SYSTIME is defined by a fixed point system which allows the user to
+ * define the scale counter increment value at every level change of
+ * the oscillator driving SYSTIME value. The time unit is determined by
+ * the clock frequency of the oscillator and TIMINCA register.
+ * The cyclecounter and timecounter structures are used to convert
+ * the scale counter into nanoseconds. SYSTIME registers need to be converted
+ * to ns values by use of only a right shift.
+ * The following math determines the largest incvalue that will fit into
+ * the available bits in the TIMINCA register:
+ *   Period * [ 2 ^ ( MaxWidth - PeriodWidth ) ]
+ * PeriodWidth: Number of bits to store the clock period
+ * MaxWidth: The maximum width value of the TIMINCA register
+ * Period: The clock period for the oscillator, which changes based on the link
+ * speed:
+ *   At 10Gb link or no link, the period is 6.4 ns.
+ *   At 1Gb link, the period is multiplied by 10. (64ns)
+ *   At 100Mb link, the period is multiplied by 100. (640ns)
+ * round(): discard the fractional portion of the calculation
+ *
+ * The calculated value allows us to right shift the SYSTIME register
+ * value in order to quickly convert it into a nanosecond clock,
+ * while allowing for the maximum possible adjustment value.
+ * + * LinkSpeed ClockFreq ClockPeriod TIMINCA:IV + * 10000Mbps 156.25MHz 6.4*10^-9 0xCCCCCC(0xFFFFF/ns) + * 1000 Mbps 62.5 MHz 16 *10^-9 0x800000(0x7FFFF/ns) + * 100 Mbps 6.25 MHz 160*10^-9 0xA00000(0xFFFF/ns) + * 10 Mbps 0.625 MHz 1600*10^-9 0xC7F380(0xFFF/ns) + * FPGA 31.25 MHz 32 *10^-9 0x800000(0x3FFFF/ns) + * + * These diagrams are only for the 10Gb link period + * + * +--------------+ +--------------+ + * | 32 | | 8 | 3 | 20 | + * *--------------+ +--------------+ + * \________ 43 bits ______/ fract + * + * The 43 bit SYSTIME overflows every + * 2^43 * 10^-9 / 3600 = 2.4 hours + */ +#define NGBE_INCVAL_10GB 0xCCCCCC +#define NGBE_INCVAL_1GB 0x2000000/*in Emerald all speed is same*/ +#define NGBE_INCVAL_100 0xA00000 +#define NGBE_INCVAL_10 0xC7F380 +#define NGBE_INCVAL_FPGA 0x800000 + +#define NGBE_INCVAL_SHIFT_10GB 20 +#define NGBE_INCVAL_SHIFT_1GB 22/*in Emerald all speed is same*/ +#define NGBE_INCVAL_SHIFT_100 15 +#define NGBE_INCVAL_SHIFT_10 12 +#define NGBE_INCVAL_SHIFT_FPGA 17 + +#define NGBE_OVERFLOW_PERIOD (HZ * 30) +#define NGBE_PTP_TX_TIMEOUT (HZ) + +/** + * ngbe_ptp_read - read raw cycle counter (to be used by time counter) + * @hw_cc: the cyclecounter structure + * + * this function reads the cyclecounter registers and is called by the + * cyclecounter structure used to construct a ns counter from the + * arbitrary fixed point registers + */ +static u64 ngbe_ptp_read(const struct cyclecounter *hw_cc) +{ + struct ngbe_adapter *adapter = + container_of(hw_cc, struct ngbe_adapter, hw_cc); + struct ngbe_hw *hw = &adapter->hw; + u64 stamp = 0; + + stamp |= (u64)rd32(hw, NGBE_TSEC_1588_SYSTIML); + stamp |= (u64)rd32(hw, NGBE_TSEC_1588_SYSTIMH) << 32; + + return stamp; +} + +/** + * ngbe_ptp_convert_to_hwtstamp - convert register value to hw timestamp + * @adapter: private adapter structure + * @hwtstamp: stack timestamp structure + * @systim: unsigned 64bit system time value + * + * We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value + * which can be used by the stack's ptp functions. + * + * The lock is used to protect consistency of the cyclecounter and the SYSTIME + * registers. However, it does not need to protect against the Rx or Tx + * timestamp registers, as there can't be a new timestamp until the old one is + * unlatched by reading. + * + * In addition to the timestamp in hardware, some controllers need a software + * overflow cyclecounter, and this function takes this into account as well. + **/ +static void ngbe_ptp_convert_to_hwtstamp(struct ngbe_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamp, + u64 timestamp) +{ + unsigned long flags; + u64 ns; + + memset(hwtstamp, 0, sizeof(*hwtstamp)); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_cyc2time(&adapter->hw_tc, timestamp); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + hwtstamp->hwtstamp = ns_to_ktime(ns); +} + + +/** + * ngbe_ptp_adjfreq + * @ptp: the ptp clock structure + * @ppb: parts per billion adjustment from base + * + * adjust the frequency of the ptp cycle counter by the + * indicated ppb from the base frequency. 
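+ *
+ * Worked example (illustrative input only): with the NGBE_INCVAL_1GB base
+ * value of 0x2000000 used in this file and a requested correction of
+ * ppb = 100, the code below computes
+ *   diff = (0x2000000 * 100) / 1000000000 = 3
+ * so the value programmed into NGBE_TSEC_1588_INC becomes incval + 3
+ * (or incval - 3 for a negative ppb).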
+ */ + #ifndef HAVE_NOT_PTT_ADJFREQ +static int ngbe_ptp_adjfreq(struct ptp_clock_info *ptp, int ppb) +{ + struct ngbe_adapter *adapter = + container_of(ptp, struct ngbe_adapter, ptp_caps); + struct ngbe_hw *hw = &adapter->hw; + u64 freq, incval; + u32 diff; + int neg_adj = 0; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + + smp_mb(); + incval = READ_ONCE(adapter->base_incval); + + freq = incval; + freq *= ppb; + diff = div_u64(freq, 1000000000ULL); + + incval = neg_adj ? (incval - diff) : (incval + diff); + /* temp setting*/ + + if (incval > NGBE_TSEC_1588_INC_IV(~0)) + e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); + wr32(hw, NGBE_TSEC_1588_INC, NGBE_TSEC_1588_INC_IV(incval)); + + return 0; +} +#endif + +/** + * ngbe_ptp_adjtime + * @ptp: the ptp clock structure + * @delta: offset to adjust the cycle counter by ns + * + * adjust the timer by resetting the timecounter structure. + */ +static int ngbe_ptp_adjtime(struct ptp_clock_info *ptp, + s64 delta) +{ + struct ngbe_adapter *adapter = + container_of(ptp, struct ngbe_adapter, ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + timecounter_adjtime(&adapter->hw_tc, delta); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + return 0; +} + +/** + * ngbe_ptp_gettime64 + * @ptp: the ptp clock structure + * @ts: timespec64 structure to hold the current time value + * + * read the timecounter and return the correct value on ns, + * after converting it into a struct timespec64. + */ +static int ngbe_ptp_gettime64(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct ngbe_adapter *adapter = + container_of(ptp, struct ngbe_adapter, ptp_caps); + unsigned long flags; + u64 ns; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->hw_tc); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +/** + * ngbe_ptp_settime64 + * @ptp: the ptp clock structure + * @ts: the timespec64 containing the new time for the cycle counter + * + * reset the timecounter to use a new base value instead of the kernel + * wall timer value. + */ +static int ngbe_ptp_settime64(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct ngbe_adapter *adapter = + container_of(ptp, struct ngbe_adapter, ptp_caps); + u64 ns; + unsigned long flags; + + ns = timespec64_to_ns(ts); + + /* reset the timecounter */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + timecounter_init(&adapter->hw_tc, &adapter->hw_cc, ns); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + return 0; +} + +#ifndef HAVE_PTP_CLOCK_INFO_GETTIME64 +static int ngbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +{ + struct timespec64 ts64; + int err; + + err = ngbe_ptp_gettime64(ptp, &ts64); + if (err) + return err; + + *ts = timespec64_to_timespec(ts64); + + return 0; +} + +static int ngbe_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct timespec64 ts64; + + ts64 = timespec_to_timespec64(*ts); + return ngbe_ptp_settime64(ptp, &ts64); +} +#endif + +/** + * ngbe_ptp_feature_enable + * @ptp: the ptp clock structure + * @rq: the requested feature to change + * @on: whether to enable or disable the feature + * + * enable (or disable) ancillary features of the phc subsystem. 
+ * our driver only supports the PPS feature on the X540 + */ +static int ngbe_ptp_feature_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -ENOTSUPP; +} + +/** + * ngbe_ptp_check_pps_event + * @adapter: the private adapter structure + * @eicr: the interrupt cause register value + * + * This function is called by the interrupt routine when checking for + * interrupts. It will check and handle a pps event. + */ +void ngbe_ptp_check_pps_event(struct ngbe_adapter *adapter) +{ + /* this check is necessary in case the interrupt was enabled via some + * alternative means (ex. debug_fs). Better to check here than + * everywhere that calls this function. + */ + if (!adapter->ptp_clock) + return; +} + +/** + * ngbe_ptp_overflow_check - watchdog task to detect SYSTIME overflow + * @adapter: private adapter struct + * + * this watchdog task periodically reads the timecounter + * in order to prevent missing when the system time registers wrap + * around. This needs to be run approximately twice a minute for the fastest + * overflowing hardware. We run it for all hardware since it shouldn't have a + * large impact. + */ +void ngbe_ptp_overflow_check(struct ngbe_adapter *adapter) +{ + bool timeout = time_is_before_jiffies(adapter->last_overflow_check + + NGBE_OVERFLOW_PERIOD); + struct timespec64 ts; + + if (timeout) { + ngbe_ptp_gettime64(&adapter->ptp_caps, &ts); + adapter->last_overflow_check = jiffies; + } +} + +/** + * ngbe_ptp_rx_hang - detect error case when Rx timestamp registers latched + * @adapter: private network adapter structure + * + * this watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. + */ +void ngbe_ptp_rx_hang(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_ring *rx_ring; + u32 tsyncrxctl = rd32(hw, NGBE_PSR_1588_CTL); + unsigned long rx_event; + int n; + + /* if we don't have a valid timestamp in the registers, just update the + * timeout counter and exit + */ + if (!(tsyncrxctl & NGBE_PSR_1588_CTL_VALID)) { + adapter->last_rx_ptp_check = jiffies; + return; + } + + /* determine the most recent watchdog or rx_timestamp event */ + rx_event = adapter->last_rx_ptp_check; + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + if (time_after(rx_ring->last_rx_timestamp, rx_event)) + rx_event = rx_ring->last_rx_timestamp; + } + + /* only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5 * HZ)) { + rd32(hw, NGBE_PSR_1588_STMPH); + adapter->last_rx_ptp_check = jiffies; + + adapter->rx_hwtstamp_cleared++; + e_warn(drv, "clearing RX Timestamp hang"); + } +} + +/** + * ngbe_ptp_clear_tx_timestamp - utility function to clear Tx timestamp state + * @adapter: the private adapter structure + * + * This function should be called whenever the state related to a Tx timestamp + * needs to be cleared. This helps ensure that all related bits are reset for + * the next Tx timestamp event. 
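+ *
+ * For reference, the Tx timestamping flow these helpers support is roughly
+ * (the transmit path that stashes the skb is not part of this excerpt, so
+ * the first two steps are assumptions about the usual driver pattern):
+ *
+ *   skb marked for hardware timestamping (SKBTX_HW_TSTAMP)
+ *     -> adapter->ptp_tx_skb is set and ptp_tx_work is scheduled
+ *     -> ngbe_ptp_tx_hwtstamp_work() polls NGBE_TSEC_1588_CTL_VALID
+ *     -> ngbe_ptp_tx_hwtstamp() reads NGBE_TSEC_1588_STMPL/STMPH and
+ *        calls skb_tstamp_tx()
+ *     -> ngbe_ptp_clear_tx_timestamp() frees the skb and clears
+ *        __NGBE_PTP_TX_IN_PROGRESS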
+ */ +static void ngbe_ptp_clear_tx_timestamp(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + + rd32(hw, NGBE_TSEC_1588_STMPH); + if (adapter->ptp_tx_skb) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + } + clear_bit_unlock(__NGBE_PTP_TX_IN_PROGRESS, &adapter->state); +} + +/** + * ngbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: the private adapter struct + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the shhwtstamps structure which + * is passed up the network stack + */ +static void ngbe_ptp_tx_hwtstamp(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct skb_shared_hwtstamps shhwtstamps; + u64 regval = 0; + + regval |= (u64)rd32(hw, NGBE_TSEC_1588_STMPL); + regval |= (u64)rd32(hw, NGBE_TSEC_1588_STMPH) << 32; + + ngbe_ptp_convert_to_hwtstamp(adapter, &shhwtstamps, regval); + skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); + + ngbe_ptp_clear_tx_timestamp(adapter); +} + +/** + * ngbe_ptp_tx_hwtstamp_work + * @work: pointer to the work struct + * + * This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware + * timestamp has been taken for the current skb. It is necesary, because the + * descriptor's "done" bit does not correlate with the timestamp event. + */ +static void ngbe_ptp_tx_hwtstamp_work(struct work_struct *work) +{ + struct ngbe_adapter *adapter = container_of(work, struct ngbe_adapter, + ptp_tx_work); + struct ngbe_hw *hw = &adapter->hw; + bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + + NGBE_PTP_TX_TIMEOUT); + u32 tsynctxctl; + + /* we have to have a valid skb to poll for a timestamp */ + if (!adapter->ptp_tx_skb) { + ngbe_ptp_clear_tx_timestamp(adapter); + return; + } + + /* stop polling once we have a valid timestamp */ + tsynctxctl = rd32(hw, NGBE_TSEC_1588_CTL); + if (tsynctxctl & NGBE_TSEC_1588_CTL_VALID) { + ngbe_ptp_tx_hwtstamp(adapter); + return; + } + + /* check timeout last in case timestamp event just occurred */ + if (timeout) { + ngbe_ptp_clear_tx_timestamp(adapter); + adapter->tx_hwtstamp_timeouts++; + e_warn(drv, "clearing Tx Timestamp hang"); + } else { + /* reschedule to keep checking until we timeout */ + schedule_work(&adapter->ptp_tx_work); + } +} + +/** + * ngbe_ptp_rx_rgtstamp - utility function which checks for RX time stamp + * @q_vector: structure containing interrupt and ring information + * @skb: particular skb to send timestamp with + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the shhwtstamps structure which + * is passed up the network stack + */ +void ngbe_ptp_rx_hwtstamp(struct ngbe_adapter *adapter, struct sk_buff *skb) +{ + struct ngbe_hw *hw = &adapter->hw; + u64 regval = 0; + u32 tsyncrxctl; + + /* + * Read the tsyncrxctl register afterwards in order to prevent taking an + * I/O hit on every packet. + */ + tsyncrxctl = rd32(hw, NGBE_PSR_1588_CTL); + if (!(tsyncrxctl & NGBE_PSR_1588_CTL_VALID)) + return; + + regval |= (u64)rd32(hw, NGBE_PSR_1588_STMPL); + regval |= (u64)rd32(hw, NGBE_PSR_1588_STMPH) << 32; + + ngbe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); +} + +/** + * ngbe_ptp_get_ts_config - get current hardware timestamping configuration + * @adapter: pointer to adapter structure + * @ifreq: ioctl data + * + * This function returns the current timestamping settings. 
Rather than + * attempt to deconstruct registers to fill in the values, simply keep a copy + * of the old settings around, and return a copy when requested. + */ +int ngbe_ptp_get_ts_config(struct ngbe_adapter *adapter, struct ifreq *ifr) +{ + struct hwtstamp_config *config = &adapter->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, + sizeof(*config)) ? -EFAULT : 0; +} + +/** + * ngbe_ptp_set_timestamp_mode - setup the hardware for the requested mode + * @adapter: the private ngbe adapter structure + * @config: the hwtstamp configuration requested + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't cause any overhead + * when no packet needs it. At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware + * filters. Not all combinations are supported, in particular event + * type has to be specified. Matching the kind of event packet is + * not supported, with the exception of "all V2 events regardless of + * level 2 or 4". + * + * Since hardware always timestamps Path delay packets when timestamping V2 + * packets, regardless of the type specified in the register, only use V2 + * Event mode. This more accurately tells the user what the hardware is going + * to do anyways. + * + * Note: this may modify the hwtstamp configuration towards a more general + * mode, if required to support the specifically requested mode. + */ +static int ngbe_ptp_set_timestamp_mode(struct ngbe_adapter *adapter, + struct hwtstamp_config *config) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 tsync_tx_ctl = NGBE_TSEC_1588_CTL_ENABLED; + u32 tsync_rx_ctl = NGBE_PSR_1588_CTL_ENABLED; + u32 tsync_rx_mtrl = PTP_EV_PORT << 16; + bool is_l2 = false; + u32 regval; + + /* reserved for future extensions */ + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + tsync_rx_mtrl = 0; + adapter->flags &= ~(NGBE_FLAG_RX_HWTSTAMP_ENABLED | + NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= NGBE_PSR_1588_CTL_TYPE_L4_V1; + tsync_rx_mtrl |= NGBE_PSR_1588_MSGTYPE_V1_SYNC_MSG; + adapter->flags |= (NGBE_FLAG_RX_HWTSTAMP_ENABLED | + NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= NGBE_PSR_1588_CTL_TYPE_L4_V1; + tsync_rx_mtrl |= NGBE_PSR_1588_MSGTYPE_V1_DELAY_REQ_MSG; + adapter->flags |= (NGBE_FLAG_RX_HWTSTAMP_ENABLED | + NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + tsync_rx_ctl |= NGBE_PSR_1588_CTL_TYPE_EVENT_V2; + is_l2 = true; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + adapter->flags |= (NGBE_FLAG_RX_HWTSTAMP_ENABLED | + NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_ALL: + default: + /* register RXMTRL must be set in order to do 
V1 packets, + * therefore it is not possible to time stamp both V1 Sync and + * Delay_Req messages unless hardware supports timestamping all + * packets => return error + */ + adapter->flags &= ~(NGBE_FLAG_RX_HWTSTAMP_ENABLED | + NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + config->rx_filter = HWTSTAMP_FILTER_NONE; + return -ERANGE; + } + + /* define ethertype filter for timestamping L2 packets */ + if (is_l2) + wr32(hw, + NGBE_PSR_ETYPE_SWC(NGBE_PSR_ETYPE_SWC_FILTER_1588), + (NGBE_PSR_ETYPE_SWC_FILTER_EN | /* enable filter */ + NGBE_PSR_ETYPE_SWC_1588 | /* enable timestamping */ + ETH_P_1588)); /* 1588 eth protocol type */ + else + wr32(hw, + NGBE_PSR_ETYPE_SWC(NGBE_PSR_ETYPE_SWC_FILTER_1588), + 0); + + /* enable/disable TX */ + regval = rd32(hw, NGBE_TSEC_1588_CTL); + regval &= ~NGBE_TSEC_1588_CTL_ENABLED; + regval |= tsync_tx_ctl; + wr32(hw, NGBE_TSEC_1588_CTL, regval); + + /* enable/disable RX */ + regval = rd32(hw, NGBE_PSR_1588_CTL); + regval &= ~(NGBE_PSR_1588_CTL_ENABLED | NGBE_PSR_1588_CTL_TYPE_MASK); + regval |= tsync_rx_ctl; + wr32(hw, NGBE_PSR_1588_CTL, regval); + + /* define which PTP packets are time stamped */ + wr32(hw, NGBE_PSR_1588_MSGTYPE, tsync_rx_mtrl); + + NGBE_WRITE_FLUSH(hw); + + /* clear TX/RX timestamp state, just to be sure */ + ngbe_ptp_clear_tx_timestamp(adapter); + rd32(hw, NGBE_PSR_1588_STMPH); + + return 0; +} + +/** + * ngbe_ptp_set_ts_config - user entry point for timestamp mode + * @adapter: pointer to adapter struct + * @ifreq: ioctl data + * + * Set hardware to requested mode. If unsupported, return an error with no + * changes. Otherwise, store the mode for future reference. + */ +int ngbe_ptp_set_ts_config(struct ngbe_adapter *adapter, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int err; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = ngbe_ptp_set_timestamp_mode(adapter, &config); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&adapter->tstamp_config, &config, + sizeof(adapter->tstamp_config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +static void ngbe_ptp_link_speed_adjust(struct ngbe_adapter *adapter, + u32 *shift, u32 *incval) +{ + /** + * Scale the NIC cycle counter by a large factor so that + * relatively small corrections to the frequency can be added + * or subtracted. The drawbacks of a large factor include + * (a) the clock register overflows more quickly, (b) the cycle + * counter structure must be able to convert the systime value + * to nanoseconds using only a multiplier and a right-shift, + * and (c) the value must fit within the timinca register space + * => math based on internal DMA clock rate and available bits + * + * Note that when there is no link, internal DMA clock is same as when + * link speed is 10Gb. Set the registers correctly even when link is + * down to preserve the clock setting + */ + + *shift = NGBE_INCVAL_SHIFT_1GB; + *incval = NGBE_INCVAL_1GB; + + return; +} + +/** + * ngbe_ptp_start_cyclecounter - create the cycle counter from hw + * @adapter: pointer to the adapter structure + * + * This function should be called to set the proper values for the TIMINCA + * register and tell the cyclecounter structure what the tick rate of SYSTIME + * is. It does not directly modify SYSTIME registers or the timecounter + * structure. It should be called whenever a new TIMINCA value is necessary, + * such as during initialization or when the link speed changes. 
+ */ +void ngbe_ptp_start_cyclecounter(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + unsigned long flags; + struct cyclecounter cc; + u32 incval = 0; + + /* For some of the boards below this mask is technically incorrect. + * The timestamp mask overflows at approximately 61bits. However the + * particular hardware does not overflow on an even bitmask value. + * Instead, it overflows due to conversion of upper 32bits billions of + * cycles. Timecounters are not really intended for this purpose so + * they do not properly function if the overflow point isn't 2^N-1. + * However, the actual SYSTIME values in question take ~138 years to + * overflow. In practice this means they won't actually overflow. A + * proper fix to this problem would require modification of the + * timecounter delta calculations. + */ + cc.mask = CLOCKSOURCE_MASK(64); + cc.mult = 1; + cc.shift = 0; + + cc.read = ngbe_ptp_read; + ngbe_ptp_link_speed_adjust(adapter, &cc.shift, &incval); + wr32(hw, NGBE_TSEC_1588_INC, NGBE_TSEC_1588_INC_IV(incval)); + + /* update the base incval used to calculate frequency adjustment */ + WRITE_ONCE(adapter->base_incval, incval); + smp_mb(); + + /* need lock to prevent incorrect read while modifying cyclecounter */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + memcpy(&adapter->hw_cc, &cc, sizeof(adapter->hw_cc)); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); +} + +/** + * ngbe_ptp_reset + * @adapter: the ngbe private board structure + * + * When the MAC resets, all of the hardware configuration for timesync is + * reset. This function should be called to re-enable the device for PTP, + * using the last known settings. However, we do lose the current clock time, + * so we fallback to resetting it based on the kernel's realtime clock. + * + * This function will maintain the hwtstamp_config settings, and it retriggers + * the SDP output if it's enabled. + */ +void ngbe_ptp_reset(struct ngbe_adapter *adapter) +{ + unsigned long flags; + + /* reset the hardware timestamping mode */ + ngbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + ngbe_ptp_start_cyclecounter(adapter); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + timecounter_init(&adapter->hw_tc, &adapter->hw_cc, + ktime_to_ns(ktime_get_real())); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + adapter->last_overflow_check = jiffies; +} + +/** + * ngbe_ptp_create_clock + * @adapter: the ngbe private adapter structure + * + * This function performs setup of the user entry point function table and + * initalizes the PTP clock device used by userspace to access the clock-like + * features of the PTP core. It will be called by ngbe_ptp_init, and may + * re-use a previously initialized clock (such as during a suspend/resume + * cycle). 
+ */ + +static long ngbe_ptp_create_clock(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + long err; + + /* do nothing if we already have a clock device */ + if (!IS_ERR_OR_NULL(adapter->ptp_clock)) + return 0; + + snprintf(adapter->ptp_caps.name, sizeof(adapter->ptp_caps.name), + "%s", netdev->name); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 500000000; /* 10^-9s */ + adapter->ptp_caps.n_alarm = 0; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.n_per_out = 0; + adapter->ptp_caps.pps = 0; +#ifndef HAVE_NOT_PTT_ADJFREQ + adapter->ptp_caps.adjfreq = ngbe_ptp_adjfreq; +#endif + adapter->ptp_caps.adjtime = ngbe_ptp_adjtime; +#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 + adapter->ptp_caps.gettime64 = ngbe_ptp_gettime64; + adapter->ptp_caps.settime64 = ngbe_ptp_settime64; +#else + adapter->ptp_caps.gettime = ngbe_ptp_gettime; + adapter->ptp_caps.settime = ngbe_ptp_settime; +#endif + adapter->ptp_caps.enable = ngbe_ptp_feature_enable; + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + pci_dev_to_dev(adapter->pdev)); + if (IS_ERR(adapter->ptp_clock)) { + err = PTR_ERR(adapter->ptp_clock); + adapter->ptp_clock = NULL; + e_dev_err("ptp_clock_register failed\n"); + return err; + } else + e_dev_info("registered PHC device on %s\n", netdev->name); + + /* Set the default timestamp mode to disabled here. We do this in + * create_clock instead of initialization, because we don't want to + * override the previous settings during a suspend/resume cycle. + */ + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + return 0; +} + +/** + * ngbe_ptp_init + * @adapter: the ngbe private adapter structure + * + * This function performs the required steps for enabling ptp + * support. If ptp support has already been loaded it simply calls the + * cyclecounter init routine and exits. + */ +void ngbe_ptp_init(struct ngbe_adapter *adapter) +{ + /* initialize the spin lock first, since the user might call the clock + * functions any time after we've initialized the ptp clock device. + */ + spin_lock_init(&adapter->tmreg_lock); + + /* obtain a ptp clock device, or re-use an existing device */ + if (ngbe_ptp_create_clock(adapter)) + return; + + /* we have a clock, so we can intialize work for timestamps now */ + INIT_WORK(&adapter->ptp_tx_work, ngbe_ptp_tx_hwtstamp_work); + + /* reset the ptp related hardware bits */ + ngbe_ptp_reset(adapter); + + /* enter the NGBE_PTP_RUNNING state */ + set_bit(__NGBE_PTP_RUNNING, &adapter->state); + + return; +} + +/** + * ngbe_ptp_suspend - stop ptp work items + * @adapter: pointer to adapter struct + * + * This function suspends ptp activity, and prevents more work from being + * generated, but does not destroy the clock device. + */ +void ngbe_ptp_suspend(struct ngbe_adapter *adapter) +{ + /* leave the NGBE_PTP_RUNNING STATE */ + if (!test_and_clear_bit(__NGBE_PTP_RUNNING, &adapter->state)) + return; + + adapter->flags2 &= ~NGBE_FLAG2_PTP_PPS_ENABLED; + + cancel_work_sync(&adapter->ptp_tx_work); + ngbe_ptp_clear_tx_timestamp(adapter); +} + +/** + * ngbe_ptp_stop - destroy the ptp_clock device + * @adapter: pointer to adapter struct + * + * Completely destroy the ptp_clock device, and disable all PTP related + * features. Intended to be run when the device is being closed. 
+ */ +void ngbe_ptp_stop(struct ngbe_adapter *adapter) +{ + /* first, suspend ptp activity */ + ngbe_ptp_suspend(adapter); + + /* now destroy the ptp clock device */ + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + adapter->ptp_clock = NULL; + e_dev_info("removed PHC on %s\n", + adapter->netdev->name); + } +} diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_sriov.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_sriov.c new file mode 100644 index 0000000000000000000000000000000000000000..09ccd8fb33be638fdfe3acc70086278d81b43dcd --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_sriov.c @@ -0,0 +1,1590 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ngbe.h" +#include "ngbe_type.h" +#include "ngbe_sriov.h" + +#ifdef CONFIG_PCI_IOV +static int __ngbe_enable_sriov(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int num_vf_macvlans, i; + struct vf_macvlans *mv_list; + + adapter->flags |= NGBE_FLAG_SRIOV_ENABLED; + e_dev_info("SR-IOV enabled with %d VFs\n", adapter->num_vfs); + + /* Enable VMDq flag so device will be set in VM mode */ + adapter->flags |= NGBE_FLAG_VMDQ_ENABLED; + if (!adapter->ring_feature[RING_F_VMDQ].limit) + adapter->ring_feature[RING_F_VMDQ].limit = 1; + adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs; + + num_vf_macvlans = hw->mac.num_rar_entries - + (NGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs); + + adapter->mv_list = mv_list = kcalloc(num_vf_macvlans, + sizeof(struct vf_macvlans), + GFP_KERNEL); + if (mv_list) { + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&adapter->vf_mvs.l); + for (i = 0; i < num_vf_macvlans; i++) { + mv_list->vf = -1; + mv_list->free = true; + list_add(&mv_list->l, &adapter->vf_mvs.l); + mv_list++; + } + } + + /* Initialize default switching mode VEB */ + wr32m(hw, NGBE_PSR_CTL, + NGBE_PSR_CTL_SW_EN, NGBE_PSR_CTL_SW_EN); + + /* If call to enable VFs succeeded then allocate memory + * for per VF control structures. 
+ */
+	adapter->vfinfo = kcalloc(adapter->num_vfs,
+				  sizeof(struct vf_data_storage), GFP_KERNEL);
+	if (!adapter->vfinfo) {
+		adapter->num_vfs = 0;
+		e_dev_info("failed to allocate memory for VF Data Storage\n");
+		return -ENOMEM;
+	}
+
+	/* enable L2 switch and replication */
+	adapter->flags |= NGBE_FLAG_SRIOV_L2SWITCH_ENABLE |
+			  NGBE_FLAG_SRIOV_REPLICATION_ENABLE;
+	// NGBE_FLAG_SRIOV_REPLICATION_ENABLE not used
+
+#ifdef NGBE_DISABLE_VF_MQ
+	/* We do not support RSS w/ SR-IOV */
+	adapter->ring_feature[RING_F_RSS].limit = 1;
+#endif
+
+	/* enable spoof checking for all VFs */
+	for (i = 0; i < adapter->num_vfs; i++) {
+		/* enable spoof checking for all VFs */
+		adapter->vfinfo[i].spoofchk_enabled = true;
+
+#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN
+		/* We support VF RSS querying only for 82599 and x540
+		 * devices at the moment. These devices share RSS
+		 * indirection table and RSS hash key with PF therefore
+		 * we want to disable the querying by default.
+		 */
+		adapter->vfinfo[i].rss_query_enabled = 0;
+
+#endif
+
+		/* Untrust all VFs */
+		adapter->vfinfo[i].trusted = false;
+
+		/* set the default xcast mode */
+		adapter->vfinfo[i].xcast_mode = NGBEVF_XCAST_MODE_NONE;
+	}
+
+	wr32m(hw, NGBE_CFG_PORT_CTL,
+	      NGBE_CFG_PORT_CTL_NUM_VT_MASK, NGBE_CFG_PORT_CTL_NUM_VT_8);
+
+	return 0;
+}
+
+#define NGBE_BA4_ADDR(vfinfo, reg) \
+	((u8 __iomem *)((u8 *)(vfinfo)->b4_addr + (reg)))
+
+/**
+ * ngbe_get_vfs - Find and take references to all vf devices
+ * @adapter: Pointer to adapter struct
+ */
+static void ngbe_get_vfs(struct ngbe_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	u16 vendor = pdev->vendor;
+	struct pci_dev *vfdev;
+	int vf = 0;
+	u16 vf_id;
+	int pos;
+
+	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+	if (!pos)
+		return;
+	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
+
+	vfdev = pci_get_device(vendor, vf_id, NULL);
+	for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) {
+		struct vf_data_storage *vfinfo;
+		if (!vfdev->is_virtfn)
+			continue;
+		if (vfdev->physfn != pdev)
+			continue;
+		if (vf >= adapter->num_vfs)
+			continue;
+
+		/*pci_dev_get(vfdev);*/
+		vfinfo = &adapter->vfinfo[vf];
+		vfinfo->vfdev = vfdev;
+		vfinfo->b4_addr = ioremap(pci_resource_start(vfdev, 4), 64);
+
+		++vf;
+	}
+}
+
+/**
+ * ngbe_put_vfs - Release references to all vf devices
+ * @adapter: Pointer to adapter struct
+ */
+static void ngbe_put_vfs(struct ngbe_adapter *adapter)
+{
+	unsigned int num_vfs = adapter->num_vfs, vf;
+
+	/* put the reference to all of the vf devices */
+	for (vf = 0; vf < num_vfs; ++vf) {
+		struct vf_data_storage *vfinfo;
+		struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
+
+		if (!vfdev)
+			continue;
+
+		vfinfo = &adapter->vfinfo[vf];
+		iounmap(vfinfo->b4_addr);
+		vfinfo->b4_addr = NULL;
+		vfinfo->vfdev = NULL;
+		/*pci_dev_put(vfdev);*/
+	}
+}
+
+/* Note this function is called when the user wants to enable SR-IOV
+ * VFs using the now deprecated module parameter
+ */
+void ngbe_enable_sriov(struct ngbe_adapter *adapter)
+{
+	int pre_existing_vfs = 0;
+
+	if (!(adapter->flags & NGBE_FLAG_MSIX_ENABLED)) {
+		e_dev_warn("SR-IOV already disabled\n");
+		return;
+	}
+
+	pre_existing_vfs = pci_num_vf(adapter->pdev);
+	if (!pre_existing_vfs && !adapter->num_vfs)
+		return;
+
+	/* If there are pre-existing VFs then we have to force
+	 * use of that many - override any module parameter value.
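+	 * For example (hypothetical numbers): if the module parameter asked
+	 * for 4 VFs but 8 VFs already exist because they were created earlier
+	 * through the sysfs sriov_numvfs interface, the code below keeps
+	 * num_vfs = 8 rather than trying to shrink the existing allocation.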
+ * This may result from the user unloading the PF driver + * while VFs were assigned to guest VMs or because the VFs + * have been created via the new PCI SR-IOV sysfs interface. + */ + if (pre_existing_vfs) { + adapter->num_vfs = pre_existing_vfs; + dev_warn(&adapter->pdev->dev, + "Virtual Functions already enabled for this device -" + "Please reload all VF drivers to avoid spoofed packet " + "errors\n"); + } else { + int err; + /* + * The sapphire supports up to 64 VFs per physical function + * but this implementation limits allocation to 63 so that + * basic networking resources are still available to the + * physical function. If the user requests greater thn + * 63 VFs then it is an error - reset to default of zero. + */ + adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, + NGBE_MAX_VFS_DRV_LIMIT); + + err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); + if (err) { + e_err(probe, "Failed to enable PCI sriov: %d\n", err); + adapter->num_vfs = 0; + return; + } + } + + if (!__ngbe_enable_sriov(adapter)) { + ngbe_get_vfs(adapter); + return; + } + + /* If we have gotten to this point then there is no memory available + * to manage the VF devices - print message and bail. + */ + e_err(probe, "Unable to allocate memory for VF Data Storage - " + "SRIOV disabled\n"); + ngbe_disable_sriov(adapter); +} +#endif /* CONFIG_PCI_IOV */ + +int ngbe_disable_sriov(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + +#ifdef CONFIG_PCI_IOV + /* + * If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (pci_vfs_assigned(adapter->pdev)) { + e_dev_warn("Unloading driver while VFs are assigned -" + "VFs will not be deallocated\n"); + return -EPERM; + } + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); +#endif + + /* set num VFs to 0 to prevent access to vfinfo */ + adapter->num_vfs = 0; + + /* put the reference to all of the vf devices */ +#ifdef CONFIG_PCI_IOV + ngbe_put_vfs(adapter); +#endif + /* free VF control structures */ + kfree(adapter->vfinfo); + adapter->vfinfo = NULL; + + /* free macvlan list */ + kfree(adapter->mv_list); + adapter->mv_list = NULL; + + /* if SR-IOV is already disabled then there is nothing to do */ + if (!(adapter->flags & NGBE_FLAG_SRIOV_ENABLED)) + return 0; + +#if 0 +#ifdef CONFIG_PCI_IOV + /* + * If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (pci_vfs_assigned(adapter->pdev)) { + e_dev_warn("Unloading driver while VFs are assigned -" + "VFs will not be deallocated\n"); + return -EPERM; + } + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); +#endif +#endif + + /* set default pool back to 0 */ + wr32m(hw, NGBE_PSR_VM_CTL, + NGBE_PSR_VM_CTL_POOL_MASK, 0); + NGBE_WRITE_FLUSH(hw); + + adapter->ring_feature[RING_F_VMDQ].offset = 0; + + /* take a breather then clean up driver data */ + msleep(100); + + adapter->flags &= ~NGBE_FLAG_SRIOV_ENABLED; + + /* Disable VMDq flag so device will be set in VM mode */ + if (adapter->ring_feature[RING_F_VMDQ].limit == 1) { + adapter->flags &= ~NGBE_FLAG_VMDQ_ENABLED; + } + + return 0; +} + +static int ngbe_set_vf_multicasts(struct ngbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u16 entries = (msgbuf[0] & NGBE_VT_MSGINFO_MASK) + >> NGBE_VT_MSGINFO_SHIFT; + u16 *hash_list = (u16 *)&msgbuf[1]; + struct vf_data_storage *vfinfo = 
&adapter->vfinfo[vf]; + struct ngbe_hw *hw = &adapter->hw; + int i; + u32 vector_bit; + u32 vector_reg; + u32 mta_reg; + u32 vmolr = rd32(hw, NGBE_PSR_VM_L2CTL(vf)); + + /* only so many hash values supported */ + entries = min(entries, (u16)NGBE_MAX_VF_MC_ENTRIES); + + /* salt away the number of multi cast addresses assigned + * to this VF for later use to restore when the PF multi cast + * list changes + */ + vfinfo->num_vf_mc_hashes = entries; + + /* VFs are limited to using the MTA hash table for their multicast + * addresses */ + for (i = 0; i < entries; i++) + vfinfo->vf_mc_hashes[i] = hash_list[i]; + + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { + vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; + vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; + /* errata 5: maintain a copy of the register table conf */ + mta_reg = hw->mac.mta_shadow[vector_reg]; + mta_reg |= (1 << vector_bit); + hw->mac.mta_shadow[vector_reg] = mta_reg; + wr32(hw, NGBE_PSR_MC_TBL(vector_reg), mta_reg); + } + vmolr |= NGBE_PSR_VM_L2CTL_ROMPE; + wr32(hw, NGBE_PSR_VM_L2CTL(vf), vmolr); + + return 0; +} + +void ngbe_restore_vf_multicasts(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct vf_data_storage *vfinfo; + u32 i, j; + u32 vector_bit; + u32 vector_reg; + + for (i = 0; i < adapter->num_vfs; i++) { + u32 vmolr = rd32(hw, NGBE_PSR_VM_L2CTL(i)); + vfinfo = &adapter->vfinfo[i]; + for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { + hw->addr_ctrl.mta_in_use++; + vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; + vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; + wr32m(hw, NGBE_PSR_MC_TBL(vector_reg), + 1 << vector_bit, 1 << vector_bit); + /* errata 5: maintain a copy of the reg table conf */ + hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); + } + if (vfinfo->num_vf_mc_hashes) + vmolr |= NGBE_PSR_VM_L2CTL_ROMPE; + else + vmolr &= ~NGBE_PSR_VM_L2CTL_ROMPE; + wr32(hw, NGBE_PSR_VM_L2CTL(i), vmolr); + } + + /* Restore any VF macvlans */ + ngbe_full_sync_mac_table(adapter); +} + +int ngbe_set_vf_vlan(struct ngbe_adapter *adapter, int add, int vid, u16 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; + + return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add); +} + +static int ngbe_set_vf_lpe(struct ngbe_adapter *adapter, u32 max_frame, + u32 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 max_frs, reg_val; + + /* + * For sapphire we have to keep all PFs and VFs operating with + * the same max_frame value in order to avoid sending an oversize + * frame to a VF. 
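+	 * (Concrete illustration of the checks in this function: with a PF
+	 * MTU of 9000, pf_max_frame is well above ETH_FRAME_LEN, so a VF
+	 * that only negotiated mbox API 1.0 gets its bit cleared in
+	 * NGBE_RDM_POOL_RE and -EINVAL reported, while an API 1.1+ VF is
+	 * allowed to keep receiving.)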
In order to guarantee this is handled correctly + * for all cases we have several special exceptions to take into + * account before we can enable the VF for receive + */ + struct net_device *dev = adapter->netdev; + int pf_max_frame = dev->mtu + ETH_HLEN; + u32 vf_shift, vfre; + s32 err = 0; + + + + switch (adapter->vfinfo[vf].vf_api) { + case ngbe_mbox_api_11: + case ngbe_mbox_api_12: + case ngbe_mbox_api_13: + /* + * Version 1.1 supports jumbo frames on VFs if PF has + * jumbo frames enabled which means legacy VFs are + * disabled + */ + if (pf_max_frame > ETH_FRAME_LEN) + break; + fallthrough; + default: + /* + * If the PF or VF are running w/ jumbo frames enabled + * we need to shut down the VF Rx path as we cannot + * support jumbo frames on legacy VFs + */ + if ((pf_max_frame > ETH_FRAME_LEN) || + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) + err = -EINVAL; + break; + } + + /* determine VF receive enable location */ + vf_shift = vf; + + /* enable or disable receive depending on error */ + vfre = rd32(hw, NGBE_RDM_POOL_RE); + if (err) + vfre &= ~(1 << vf_shift); + else + vfre |= 1 << vf_shift; + wr32(hw, NGBE_RDM_POOL_RE, vfre); + + if (err) { + e_err(drv, "VF max_frame %d out of range\n", max_frame); + return err; + } + + /* pull current max frame size from hardware */ + max_frs = DIV_ROUND_UP(max_frame, 1024); + reg_val = rd32(hw, NGBE_MAC_WDG_TIMEOUT) & + NGBE_MAC_WDG_TIMEOUT_WTO_MASK; + if (max_frs > (reg_val + NGBE_MAC_WDG_TIMEOUT_WTO_DELTA)) { + wr32(hw, NGBE_MAC_WDG_TIMEOUT, + max_frs - NGBE_MAC_WDG_TIMEOUT_WTO_DELTA); + } + + e_info(hw, "VF requests change max MTU to %d\n", max_frame); + + return 0; +} + +void ngbe_set_vmolr(struct ngbe_hw *hw, u16 vf, bool aupe) +{ + u32 vmolr = rd32(hw, NGBE_PSR_VM_L2CTL(vf)); + vmolr |= NGBE_PSR_VM_L2CTL_BAM; + if (aupe) + vmolr |= NGBE_PSR_VM_L2CTL_AUPE; + else + vmolr &= ~NGBE_PSR_VM_L2CTL_AUPE; + wr32(hw, NGBE_PSR_VM_L2CTL(vf), vmolr); +} + +static void ngbe_set_vmvir(struct ngbe_adapter *adapter, + u16 vid, u16 qos, u16 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | + NGBE_TDM_VLAN_INS_VLANA_DEFAULT; + + wr32(hw, NGBE_TDM_VLAN_INS(vf), vmvir); +} + +static void ngbe_clear_vmvir(struct ngbe_adapter *adapter, u32 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + + wr32(hw, NGBE_TDM_VLAN_INS(vf), 0); +} + +static inline void ngbe_vf_reset_event(struct ngbe_adapter *adapter, u16 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + u8 num_tcs = netdev_get_num_tc(adapter->netdev); + + /* add PF assigned VLAN or VLAN 0 */ + ngbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); + + /* reset offloads to defaults */ + ngbe_set_vmolr(hw, vf, !vfinfo->pf_vlan); + + /* set outgoing tags for VFs */ + if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { + ngbe_clear_vmvir(adapter, vf); + } else { + if (vfinfo->pf_qos || !num_tcs) + ngbe_set_vmvir(adapter, vfinfo->pf_vlan, + vfinfo->pf_qos, vf); + else + ngbe_set_vmvir(adapter, vfinfo->pf_vlan, + adapter->default_up, vf); + + if (vfinfo->spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + } + + /* reset multicast table array for vf */ + adapter->vfinfo[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + ngbe_set_rx_mode(adapter->netdev); + + ngbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + + /* reset VF api back to unknown */ + adapter->vfinfo[vf].vf_api = ngbe_mbox_api_10; +} + +int ngbe_set_vf_mac(struct ngbe_adapter *adapter, + u16 
vf, unsigned char *mac_addr) +{ + s32 retval = 0; + ngbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + retval = ngbe_add_mac_filter(adapter, mac_addr, vf); + if (retval >= 0) + memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN); + else + memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN); + + return retval; +} + +static int ngbe_negotiate_vf_api(struct ngbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + int api = msgbuf[1]; + + switch (api) { + case ngbe_mbox_api_10: + case ngbe_mbox_api_11: + case ngbe_mbox_api_12: + case ngbe_mbox_api_13: + adapter->vfinfo[vf].vf_api = api; + return 0; + default: + break; + } + + e_info(drv, "VF %d requested invalid api version %u\n", vf, api); + + return -1; +} + +static int ngbe_get_vf_queues(struct ngbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct net_device *dev = adapter->netdev; + unsigned int default_tc = 0; + u8 num_tcs = netdev_get_num_tc(dev); + + /* verify the PF is supporting the correct APIs */ + switch (adapter->vfinfo[vf].vf_api) { + case ngbe_mbox_api_20: + case ngbe_mbox_api_11: + break; + default: + return -1; + } + + /* only allow 1 Tx queue for bandwidth limiting */ + msgbuf[NGBE_VF_TX_QUEUES] = 1; + msgbuf[NGBE_VF_RX_QUEUES] = 1; + + /* notify VF of need for VLAN tag stripping, and correct queue */ + if (num_tcs) + msgbuf[NGBE_VF_TRANS_VLAN] = num_tcs; + else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos) + msgbuf[NGBE_VF_TRANS_VLAN] = 1; + else + msgbuf[NGBE_VF_TRANS_VLAN] = 0; + + /* notify VF of default queue */ + msgbuf[NGBE_VF_DEF_QUEUE] = default_tc; + + return 0; +} + +static int ngbe_get_vf_link_status(struct ngbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + /* verify the PF is supporting the correct APIs */ + switch (adapter->vfinfo[vf].vf_api) { + case ngbe_mbox_api_11: + case ngbe_mbox_api_12: + case ngbe_mbox_api_13: + break; + default: + return -1; + } + + if (adapter->link_up) + msgbuf[1] = NGBE_VF_STATUS_LINKUP; + else + msgbuf[1] = 0; + + return 0; +} + +static int ngbe_set_vf_macvlan(struct ngbe_adapter *adapter, + u16 vf, int index, unsigned char *mac_addr) +{ + struct list_head *pos; + struct vf_macvlans *entry; + s32 retval = 0; + + if (index <= 1) { + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->vf == vf) { + entry->vf = -1; + entry->free = true; + entry->is_macvlan = false; + ngbe_del_mac_filter(adapter, + entry->vf_macvlan, vf); + } + } + } + + /* + * If index was zero then we were asked to clear the uc list + * for the VF. We're done. + */ + if (!index) + return 0; + + entry = NULL; + + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->free) + break; + } + + /* + * If we traversed the entire list and didn't find a free entry + * then we're out of space on the RAR table. Also entry may + * be NULL because the original memory allocation for the list + * failed, which is not fatal but does mean we can't support + * VF requests for MACVLAN because we couldn't allocate + * memory for the list manangbeent required. 
+ */ + if (!entry || !entry->free) + return -ENOSPC; + + retval = ngbe_add_mac_filter(adapter, mac_addr, vf); + if (retval >= 0) { + entry->free = false; + entry->is_macvlan = true; + entry->vf = vf; + memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); + } + + return retval; +} + +#ifdef CONFIG_PCI_IOV +int ngbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) +{ + unsigned char vf_mac_addr[6]; + struct ngbe_adapter *adapter = pci_get_drvdata(pdev); + unsigned int vfn = (event_mask & 0x7); + bool enable = ((event_mask & 0x10000000U) != 0); + + if (enable) { + memset(vf_mac_addr, 0, ETH_ALEN); + memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); + } + + return 0; +} +#endif /* CONFIG_PCI_IOV */ + +static inline void ngbe_write_qde(struct ngbe_adapter *adapter, u32 vf, + u32 qde) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 q_per_pool = 1; + u32 reg = 0; + u32 i = vf * q_per_pool; + + reg = rd32(hw, NGBE_RDM_PF_QDE); + reg |= qde << i; + + wr32(hw, NGBE_RDM_PF_QDE, reg); + +} + +static inline void ngbe_write_hide_vlan(struct ngbe_adapter *adapter, u32 vf, + u32 hide_vlan) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 q_per_pool = 1; + u32 reg = 0; + u32 i = vf * q_per_pool; + reg = rd32(hw, NGBE_RDM_PF_HIDE); + + if (hide_vlan == 1) + reg |= hide_vlan << i; + else + reg &= hide_vlan << i; + + wr32(hw, NGBE_RDM_PF_HIDE, reg); +} + +static int ngbe_vf_reset_msg(struct ngbe_adapter *adapter, u16 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; + u32 reg, vf_shift; + u32 msgbuf[4] = {0, 0, 0, 0}; + u8 *addr = (u8 *)(&msgbuf[1]); + struct net_device *dev = adapter->netdev; + int pf_max_frame; + + e_info(probe, "VF Reset msg received from vf %d\n", vf); + + /* reset the filters for the device */ + ngbe_vf_reset_event(adapter, vf); + + /* set vf mac address */ + if (!is_zero_ether_addr(vf_mac)) + ngbe_set_vf_mac(adapter, vf, vf_mac); + + vf_shift = vf; + + /* enable transmit for vf */ + wr32m(hw, NGBE_TDM_POOL_TE, + 1 << vf, 1 << vf); + + /* force drop enable for all VF Rx queues */ + ngbe_write_qde(adapter, vf, 1); + + /* enable receive for vf */ + reg = rd32(hw, NGBE_RDM_POOL_RE); + reg |= 1 << vf_shift; + + pf_max_frame = dev->mtu + ETH_HLEN; + + if (pf_max_frame > ETH_FRAME_LEN) + reg &= ~(1 << vf_shift); + wr32(hw, NGBE_RDM_POOL_RE, reg); + + /* enable VF mailbox for further messages */ + adapter->vfinfo[vf].clear_to_send = true; + + /* reply to reset with ack and vf mac address */ + msgbuf[0] = NGBE_VF_RESET; + if (!is_zero_ether_addr(vf_mac)) { + msgbuf[0] |= NGBE_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] |= NGBE_VT_MSGTYPE_NACK; + dev_warn(pci_dev_to_dev(adapter->pdev), + "VF %d has no MAC address assigned, you may have to " + "assign one manually\n", vf); + } + + /* + * Piggyback the multicast filter type so VF can compute the + * correct vectors + */ + msgbuf[3] = hw->mac.mc_filter_type; + ngbe_write_mbx(hw, msgbuf, NGBE_VF_PERMADDR_MSG_LEN, vf); + + return 0; +} + +static int ngbe_set_vf_mac_addr(struct ngbe_adapter *adapter, + u32 *msgbuf, u16 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } + + if (adapter->vfinfo[vf].pf_set_mac && + memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac, + ETH_ALEN)) { + u8 *pm = adapter->vfinfo[vf].vf_mac_addresses; + e_warn(drv, + "VF %d attempted to set a new MAC address but it already " + "has an administratively set MAC 
address " + "%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", + vf, pm[0], pm[1], pm[2], pm[3], pm[4], pm[5]); + e_warn(drv, "Check the VF driver and if it is not using the " + "correct MAC address you may need to reload the VF " + "driver\n"); + return -1; + } + return ngbe_set_vf_mac(adapter, vf, new_mac) < 0; +} + +#ifdef CONFIG_PCI_IOV +static int ngbe_find_vlvf_entry(struct ngbe_hw *hw, u32 vlan) +{ + u32 vlvf; + s32 regindex; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the vlan id in the VLVF entries */ + for (regindex = 1; regindex < NGBE_PSR_VLAN_SWC_ENTRIES; regindex++) { + wr32(hw, NGBE_PSR_VLAN_SWC_IDX, regindex); + vlvf = rd32(hw, NGBE_PSR_VLAN_SWC); + if ((vlvf & VLAN_VID_MASK) == vlan) + break; + } + + /* Return a negative value if not found */ + if (regindex >= NGBE_PSR_VLAN_SWC_ENTRIES) + regindex = -1; + + return regindex; +} +#endif /* CONFIG_PCI_IOV */ + +static int ngbe_set_vf_vlan_msg(struct ngbe_adapter *adapter, + u32 *msgbuf, u16 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + int add = (msgbuf[0] & NGBE_VT_MSGINFO_MASK) >> NGBE_VT_MSGINFO_SHIFT; + int vid = (msgbuf[1] & NGBE_PSR_VLAN_SWC_VLANID_MASK); + int err; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (adapter->vfinfo[vf].pf_vlan || tcs) { + e_warn(drv, + "VF %d attempted to override administratively set VLAN " + "configuration\n" + "Reload the VF driver to resume operations\n", + vf); + return -1; + } + + if (add) + adapter->vfinfo[vf].vlan_count++; + else if (adapter->vfinfo[vf].vlan_count) + adapter->vfinfo[vf].vlan_count--; + + /* in case of promiscuous mode any VLAN filter set for a VF must + * also have the PF pool added to it. + */ + if (add && adapter->netdev->flags & IFF_PROMISC) + err = ngbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + + err = ngbe_set_vf_vlan(adapter, add, vid, vf); + if (!err && adapter->vfinfo[vf].spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + +#ifdef CONFIG_PCI_IOV + /* Go through all the checks to see if the VLAN filter should + * be wiped completely. + */ + if (!add && adapter->netdev->flags & IFF_PROMISC) { + u32 bits = 0, vlvf; + s32 reg_ndx; + + reg_ndx = ngbe_find_vlvf_entry(hw, vid); + if (reg_ndx < 0) + goto out; + wr32(hw, NGBE_PSR_VLAN_SWC_IDX, reg_ndx); + vlvf = rd32(hw, NGBE_PSR_VLAN_SWC); + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ + if (VMDQ_P(0) < 32) { + bits = rd32(hw, NGBE_PSR_VLAN_SWC_VM_L); + bits &= ~(1 << VMDQ_P(0)); + } else { + bits &= ~(1 << (VMDQ_P(0) - 32)); + bits |= rd32(hw, NGBE_PSR_VLAN_SWC_VM_L); + } + + /* If the filter was removed then ensure PF pool bit + * is cleared if the PF only added itself to the pool + * because the PF is in promiscuous mode. 
+ */ + if ((vlvf & VLAN_VID_MASK) == vid && +#ifndef HAVE_VLAN_RX_REGISTER + !test_bit(vid, adapter->active_vlans) && +#endif + !bits) + ngbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + } + +out: +#endif + return err; +} + +static int ngbe_set_vf_macvlan_msg(struct ngbe_adapter *adapter, + u32 *msgbuf, u16 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + int index = (msgbuf[0] & NGBE_VT_MSGINFO_MASK) >> + NGBE_VT_MSGINFO_SHIFT; + int err; + + if (adapter->vfinfo[vf].pf_set_mac && index > 0) { + e_warn(drv, + "VF %d requested MACVLAN filter but is administratively denied\n", + vf); + return -1; + } + + /* An non-zero index indicates the VF is setting a filter */ + if (index) { + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } +#if defined(IFLA_VF_MAX) && defined(HAVE_VF_SPOOFCHK_CONFIGURE) + /* + * If the VF is allowed to set MAC filters then turn off + * anti-spoofing to avoid false positives. + */ + if (adapter->vfinfo[vf].spoofchk_enabled) + ngbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false); +#endif /* defined(IFLA_VF_MAX) && defined(HAVE_VF_SPOOFCHK_CONFIGURE) */ + } + + err = ngbe_set_vf_macvlan(adapter, vf, index, new_mac); + if (err == -ENOSPC) + e_warn(drv, + "VF %d has requested a MACVLAN filter but there is no " + "space for it\n", + vf); + + return err < 0; +} + +static int ngbe_update_vf_xcast_mode(struct ngbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + int xcast_mode = msgbuf[1]; + u32 vmolr, disable, enable; + + /* verify the PF is supporting the correct APIs */ + switch (adapter->vfinfo[vf].vf_api) { + case ngbe_mbox_api_12: + /* promisc introduced in 1.3 version */ + if (xcast_mode == NGBEVF_XCAST_MODE_PROMISC) + return -EOPNOTSUPP; + /* Fall threw */ + case ngbe_mbox_api_13: + break; + default: + return -EOPNOTSUPP; + } +#if 0 /* trust all vf */ + if (xcast_mode > NGBEVF_XCAST_MODE_MULTI && + !adapter->vfinfo[vf].trusted) { + xcast_mode = NGBEVF_XCAST_MODE_MULTI; + } +#endif /* trust all vf */ + if (adapter->vfinfo[vf].xcast_mode == xcast_mode) + goto out; + + switch (xcast_mode) { + case NGBEVF_XCAST_MODE_NONE: + disable = NGBE_PSR_VM_L2CTL_BAM | + NGBE_PSR_VM_L2CTL_ROMPE | + NGBE_PSR_VM_L2CTL_MPE | + NGBE_PSR_VM_L2CTL_UPE | + NGBE_PSR_VM_L2CTL_VPE; + enable = 0; + break; + case NGBEVF_XCAST_MODE_MULTI: + disable = NGBE_PSR_VM_L2CTL_MPE | + NGBE_PSR_VM_L2CTL_UPE | + NGBE_PSR_VM_L2CTL_VPE; + enable = NGBE_PSR_VM_L2CTL_BAM | + NGBE_PSR_VM_L2CTL_ROMPE; + break; + case NGBEVF_XCAST_MODE_ALLMULTI: + disable = NGBE_PSR_VM_L2CTL_UPE | + NGBE_PSR_VM_L2CTL_VPE; + enable = NGBE_PSR_VM_L2CTL_BAM | + NGBE_PSR_VM_L2CTL_ROMPE | + NGBE_PSR_VM_L2CTL_MPE; + break; + case NGBEVF_XCAST_MODE_PROMISC: + disable = 0; + enable = NGBE_PSR_VM_L2CTL_BAM | + NGBE_PSR_VM_L2CTL_ROMPE | + NGBE_PSR_VM_L2CTL_MPE | + NGBE_PSR_VM_L2CTL_UPE | + NGBE_PSR_VM_L2CTL_VPE; + break; + default: + return -EOPNOTSUPP; + } + + vmolr = rd32(hw, NGBE_PSR_VM_L2CTL(vf)); + vmolr &= ~disable; + vmolr |= enable; + wr32(hw, NGBE_PSR_VM_L2CTL(vf), vmolr); + + adapter->vfinfo[vf].xcast_mode = xcast_mode; + +out: + msgbuf[1] = xcast_mode; + + return 0; +} + +static int ngbe_rcv_msg_from_vf(struct ngbe_adapter *adapter, u16 vf) +{ + u16 mbx_size = NGBE_VXMAILBOX_SIZE; + u32 msgbuf[NGBE_VXMAILBOX_SIZE]; + struct ngbe_hw *hw = &adapter->hw; + s32 retval; + + retval = ngbe_read_mbx(hw, msgbuf, mbx_size, vf); + + if (retval) { + pr_err("Error receiving message from VF\n"); + return retval; + } + + /* this is a message we already 
processed, do nothing */ + if (msgbuf[0] & (NGBE_VT_MSGTYPE_ACK | NGBE_VT_MSGTYPE_NACK)) + return retval; + + /* flush the ack before we write any messages back */ + NGBE_WRITE_FLUSH(hw); + + if (msgbuf[0] == NGBE_VF_RESET) + return ngbe_vf_reset_msg(adapter, vf); + + /* + * until the vf completes a virtual function reset it should not be + * allowed to start any configuration. + */ + + if (!adapter->vfinfo[vf].clear_to_send) { + msgbuf[0] |= NGBE_VT_MSGTYPE_NACK; + ngbe_write_mbx(hw, msgbuf, 1, vf); + return retval; + } + + switch ((msgbuf[0] & 0xFFFF)) { + case NGBE_VF_SET_MAC_ADDR: + retval = ngbe_set_vf_mac_addr(adapter, msgbuf, vf); + break; + case NGBE_VF_SET_MULTICAST: + retval = ngbe_set_vf_multicasts(adapter, msgbuf, vf); + break; + case NGBE_VF_SET_VLAN: + retval = ngbe_set_vf_vlan_msg(adapter, msgbuf, vf); + break; + case NGBE_VF_SET_LPE: + if (msgbuf[1] > NGBE_MAX_JUMBO_FRAME_SIZE) { + e_err(drv, "VF max_frame %d exceed MAX_JUMBO_FRAME_SIZE\n", msgbuf[1]); + return -EINVAL; + } + retval = ngbe_set_vf_lpe(adapter, msgbuf[1], vf); + break; + case NGBE_VF_SET_MACVLAN: + retval = ngbe_set_vf_macvlan_msg(adapter, msgbuf, vf); + break; + case NGBE_VF_API_NEGOTIATE: + retval = ngbe_negotiate_vf_api(adapter, msgbuf, vf); + break; + case NGBE_VF_GET_QUEUES: + retval = ngbe_get_vf_queues(adapter, msgbuf, vf); + break; + case NGBE_VF_UPDATE_XCAST_MODE: + retval = ngbe_update_vf_xcast_mode(adapter, msgbuf, vf); + break; + case NGBE_VF_GET_LINK_STATUS: + retval = ngbe_get_vf_link_status(adapter, msgbuf, vf); + break; + case NGBE_VF_BACKUP: + break; + default: + e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); + retval = NGBE_ERR_MBX; + break; + } + + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= NGBE_VT_MSGTYPE_NACK; + else + msgbuf[0] |= NGBE_VT_MSGTYPE_ACK; + + msgbuf[0] |= NGBE_VT_MSGTYPE_CTS; + + ngbe_write_mbx(hw, msgbuf, mbx_size, vf); + + return retval; +} + +static void ngbe_rcv_ack_from_vf(struct ngbe_adapter *adapter, u16 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 msg = NGBE_VT_MSGTYPE_NACK; + + /* if device isn't clear to send it shouldn't be reading either */ + if (!adapter->vfinfo[vf].clear_to_send) + ngbe_write_mbx(hw, &msg, 1, vf); +} + +void ngbe_msg_task(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u16 vf; + + for (vf = 0; vf < adapter->num_vfs; vf++) { + /* process any reset requests */ + if (!ngbe_check_for_rst(hw, vf)) + ngbe_vf_reset_event(adapter, vf); + + /* process any messages pending */ + if (!ngbe_check_for_msg(hw, vf)) + ngbe_rcv_msg_from_vf(adapter, vf); + + /* process any acks */ + if (!ngbe_check_for_ack(hw, vf)) + ngbe_rcv_ack_from_vf(adapter, vf); + } +} + +void ngbe_disable_tx_rx(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + + /* disable transmit and receive for all vfs */ + wr32(hw, NGBE_TDM_POOL_TE, 0); + wr32(hw, NGBE_RDM_POOL_RE, 0); +} + +#ifdef HAVE_NDO_SET_VF_TRUST +static inline void ngbe_ping_vf(struct ngbe_adapter *adapter, int vf) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 ping; + + ping = NGBE_PF_CONTROL_MSG; + if (adapter->vfinfo[vf].clear_to_send) + ping |= NGBE_VT_MSGTYPE_CTS; + ngbe_write_mbx(hw, &ping, 1, vf); +} +#endif + +void ngbe_ping_all_vfs(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 ping; + u16 i; + + for (i = 0 ; i < adapter->num_vfs; i++) { + ping = NGBE_PF_CONTROL_MSG; + if (adapter->vfinfo[i].clear_to_send) + ping |= NGBE_VT_MSGTYPE_CTS; + ngbe_write_mbx(hw, &ping, 1, i); + } +} + +void 
ngbe_ping_all_vfs_with_link_status(struct ngbe_adapter *adapter, bool link_up) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 msgbuf[2]; + u16 i; + u32 link_speed = adapter->link_speed; + + msgbuf[0] = NGBE_NOFITY_VF_LINK_STATUS | NGBE_PF_CONTROL_MSG; + msgbuf[1] = (link_speed << 1) | link_up; + for (i = 0 ; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[i].clear_to_send) + msgbuf[0] |= NGBE_VT_MSGTYPE_CTS; + ngbe_write_mbx(hw, msgbuf, 2, i); + } +} + +#ifdef HAVE_NDO_SET_VF_TRUST +int ngbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->num_vfs) + return -EINVAL; + + /* nothing to do */ + if (adapter->vfinfo[vf].trusted == setting) + return 0; + + adapter->vfinfo[vf].trusted = setting; + + /* reset VF to reconfigure features */ + adapter->vfinfo[vf].clear_to_send = false; + ngbe_ping_vf(adapter, vf); + + e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not "); + + return 0; +} +#endif + +#ifdef CONFIG_PCI_IOV +static int ngbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(dev); + int err = 0; + int i; + int pre_existing_vfs = pci_num_vf(dev); + + if (!(adapter->flags & NGBE_FLAG_SRIOV_CAPABLE)) { + e_dev_warn("SRIOV not supported on this device\n"); + return -EOPNOTSUPP; + } + + if (!(adapter->flags & NGBE_FLAG_MSIX_ENABLED)) { + e_dev_warn("SR-IOV already disabled\n"); + return -EOPNOTSUPP; + } + + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + err = ngbe_disable_sriov(adapter); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + goto out; + + if (err) + goto err_out; + + /* While the SR-IOV capability structure reports total VFs to be + * 8 we limit the actual number that can be allocated to 7 so + * that some transmit/receive resources can be reserved to the + * PF. The PCI bus driver already checks for other values out of + * range. 
+ */ + if ((num_vfs + adapter->num_vmdqs) > NGBE_MAX_VF_FUNCTIONS) { + err = -EPERM; + goto err_out; + } + + adapter->num_vfs = num_vfs; + + err = __ngbe_enable_sriov(adapter); + if (err) + goto err_out; + + for (i = 0; i < adapter->num_vfs; i++) + ngbe_vf_configuration(dev, (i | 0x10000000)); + + err = pci_enable_sriov(dev, num_vfs); + if (err) { + e_dev_warn("Failed to enable PCI sriov: %d\n", err); + goto err_out; + } + ngbe_get_vfs(adapter); + msleep(100); + ngbe_sriov_reinit(adapter); +out: + return num_vfs; +err_out: + return err; +} + +static int ngbe_pci_sriov_disable(struct pci_dev *dev) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(dev); + int err; + u32 current_flags = adapter->flags; + + err = ngbe_disable_sriov(adapter); + + /* Only reinit if no error and state changed */ + if (!err && current_flags != adapter->flags) + ngbe_sriov_reinit(adapter); + + return err; +} +#endif + +int ngbe_pci_sriov_configure(struct pci_dev __maybe_unused *dev, + int __maybe_unused num_vfs) +{ +#ifdef CONFIG_PCI_IOV + if (num_vfs == 0) + return ngbe_pci_sriov_disable(dev); + else + return ngbe_pci_sriov_enable(dev, num_vfs); +#else + return 0; +#endif +} + +#ifdef IFLA_VF_MAX +int ngbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + s32 retval = 0; + struct ngbe_adapter *adapter = netdev_priv(netdev); + + if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs)) + return -EINVAL; + + dev_info(pci_dev_to_dev(adapter->pdev), + "setting MAC %pM on VF %d\n", mac, vf); + dev_info(pci_dev_to_dev(adapter->pdev), + "Reload the VF driver to make this change effective.\n"); + retval = ngbe_set_vf_mac(adapter, vf, mac); + if (retval >= 0) { + adapter->vfinfo[vf].pf_set_mac = true; + if (test_bit(__NGBE_DOWN, &adapter->state)) { + dev_warn(pci_dev_to_dev(adapter->pdev), + "The VF MAC address has been set, but the PF " + "device is not up.\n"); + dev_warn(pci_dev_to_dev(adapter->pdev), + "Bring the PF device up before attempting to " + "use the VF device.\n"); + } + } else { + dev_warn(pci_dev_to_dev(adapter->pdev), + "The VF MAC address was NOT set due to invalid or " + "duplicate MAC address.\n"); + } + + return retval; +} + +static int ngbe_enable_port_vlan(struct ngbe_adapter *adapter, + int vf, u16 vlan, u8 qos) +{ + struct ngbe_hw *hw = &adapter->hw; + int err; + + err = ngbe_set_vf_vlan(adapter, true, vlan, vf); + if (err) + goto out; + ngbe_set_vmvir(adapter, vlan, qos, vf); + ngbe_set_vmolr(hw, vf, false); + if (adapter->vfinfo[vf].spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + adapter->vfinfo[vf].vlan_count++; + /* enable hide vlan */ + ngbe_write_qde(adapter, vf, 1); + ngbe_write_hide_vlan(adapter, vf, 1); + adapter->vfinfo[vf].pf_vlan = vlan; + adapter->vfinfo[vf].pf_qos = qos; + dev_info(pci_dev_to_dev(adapter->pdev), + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__NGBE_DOWN, &adapter->state)) { + dev_warn(pci_dev_to_dev(adapter->pdev), + "The VF VLAN has been set, but the PF device is not " + "up.\n"); + dev_warn(pci_dev_to_dev(adapter->pdev), + "Bring the PF device up before attempting to use the VF " + "device.\n"); + } + +out: + return err; +} + +static int ngbe_disable_port_vlan(struct ngbe_adapter *adapter, int vf) +{ + struct ngbe_hw *hw = &adapter->hw; + int err; + + err = ngbe_set_vf_vlan(adapter, false, + adapter->vfinfo[vf].pf_vlan, vf); + ngbe_clear_vmvir(adapter, vf); + ngbe_set_vmolr(hw, vf, true); + hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); + if (adapter->vfinfo[vf].vlan_count) + 
adapter->vfinfo[vf].vlan_count--; + /* disable hide vlan */ + ngbe_write_hide_vlan(adapter, vf, 0); + adapter->vfinfo[vf].pf_vlan = 0; + adapter->vfinfo[vf].pf_qos = 0; + + return err; +} +#ifdef IFLA_VF_MAX +#ifdef IFLA_VF_VLAN_INFO_MAX +int ngbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, + u8 qos, __be16 vlan_proto) +#else +int ngbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) +#endif +{ + int err = 0; + struct ngbe_adapter *adapter = netdev_priv(netdev); + + /* VLAN IDs accepted range 0-4094 */ + if ((vf >= adapter->num_vfs) || (vlan > VLAN_VID_MASK-1) || (qos > 7)) + return -EINVAL; +#ifdef IFLA_VF_VLAN_INFO_MAX + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; +#endif + if (vlan || qos) { + /* + * Check if there is already a port VLAN set, if so + * we have to delete the old one first before we + * can set the new one. The usage model had + * previously assumed the user would delete the + * old port VLAN before setting a new one but this + * is not necessarily the case. + */ + if (adapter->vfinfo[vf].pf_vlan) + err = ngbe_disable_port_vlan(adapter, vf); + if (err) + goto out; + err = ngbe_enable_port_vlan(adapter, vf, vlan, qos); + + } else { + err = ngbe_disable_port_vlan(adapter, vf); + } +out: + return err; +} +#endif /* IFLA_VF_MAX */ +#if 0 +static void ngbe_set_vf_rate_limit(struct ngbe_adapter *adapter, int vf) +{ + struct ngbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct ngbe_hw *hw = &adapter->hw; + u32 bcnrc_val; + u16 queue, queues_per_pool; + u16 max_tx_rate = adapter->vfinfo[vf].max_tx_rate; +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + u16 min_tx_rate = adapter->vfinfo[vf].min_tx_rate; +#endif + + /* determine how many queues per pool based on VMDq mask */ + queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + + max_tx_rate /= queues_per_pool; + bcnrc_val = NGBE_TDM_RP_RATE_MAX(max_tx_rate); +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + min_tx_rate /= queues_per_pool; + bcnrc_val |= NGBE_TDM_RP_RATE_MIN(min_tx_rate); +#endif + + /* + * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported + * and 0x004 otherwise. 
+ */ + wr32(hw, NGBE_TDM_MMW, 0x14); + + /* write value for all Tx queues belonging to VF */ + for (queue = 0; queue < queues_per_pool; queue++) { + unsigned int reg_idx = (vf * queues_per_pool) + queue; + + wr32(hw, NGBE_TDM_RP_IDX, reg_idx); + wr32(hw, NGBE_TDM_RP_RATE, bcnrc_val); + if (max_tx_rate) + wr32m(hw, NGBE_TDM_RP_CTL, + NGBE_TDM_RP_CTL_RLEN, NGBE_TDM_RP_CTL_RLEN); + else + wr32m(hw, NGBE_TDM_RP_CTL, + NGBE_TDM_RP_CTL_RLEN, 0); + } +} +#endif +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +int ngbe_ndo_set_vf_bw(struct net_device *netdev, + int vf, + int min_tx_rate, + int max_tx_rate) +#else +int ngbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int max_tx_rate) +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + /* verify VF is active */ + if (vf >= adapter->num_vfs) + return -EINVAL; + + /* verify link is up */ + if (!adapter->link_up) + return -EINVAL; + + /* verify we are linked at 1 or 10 Gbps */ + if (adapter->link_speed < NGBE_LINK_SPEED_1GB_FULL) + return -EINVAL; + + /* store values */ +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + adapter->vfinfo[vf].min_tx_rate = min_tx_rate; +#endif + adapter->vfinfo[vf].max_tx_rate = max_tx_rate; + + return 0; +} + +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE +int ngbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 regval; + + if (vf >= adapter->num_vfs) + return -EINVAL; + + adapter->vfinfo[vf].spoofchk_enabled = setting; + + if (vf < 32) { + regval = (setting << vf); + wr32m(hw, NGBE_TDM_MAC_AS_L, + regval | (1 << vf), regval); + + if (adapter->vfinfo[vf].vlan_count) { + wr32m(hw, NGBE_TDM_VLAN_AS_L, + regval | (1 << vf), regval); + } + } + + return 0; +} +#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */ +int ngbe_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + if (vf >= adapter->num_vfs) + return -EINVAL; + ivi->vf = vf; + memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + ivi->max_tx_rate = adapter->vfinfo[vf].max_tx_rate; + ivi->min_tx_rate = adapter->vfinfo[vf].min_tx_rate; +#else + ivi->tx_rate = adapter->vfinfo[vf].max_tx_rate; +#endif + + ivi->vlan = adapter->vfinfo[vf].pf_vlan; + ivi->qos = adapter->vfinfo[vf].pf_qos; +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; +#endif +#ifdef HAVE_NDO_SET_VF_TRUST + ivi->trusted = adapter->vfinfo[vf].trusted; +#endif + + return 0; +} +#endif /* IFLA_VF_MAX */ + diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_sriov.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_sriov.h new file mode 100644 index 0000000000000000000000000000000000000000..2e15d8202695b985d7613d9d1f7106383330828c --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_sriov.h @@ -0,0 +1,76 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + */ + + +#ifndef _NGBE_SRIOV_H_ +#define _NGBE_SRIOV_H_ + +/* ngbe driver limit the max number of VFs could be enabled to + * 7 (NGBE_MAX_VF_FUNCTIONS - 1) + */ +#define NGBE_MAX_VFS_DRV_LIMIT (NGBE_MAX_VF_FUNCTIONS - 1) + +void ngbe_restore_vf_multicasts(struct ngbe_adapter *adapter); +int ngbe_set_vf_vlan(struct ngbe_adapter *adapter, int add, int vid, u16 vf); +void ngbe_set_vmolr(struct ngbe_hw *hw, u16 vf, bool aupe); +void ngbe_msg_task(struct ngbe_adapter *adapter); +int ngbe_set_vf_mac(struct ngbe_adapter *adapter, + u16 vf, unsigned char *mac_addr); +void ngbe_disable_tx_rx(struct ngbe_adapter *adapter); +void ngbe_ping_all_vfs(struct ngbe_adapter *adapter); +void ngbe_ping_all_vfs_with_link_status(struct ngbe_adapter *adapter, bool link_up); + +#ifdef IFLA_VF_MAX +int ngbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); +#ifdef IFLA_VF_VLAN_INFO_MAX +int ngbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, + u8 qos, __be16 vlan_proto); +#else +int ngbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, + u8 qos); +#endif +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +int ngbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, + int max_tx_rate); +#else +int ngbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE +int ngbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); +#endif +#ifdef HAVE_NDO_SET_VF_TRUST +int ngbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting); +#endif +int ngbe_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi); +#endif /* IFLA_VF_MAX */ +int ngbe_disable_sriov(struct ngbe_adapter *adapter); +#ifdef CONFIG_PCI_IOV +int ngbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); +void ngbe_enable_sriov(struct ngbe_adapter *adapter); +#endif +int ngbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs); + +#define NGBE_VF_STATUS_LINKUP 0x1 + +/* + * These are defined in ngbe_type.h on behalf of the VF driver + * but we need them here unwrapped for the PF driver. + */ +//#define NGBE_DEV_ID_SP_VF 0x1000 +#endif /* _NGBE_SRIOV_H_ */ + diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_sysfs.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..fce6e76df3aaa4cb913fc90e769f0bb13fc18bb8 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_sysfs.c @@ -0,0 +1,226 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + */ + + +#include "ngbe.h" +#include "ngbe_hw.h" +#include "ngbe_type.h" + +#ifdef NGBE_SYSFS + +#include +#include +#include +#include +#include +#include +#include +#ifdef NGBE_HWMON +#include +#endif + +#ifdef NGBE_HWMON +/* hwmon callback functions */ +static ssize_t ngbe_hwmon_show_temp(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ngbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + struct ngbe_hw *hw = ngbe_attr->hw; + unsigned int value; + + /* reset the temp field */ + hw->mac.ops.get_thermal_sensor_data(hw); + + value = ngbe_attr->sensor->temp; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t ngbe_hwmon_show_alarmthresh(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ngbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = ngbe_attr->sensor->alarm_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t ngbe_hwmon_show_dalarmthresh(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ngbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = ngbe_attr->sensor->dalarm_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +/** + * ngbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. + * @adapter: pointer to the adapter structure + * @type: type of sensor data to display + * + * For each file we want in hwmon's sysfs interface we need a device_attribute + * This is included in our hwmon_attr struct that contains the references to + * the data structures we need to get the data to display. 
+ */ +static int ngbe_add_hwmon_attr(struct ngbe_adapter *adapter, int type) +{ + int rc; + unsigned int n_attr; + struct hwmon_attr *ngbe_attr; + + n_attr = adapter->ngbe_hwmon_buff.n_hwmon; + ngbe_attr = &adapter->ngbe_hwmon_buff.hwmon_list[n_attr]; + + switch (type) { + case NGBE_HWMON_TYPE_TEMP: + ngbe_attr->dev_attr.show = ngbe_hwmon_show_temp; + snprintf(ngbe_attr->name, sizeof(ngbe_attr->name), + "temp%u_input", 0); + break; + case NGBE_HWMON_TYPE_ALARMTHRESH: + ngbe_attr->dev_attr.show = ngbe_hwmon_show_alarmthresh; + snprintf(ngbe_attr->name, sizeof(ngbe_attr->name), + "temp%u_alarmthresh", 0); + break; + case NGBE_HWMON_TYPE_DALARMTHRESH: + ngbe_attr->dev_attr.show = ngbe_hwmon_show_dalarmthresh; + snprintf(ngbe_attr->name, sizeof(ngbe_attr->name), + "temp%u_dalarmthresh", 0); + break; + default: + rc = -EPERM; + return rc; + } + + /* These always the same regardless of type */ + ngbe_attr->sensor = + &adapter->hw.mac.thermal_sensor_data.sensor; + ngbe_attr->hw = &adapter->hw; + ngbe_attr->dev_attr.store = NULL; + ngbe_attr->dev_attr.attr.mode = S_IRUGO; + ngbe_attr->dev_attr.attr.name = ngbe_attr->name; + + rc = device_create_file(pci_dev_to_dev(adapter->pdev), + &ngbe_attr->dev_attr); + + if (rc == 0) + ++adapter->ngbe_hwmon_buff.n_hwmon; + + return rc; +} +#endif /* NGBE_HWMON */ + +static void ngbe_sysfs_del_adapter( + struct ngbe_adapter __maybe_unused *adapter) +{ +#ifdef NGBE_HWMON + int i; + + if (adapter == NULL) + return; + + for (i = 0; i < adapter->ngbe_hwmon_buff.n_hwmon; i++) { + device_remove_file(pci_dev_to_dev(adapter->pdev), + &adapter->ngbe_hwmon_buff.hwmon_list[i].dev_attr); + } + + kfree(adapter->ngbe_hwmon_buff.hwmon_list); + + if (adapter->ngbe_hwmon_buff.device) + hwmon_device_unregister(adapter->ngbe_hwmon_buff.device); +#endif /* NGBE_HWMON */ +} + +/* called from ngbe_main.c */ +void ngbe_sysfs_exit(struct ngbe_adapter *adapter) +{ + ngbe_sysfs_del_adapter(adapter); +} + +/* called from ngbe_main.c */ +int ngbe_sysfs_init(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int rc = 0; +#ifdef NGBE_HWMON + struct hwmon_buff *ngbe_hwmon = &adapter->ngbe_hwmon_buff; + int n_attrs; + +#endif /* NGBE_HWMON */ + if (adapter == NULL) + goto err; + +#ifdef NGBE_HWMON + + /* Don't create thermal hwmon interface if no sensors present */ + if (hw->mac.ops.init_thermal_sensor_thresh(hw)) + goto no_thermal; + + /* + * Allocation space for max attributs + * max num sensors * values (temp, alamthresh, dalarmthresh) + */ + n_attrs = 3; + ngbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr), + GFP_KERNEL); + if (!ngbe_hwmon->hwmon_list) { + rc = -ENOMEM; + goto err; + } + + ngbe_hwmon->device = + hwmon_device_register(pci_dev_to_dev(adapter->pdev)); + if (IS_ERR(ngbe_hwmon->device)) { + rc = PTR_ERR(ngbe_hwmon->device); + goto err; + } + + + /* Bail if any hwmon attr struct fails to initialize */ + rc = ngbe_add_hwmon_attr(adapter, NGBE_HWMON_TYPE_TEMP); + rc |= ngbe_add_hwmon_attr(adapter, NGBE_HWMON_TYPE_ALARMTHRESH); + rc |= ngbe_add_hwmon_attr(adapter, NGBE_HWMON_TYPE_DALARMTHRESH); + if (rc) + goto err; + +no_thermal: +#endif /* NGBE_HWMON */ + goto exit; + +err: + ngbe_sysfs_del_adapter(adapter); +exit: + return rc; +} +#endif /* NGBE_SYSFS */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h index 72c8cd2d557513e24c510aa3fdc89dc01871e2eb..95e6edc49e8f33f19e3de17d92cf32f7806b4558 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h +++ 
b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h @@ -1,136 +1,3030 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + #ifndef _NGBE_TYPE_H_ #define _NGBE_TYPE_H_ -#include -#include +/* + * The following is a brief description of the error categories used by the + * ERROR_REPORT* macros. + * + * - NGBE_ERROR_INVALID_STATE + * This category is for errors which represent a serious failure state that is + * unexpected, and could be potentially harmful to device operation. It should + * not be used for errors relating to issues that can be worked around or + * ignored. + * + * - NGBE_ERROR_POLLING + * This category is for errors related to polling/timeout issues and should be + * used in any case where the timeout occured, or a failure to obtain a lock, or + * failure to receive data within the time limit. + * + * - NGBE_ERROR_CAUTION + * This category should be used for reporting issues that may be the cause of + * other errors, such as temperature warnings. It should indicate an event which + * could be serious, but hasn't necessarily caused problems yet. + * + * - NGBE_ERROR_SOFTWARE + * This category is intended for errors due to software state preventing + * something. The category is not intended for errors due to bad arguments, or + * due to unsupported features. It should be used when a state occurs which + * prevents action but is not a serious issue. + * + * - NGBE_ERROR_ARGUMENT + * This category is for when a bad or invalid argument is passed. It should be + * used whenever a function is called and error checking has detected the + * argument is wrong or incorrect. + * + * - NGBE_ERROR_UNSUPPORTED + * This category is for errors which are due to unsupported circumstances or + * configuration issues. It should not be used when the issue is due to an + * invalid argument, but for when something has occurred that is unsupported + * (Ex: Flow control autonegotiation or an unsupported SFP+ module.) 
+ */ + +#include "ngbe_osdep.h" + +#define NGBE_NO_LRO + + +/* Override this by setting IOMEM in your ngbe_osdep.h header */ +#ifndef IOMEM +#define IOMEM +#endif + +/* Little Endian defines */ +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 + +#endif +#ifndef __be16 +/* Big Endian defines */ +#define __be16 u16 +#define __be32 u32 +#define __be64 u64 + +#endif + +/************ ngbe_register.h ************/ +/* Vendor ID */ +#ifndef PCI_VENDOR_ID_TRUSTNETIC +#define PCI_VENDOR_ID_TRUSTNETIC 0x8088 +#endif -/************ NGBE_register.h ************/ /* Device IDs */ -#define NGBE_DEV_ID_EM_WX1860AL_W 0x0100 -#define NGBE_DEV_ID_EM_WX1860A2 0x0101 -#define NGBE_DEV_ID_EM_WX1860A2S 0x0102 -#define NGBE_DEV_ID_EM_WX1860A4 0x0103 -#define NGBE_DEV_ID_EM_WX1860A4S 0x0104 -#define NGBE_DEV_ID_EM_WX1860AL2 0x0105 -#define NGBE_DEV_ID_EM_WX1860AL2S 0x0106 -#define NGBE_DEV_ID_EM_WX1860AL4 0x0107 -#define NGBE_DEV_ID_EM_WX1860AL4S 0x0108 -#define NGBE_DEV_ID_EM_WX1860LC 0x0109 -#define NGBE_DEV_ID_EM_WX1860A1 0x010a -#define NGBE_DEV_ID_EM_WX1860A1L 0x010b +/* copper */ +#define NGBE_DEV_ID_EM_TEST 0x0000 +#define NGBE_DEV_ID_EM_WX1860AL_W 0x0100 +#define NGBE_DEV_ID_EM_WX1860A2 0x0101 +#define NGBE_DEV_ID_EM_WX1860A2S 0x0102 +#define NGBE_DEV_ID_EM_WX1860A4 0x0103 +#define NGBE_DEV_ID_EM_WX1860A4S 0x0104 +#define NGBE_DEV_ID_EM_WX1860AL2 0x0105 +#define NGBE_DEV_ID_EM_WX1860AL2S 0x0106 +#define NGBE_DEV_ID_EM_WX1860AL4 0x0107 +#define NGBE_DEV_ID_EM_WX1860AL4S 0x0108 +#define NGBE_DEV_ID_EM_WX1860NCSI 0x0109 +#define NGBE_DEV_ID_EM_WX1860A1 0x010a +#define NGBE_DEV_ID_EM_WX1860A1L 0x010b + + + + +/* transfer units */ +#define NGBE_KB_TO_B 1024 + +/* Revision ID */ +#define NGBE_SP_MPW 1 /* Subsystem ID */ -#define NGBE_SUBID_M88E1512_SFP 0x0003 -#define NGBE_SUBID_OCP_CARD 0x0040 -#define NGBE_SUBID_LY_M88E1512_SFP 0x0050 -#define NGBE_SUBID_M88E1512_RJ45 0x0051 -#define NGBE_SUBID_M88E1512_MIX 0x0052 -#define NGBE_SUBID_YT8521S_SFP 0x0060 -#define NGBE_SUBID_INTERNAL_YT8521S_SFP 0x0061 -#define NGBE_SUBID_YT8521S_SFP_GPIO 0x0062 -#define NGBE_SUBID_INTERNAL_YT8521S_SFP_GPIO 0x0064 -#define NGBE_SUBID_LY_YT8521S_SFP 0x0070 -#define NGBE_SUBID_RGMII_FPGA 0x0080 - -#define NGBE_OEM_MASK 0x00FF - -#define NGBE_NCSI_SUP 0x8000 -#define NGBE_NCSI_MASK 0x8000 -#define NGBE_WOL_SUP 0x4000 -#define NGBE_WOL_MASK 0x4000 - -/**************** EM Registers ****************************/ +#define NGBE_WX1860AL_INTERNAL 0x0410 +#define NGBE_WX1860AL_M88E1512_SFP 0x0403 +#define NGBE_WX1860AL_YT8521S_SFP 0x0460 + +#define NGBE_SUBSYSTEM_ID_EM_SF100F_LP 0x0103 +#define NGBE_SUBSYSTEM_ID_EM_SF100HF_LP 0x0103 +#define NGBE_SUBSYSTEM_ID_EM_SF200T 0x0201 +#define NGBE_SUBSYSTEM_ID_EM_SF200T_S 0x0210 +#define NGBE_SUBSYSTEM_ID_EM_SF400T 0x0401 +#define NGBE_SUBSYSTEM_ID_EM_SF400T_S 0x0410 +#define NGBE_SUBSYSTEM_ID_EM_SF200HT 0x0202 +#define NGBE_SUBSYSTEM_ID_EM_SF200HT_S 0x0220 +#define NGBE_SUBSYSTEM_ID_EM_SF400HT 0x0402 +#define NGBE_SUBSYSTEM_ID_EM_SF400HT_S 0x0420 +#define NGBE_SUBSYSTEM_ID_EM_SF200HXT 0x0230 +#define NGBE_SUBSYSTEM_ID_EM_SF400HXT 0x0430 +#define NGBE_SUBSYSTEM_ID_EM_SF400_OCP 0x0440 +#define NGBE_SUBSYSTEM_ID_EM_SF400_LY 0x0450 +#define NGBE_SUBSYSTEM_ID_EM_SF400_LY_YT 0x0470 + +#define M88E1512_SFP 0x0003 +#define OCP_CARD 0x0040 +#define LY_M88E1512_SFP 0x0050 +#define M88E1512_RJ45 0x0051 +#define M88E1512_MIX 0x0052 +#define YT8521S_SFP 0x0060 +#define LY_YT8521S_SFP 0x0070 +#define INTERNAL_YT8521S_SFP 0x0061 
+#define YT8521S_SFP_GPIO 0x0062 +#define INTERNAL_YT8521S_SFP_GPIO 0x0064 +#define RGMII_FPGA 0x0080 + +#define OEM_MASK 0x00FF +#define INTERNAL_SFP_MASK 0x00FF + +#define NCSI_SUP 0x8000 +#define NCSI_SUP_MASK 0x8000 + +#define WOL_SUP 0x4000 +#define WOL_SUP_MASK 0x4000 + + +/* MDIO Manageable Devices (MMDs). */ +#define NGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 /* PMA and PMD */ +#define NGBE_MDIO_PCS_DEV_TYPE 0x3 /* Physical Coding Sublayer*/ +#define NGBE_MDIO_PHY_XS_DEV_TYPE 0x4 /* PHY Extender Sublayer */ +#define NGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 /* Auto-Negotiation */ +#define NGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Vendor specific 1 */ + +/* phy register definitions */ +/* VENDOR_SPECIFIC_1_DEV regs */ +#define NGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ +#define NGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ +#define NGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0-10G, 1-1G */ + +/* AUTO_NEG_DEV regs */ +#define NGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ +#define NGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ +#define NGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Reg */ +#define NGBE_MDIO_AUTO_NEG_LP_STATUS 0xE820 /* AUTO NEG RX LP Status + * Reg */ +#define NGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ +#define NGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ +#define NGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */ + + +#define NGBE_MDIO_AUTO_NEG_1000BASE_EEE_ADVT 0x4 +#define NGBE_MDIO_AUTO_NEG_100BASE_EEE_ADVT 0x2 +#define NGBE_MDIO_AUTO_NEG_LP_1000BASE_CAP 0x8000 + +#define NGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ +#define NGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ +#define NGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */ +#define NGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */ +#define NGBE_MII_RESTART 0x200 +#define NGBE_MII_AUTONEG_COMPLETE 0x20 +#define NGBE_MII_AUTONEG_LINK_UP 0x04 +#define NGBE_MII_AUTONEG_REG 0x0 + +/* PHY_XS_DEV regs */ +#define NGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */ +#define NGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ + +/* Media-dependent registers. 
*/ +#define NGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ +#define NGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ +#define NGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */ +#define NGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */ + +#define NGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ +#define NGBE_MDIO_PHY_SPEED_100M 0x0020 /* 100M capable */ +#define NGBE_MDIO_PHY_SPEED_10M 0x0040 /* 10M capable */ + +#define NGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */ +#define NGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */ + +#define NGBE_PHY_REVISION_MASK 0xFFFFFFF0U +#define NGBE_MAX_PHY_ADDR 32 + +#define NGBE_MDIO_CLAUSE_SELECT 0x11220 + +/* INTERNAL PHY CONTROL */ +#define NGBE_INTERNAL_PHY_PAGE_SELECT_OFFSET 31 +#define NGBE_INTERNAL_PHY_OFFSET_MAX 32 +#define NGBE_INTERNAL_PHY_ID 0x000732 + +#define NGBE_INTPHY_LED0 0x0010 +#define NGBE_INTPHY_LED1 0x0040 +#define NGBE_INTPHY_LED2 0x2000 + +#define NGBE_INTPHY_INT_LSC 0x0010 +#define NGBE_INTPHY_INT_ANC 0x0008 + + + + +/* PHY MDI STANDARD CONFIG */ +#define NGBE_MDI_PHY_ID1_OFFSET 2 +#define NGBE_MDI_PHY_ID2_OFFSET 3 +#define NGBE_MDI_PHY_ID_MASK 0xFFFFFC00U +#define NGBE_MDI_PHY_SPEED_SELECT1 0x0040 +#define NGBE_MDI_PHY_DUPLEX 0x0100 +#define NGBE_MDI_PHY_RESTART_AN 0x0200 +#define NGBE_MDI_PHY_ANE 0x1000 +#define NGBE_MDI_PHY_SPEED_SELECT0 0x2000 +#define NGBE_MDI_PHY_RESET 0x8000 + +#define NGBE_PHY_RST_WAIT_PERIOD 50 + +#define NGBE_M88E1512_PHY_ID 0x005043 +/* reg 18_0 */ +#define NGBE_M88E1512_INT_LSC 0x0400 +#define NGBE_M88E1512_INT_ANC 0x0800 +/* reg 18_3 */ +#define NGBE_M88E1512_INT_EN 0x0080 +#define NGBE_M88E1512_INT_POL 0x0800 + +/* reg 21_2 */ +#define NGBE_M88E1512_RGM_TTC 0x0010 +#define NGBE_M88E1512_RGM_RTC 0x0020 + +/* LED control */ +#define NGBE_M88E1512_LED1_CONF 0x6 +#define NGBE_M88E1512_LED0_CONF 0x1 + +/* LED polarity */ +#define NGBE_M88E1512_LED1_POL 0x1 +#define NGBE_M88E1512_LED0_POL 0x1 + +/* reg 4_0 ADV REG*/ +#define NGBE_M88E1512_10BASET_HALF 0x0020 +#define NGBE_M88E1512_10BASET_FULL 0x0040 +#define NGBE_M88E1512_100BASET_HALF 0x0080 +#define NGBE_M88E1512_100BASET_FULL 0x0100 + +/* reg 9_0 ADV REG*/ +#define NGBE_M88E1512_1000BASET_HALF 0x0100 +#define NGBE_M88E1512_1000BASET_FULL 0x0200 + +/* reg 19_0 INT status*/ +#define NGBE_M88E1512_ANC 0x0800 +#define NGBE_M88E1512_LSC 0x0400 + +/* yt8521s reg */ +#define NGBE_YT8521S_PHY_ID 0x011a +#define NGBE_YT8531S_PHY_ID 0xe91a + +#define NGBE_YT8521S_SDS_LINK_UP 0x4 +#define NGBE_YT8521S_SDS_LINK_DOWN 0x8 +#define NGBE_YT8521S_UTP_LINK_UP 0x400 +#define NGBE_YT8521S_UTP_LINK_DOWN 0x800 + +#define NGBE_YT8521S_PHY_SPEED_SELECT1 0x0040 +#define NGBE_YT8521S_PHY_SPEED_SELECT0 0x2000 +#define NGBE_YT8521S_PHY_DUPLEX 0x0100 +#define NGBE_YT8521S_PHY_RESET 0x8000 + +/* PHY IDs*/ +#define TN1010_PHY_ID 0x00A19410U +#define QT2022_PHY_ID 0x0043A400U +#define ATH_PHY_ID 0x03429050U +/* PHY FW revision */ +#define TNX_FW_REV 0xB +#define AQ_FW_REV 0x20 + +/* ETH PHY Registers */ +#define NGBE_SR_XS_PCS_MMD_STATUS1 0x30001 +#define NGBE_SR_PCS_CTL2 0x30007 +#define NGBE_SR_PMA_MMD_CTL1 0x10000 +#define NGBE_SR_MII_MMD_CTL 0x1F0000 +#define NGBE_SR_MII_MMD_DIGI_CTL 0x1F8000 +#define NGBE_SR_MII_MMD_AN_CTL 0x1F8001 +#define NGBE_SR_MII_MMD_AN_ADV 0x1F0004 +#define NGBE_SR_MII_MMD_AN_ADV_PAUSE(_v) ((0x3 & (_v)) << 7) +#define NGBE_SR_MII_MMD_LP_BABL 0x1F0005 +#define NGBE_SR_AN_MMD_CTL 0x70000 +#define NGBE_SR_AN_MMD_ADV_REG1 0x70010 +#define NGBE_SR_AN_MMD_ADV_REG1_PAUSE(_v) ((0x3 & (_v)) << 10) +#define 
NGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM 0x400 +#define NGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM 0x800 +#define NGBE_SR_AN_MMD_ADV_REG2 0x70011 +#define NGBE_SR_AN_MMD_LP_ABL1 0x70013 +#define NGBE_VR_AN_KR_MODE_CL 0x78003 +#define NGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1 0x38000 +#define NGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS 0x38010 + +#define NGBE_PHY_MPLLA_CTL0 0x18071 +#define NGBE_PHY_MPLLA_CTL3 0x18077 +#define NGBE_PHY_MISC_CTL0 0x18090 +#define NGBE_PHY_VCO_CAL_LD0 0x18092 +#define NGBE_PHY_VCO_CAL_LD1 0x18093 +#define NGBE_PHY_VCO_CAL_LD2 0x18094 +#define NGBE_PHY_VCO_CAL_LD3 0x18095 +#define NGBE_PHY_VCO_CAL_REF0 0x18096 +#define NGBE_PHY_VCO_CAL_REF1 0x18097 +#define NGBE_PHY_RX_AD_ACK 0x18098 +#define NGBE_PHY_AFE_DFE_ENABLE 0x1805D +#define NGBE_PHY_DFE_TAP_CTL0 0x1805E +#define NGBE_PHY_RX_EQ_ATT_LVL0 0x18057 +#define NGBE_PHY_RX_EQ_CTL0 0x18058 +#define NGBE_PHY_RX_EQ_CTL 0x1805C +#define NGBE_PHY_TX_EQ_CTL0 0x18036 +#define NGBE_PHY_TX_EQ_CTL1 0x18037 +#define NGBE_PHY_TX_RATE_CTL 0x18034 +#define NGBE_PHY_RX_RATE_CTL 0x18054 +#define NGBE_PHY_TX_GEN_CTL2 0x18032 +#define NGBE_PHY_RX_GEN_CTL2 0x18052 +#define NGBE_PHY_RX_GEN_CTL3 0x18053 +#define NGBE_PHY_MPLLA_CTL2 0x18073 +#define NGBE_PHY_RX_POWER_ST_CTL 0x18055 +#define NGBE_PHY_TX_POWER_ST_CTL 0x18035 +#define NGBE_PHY_TX_GENCTRL1 0x18031 + +#define NGBE_SR_PCS_CTL2_PCS_TYPE_SEL_R 0x0 +#define NGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X 0x1 +#define NGBE_SR_PCS_CTL2_PCS_TYPE_SEL_MASK 0x3 +#define NGBE_SR_PMA_MMD_CTL1_SPEED_SEL_1G 0x0 +#define NGBE_SR_PMA_MMD_CTL1_SPEED_SEL_MASK 0x2000 +#define NGBE_SR_PMA_MMD_CTL1_LB_EN 0x1 +#define NGBE_SR_MII_MMD_CTL_AN_EN 0x1000 +#define NGBE_SR_MII_MMD_CTL_RESTART_AN 0x0200 +#define NGBE_SR_AN_MMD_CTL_RESTART_AN 0x0200 +#define NGBE_SR_AN_MMD_CTL_ENABLE 0x1000 +#define NGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KX4 0x40 +#define NGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KX 0x20 +#define NGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KR 0x80 +#define NGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_MASK 0xFFFF +#define NGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_ENABLE 0x1000 +#define NGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST 0x8000 +#define NGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK 0x1C +#define NGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD 0x10 + +#define NGBE_PHY_MPLLA_CTL0_MULTIPLIER_1GBASEX_KX 32 +#define NGBE_PHY_MPLLA_CTL0_MULTIPLIER_OTHER 40 +#define NGBE_PHY_MPLLA_CTL0_MULTIPLIER_MASK 0xFF +#define NGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_1GBASEX_KX 0x46 +#define NGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_OTHER 0x56 +#define NGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_MASK 0x7FF +#define NGBE_PHY_MISC_CTL0_TX2RX_LB_EN_0 0x1 +#define NGBE_PHY_MISC_CTL0_TX2RX_LB_EN_3_1 0xE +#define NGBE_PHY_MISC_CTL0_RX_VREF_CTRL 0x1F00 +#define NGBE_PHY_VCO_CAL_LD0_1GBASEX_KX 1344 +#define NGBE_PHY_VCO_CAL_LD0_OTHER 1360 +#define NGBE_PHY_VCO_CAL_LD0_MASK 0x1000 +#define NGBE_PHY_VCO_CAL_REF0_LD0_1GBASEX_KX 42 +#define NGBE_PHY_VCO_CAL_REF0_LD0_OTHER 34 +#define NGBE_PHY_VCO_CAL_REF0_LD0_MASK 0x3F +#define NGBE_PHY_AFE_DFE_ENABLE_DFE_EN0 0x10 +#define NGBE_PHY_AFE_DFE_ENABLE_AFE_EN0 0x1 +#define NGBE_PHY_AFE_DFE_ENABLE_MASK 0xFF +#define NGBE_PHY_RX_EQ_CTL_CONT_ADAPT0 0x1 +#define NGBE_PHY_RX_EQ_CTL_CONT_ADAPT_MASK 0xF +#define NGBE_PHY_TX_RATE_CTL_TX0_RATE_RXAUI 0x1 +#define NGBE_PHY_TX_RATE_CTL_TX0_RATE_1GBASEX_KX 0x3 +#define NGBE_PHY_TX_RATE_CTL_TX0_RATE_OTHER 0x2 +#define NGBE_PHY_TX_RATE_CTL_TX1_RATE_OTHER 0x20 +#define NGBE_PHY_TX_RATE_CTL_TX2_RATE_OTHER 0x200 +#define NGBE_PHY_TX_RATE_CTL_TX3_RATE_OTHER 0x2000 +#define NGBE_PHY_TX_RATE_CTL_TX0_RATE_MASK 0x7 +#define NGBE_PHY_TX_RATE_CTL_TX1_RATE_MASK 0x70 
+#define NGBE_PHY_TX_RATE_CTL_TX2_RATE_MASK 0x700 +#define NGBE_PHY_TX_RATE_CTL_TX3_RATE_MASK 0x7000 +#define NGBE_PHY_RX_RATE_CTL_RX0_RATE_RXAUI 0x1 +#define NGBE_PHY_RX_RATE_CTL_RX0_RATE_1GBASEX_KX 0x3 +#define NGBE_PHY_RX_RATE_CTL_RX0_RATE_OTHER 0x2 +#define NGBE_PHY_RX_RATE_CTL_RX1_RATE_OTHER 0x20 +#define NGBE_PHY_RX_RATE_CTL_RX2_RATE_OTHER 0x200 +#define NGBE_PHY_RX_RATE_CTL_RX3_RATE_OTHER 0x2000 +#define NGBE_PHY_RX_RATE_CTL_RX0_RATE_MASK 0x7 +#define NGBE_PHY_RX_RATE_CTL_RX1_RATE_MASK 0x70 +#define NGBE_PHY_RX_RATE_CTL_RX2_RATE_MASK 0x700 +#define NGBE_PHY_RX_RATE_CTL_RX3_RATE_MASK 0x7000 +#define NGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_OTHER 0x100 +#define NGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_MASK 0x300 +#define NGBE_PHY_TX_GEN_CTL2_TX1_WIDTH_OTHER 0x400 +#define NGBE_PHY_TX_GEN_CTL2_TX1_WIDTH_MASK 0xC00 +#define NGBE_PHY_TX_GEN_CTL2_TX2_WIDTH_OTHER 0x1000 +#define NGBE_PHY_TX_GEN_CTL2_TX2_WIDTH_MASK 0x3000 +#define NGBE_PHY_TX_GEN_CTL2_TX3_WIDTH_OTHER 0x4000 +#define NGBE_PHY_TX_GEN_CTL2_TX3_WIDTH_MASK 0xC000 +#define NGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_OTHER 0x100 +#define NGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_MASK 0x300 +#define NGBE_PHY_RX_GEN_CTL2_RX1_WIDTH_OTHER 0x400 +#define NGBE_PHY_RX_GEN_CTL2_RX1_WIDTH_MASK 0xC00 +#define NGBE_PHY_RX_GEN_CTL2_RX2_WIDTH_OTHER 0x1000 +#define NGBE_PHY_RX_GEN_CTL2_RX2_WIDTH_MASK 0x3000 +#define NGBE_PHY_RX_GEN_CTL2_RX3_WIDTH_OTHER 0x4000 +#define NGBE_PHY_RX_GEN_CTL2_RX3_WIDTH_MASK 0xC000 + +#define NGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_8 0x100 +#define NGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_10 0x200 +#define NGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_16P5 0x400 +#define NGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_MASK 0x700 + +#define NGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME 100 +#define NGBE_PHY_INIT_DONE_POLLING_TIME 100 + +/**************** Global Registers ****************************/ /* chip control Registers */ -#define NGBE_MIS_PRB_CTL 0x10010 +#define NGBE_MIS_RST 0x1000C +#define NGBE_MIS_PWR 0x10000 +#define NGBE_MIS_CTL 0x10004 +#define NGBE_MIS_PF_SM 0x10008 +#define NGBE_MIS_PRB_CTL 0x10010 +#define NGBE_MIS_ST 0x10028 +#define NGBE_MIS_SWSM 0x1002C +#define NGBE_MIS_RST_ST 0x10030 + +#define NGBE_MIS_RST_SW_RST 0x00000001U +#define NGBE_MIS_RST_LAN0_RST 0x00000002U +#define NGBE_MIS_RST_LAN1_RST 0x00000004U +#define NGBE_MIS_RST_LAN2_RST 0x00000008U +#define NGBE_MIS_RST_LAN3_RST 0x00000010U +#define NGBE_MIS_RST_FW_RST 0x00000020U + +#define NGBE_MIS_RST_LAN0_CHG_ETH_MODE 0x20000000U +#define NGBE_MIS_RST_LAN1_CHG_ETH_MODE 0x40000000U +#define NGBE_MIS_RST_GLOBAL_RST 0x80000000U + +#define NGBE_MIS_PWR_LAN_ID(_r) ((0xF0000000U & (_r)) >> 28) +#define NGBE_MIS_PWR_LAN_ID_0 (1) +#define NGBE_MIS_PWR_LAN_ID_1 (2) +#define NGBE_MIS_PWR_LAN_ID_2 (3) +#define NGBE_MIS_PWR_LAN_ID_3 (4) + +#define NGBE_MIS_ST_MNG_INIT_DN 0x00000001U +#define NGBE_MIS_ST_MNG_VETO 0x00000100U +#define NGBE_MIS_ST_LAN0_ECC 0x00010000U +#define NGBE_MIS_ST_LAN1_ECC 0x00020000U +#define NGBE_MIS_ST_LAN2_ECC 0x00040000U +#define NGBE_MIS_ST_LAN3_ECC 0x00080000U +#define NGBE_MIS_ST_MNG_ECC 0x00100000U +#define NGBE_MIS_ST_PCORE_ECC 0x00200000U +#define NGBE_MIS_ST_PCIWRP_ECC 0x00400000U +#define NGBE_MIS_ST_PCIEPHY_ECC 0x00800000U +#define NGBE_MIS_ST_FMGR_ECC 0x01000000U +#define NGBE_MIS_ST_GPHY_IN_RST(_r) (0x00000200U << (_r)) + + +#define NGBE_MIS_SWSM_SMBI 1 +#define NGBE_MIS_RST_ST_DEV_RST_ST_DONE 0x00000000U +#define NGBE_MIS_RST_ST_DEV_RST_ST_REQ 0x00080000U +#define NGBE_MIS_RST_ST_DEV_RST_ST_INPROGRESS 0x00100000U +#define NGBE_MIS_RST_ST_DEV_RST_ST_MASK 0x00180000U +#define NGBE_MIS_RST_ST_DEV_RST_TYPE_MASK 
0x00070000U +#define NGBE_MIS_RST_ST_DEV_RST_TYPE_SHIFT 16 +#define NGBE_MIS_RST_ST_DEV_RST_TYPE_SW_RST 0x3 +#define NGBE_MIS_RST_ST_DEV_RST_TYPE_GLOBAL_RST 0x5 +#define NGBE_MIS_RST_ST_RST_INIT 0x0000FF00U +#define NGBE_MIS_RST_ST_RST_INI_SHIFT 8 +#define NGBE_MIS_RST_ST_RST_TIM 0x000000FFU +#define NGBE_MIS_PF_SM_SM 1 +#define NGBE_MIS_PRB_CTL_LAN0_UP 0x8 +#define NGBE_MIS_PRB_CTL_LAN1_UP 0x4 +#define NGBE_MIS_PRB_CTL_LAN2_UP 0x2 +#define NGBE_MIS_PRB_CTL_LAN3_UP 0x1 + +/* Sensors for PVT(Process Voltage Temperature) */ +#define NGBE_TS_CTL 0x10300 +#define NGBE_TS_EN 0x10304 +#define NGBE_TS_ST 0x10308 +#define NGBE_TS_ALARM_THRE 0x1030C +#define NGBE_TS_DALARM_THRE 0x10310 +#define NGBE_TS_INT_EN 0x10314 +#define NGBE_TS_ALARM_ST 0x10318 +#define NGBE_TS_ALARM_ST_DALARM 0x00000002U +#define NGBE_TS_ALARM_ST_ALARM 0x00000001U + +#define NGBE_EFUSE_WDATA0 0x10320 +#define NGBE_EFUSE_WDATA1 0x10324 +#define NGBE_EFUSE_RDATA0 0x10328 +#define NGBE_EFUSE_RDATA1 0x1032C +#define NGBE_EFUSE_STATUS 0x10330 + + +#define NGBE_TS_CTL_CALI_DONE 0x80000000U +#define NGBE_TS_EN_ENA 0x00000001U +#define NGBE_TS_ST_DATA_OUT_MASK 0x000003FFU +#define NGBE_TS_ALARM_THRE_MASK 0x000003FFU +#define NGBE_TS_DALARM_THRE_MASK 0x000003FFU +#define NGBE_TS_INT_EN_DALARM_INT_EN 0x00000002U +#define NGBE_TS_INT_EN_ALARM_INT_EN 0x00000001U + +struct ngbe_thermal_diode_data { + s16 temp; + s16 alarm_thresh; + s16 dalarm_thresh; +}; + +struct ngbe_thermal_sensor_data { + struct ngbe_thermal_diode_data sensor; +}; + + /* FMGR Registers */ -#define NGBE_SPI_ILDR_STATUS 0x10120 -#define NGBE_SPI_ILDR_STATUS_PERST BIT(0) /* PCIE_PERST is done */ -#define NGBE_SPI_ILDR_STATUS_PWRRST BIT(1) /* Power on reset is done */ +#define NGBE_SPI_ILDR_STATUS 0x10120 +#define NGBE_SPI_ILDR_STATUS_PERST 0x00000001U /* PCIE_PERST is done */ +#define NGBE_SPI_ILDR_STATUS_PWRRST 0x00000002U /* Power on reset done */ +#define NGBE_SPI_ILDR_STATUS_SW_RESET 0x00000800U /* software reset done */ +#define NGBE_SPI_ILDR_STATUS_LAN0_SW_RST 0x00002000U /* lan0 soft reset done */ +#define NGBE_SPI_ILDR_STATUS_LAN1_SW_RST 0x00004000U /* lan1 soft reset done */ +#define NGBE_SPI_ILDR_STATUS_LAN2_SW_RST 0x00008000U /* lan2 soft reset done */ +#define NGBE_SPI_ILDR_STATUS_LAN3_SW_RST 0x00010000U /* lan3 soft reset done */ -/* Checksum and EEPROM pointers */ -#define NGBE_CALSUM_COMMAND 0xE9 -#define NGBE_CALSUM_CAP_STATUS 0x10224 -#define NGBE_EEPROM_VERSION_STORE_REG 0x1022C -#define NGBE_SAN_MAC_ADDR_PTR 0x18 -#define NGBE_DEVICE_CAPS 0x1C -#define NGBE_EEPROM_VERSION_L 0x1D -#define NGBE_EEPROM_VERSION_H 0x1E -/* Media-dependent registers. 
*/ -#define NGBE_MDIO_CLAUSE_SELECT 0x11220 +#define NGBE_MAX_FLASH_LOAD_POLL_TIME 10 + +#define NGBE_SPI_CMD 0x10104 +#define NGBE_SPI_CMD_CMD(_v) (((_v) & 0x7) << 28) +#define NGBE_SPI_CMD_CLK(_v) (((_v) & 0x7) << 25) +#define NGBE_SPI_CMD_ADDR(_v) (((_v) & 0x7FFFFF)) + +#define NGBE_SPI_DATA 0x10108 +#define NGBE_SPI_DATA_BYPASS ((0x1) << 31) +#define NGBE_SPI_DATA_STATUS(_v) (((_v) & 0xFF) << 16) +#define NGBE_SPI_DATA_OP_DONE ((0x1)) + +#define NGBE_SPI_STATUS 0x1010C +#define NGBE_SPI_STATUS_OPDONE ((0x1)) +#define NGBE_SPI_STATUS_FLASH_BYPASS ((0x1) << 31) + +#define NGBE_SPI_USR_CMD 0x10110 +#define NGBE_SPI_CMDCFG0 0x10114 +#define NGBE_SPI_CMDCFG1 0x10118 +#define NGBE_SPI_ILDR_SWPTR 0x10124 + +/************************* Port Registers ************************************/ + +/* port cfg Registers */ +#define NGBE_CFG_PORT_CTL 0x14400 +#define NGBE_CFG_PORT_ST 0x14404 +#define NGBE_CFG_EX_VTYPE 0x14408 +#define NGBE_CFG_LED_CTL 0x14424 + +/* internal phy reg_offset [0,31] */ +#define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4)) + +#define NGBE_CFG_TCP_TIME 0x14420 +#define NGBE_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4)) /* [0,3] */ +#define NGBE_CFG_LAN_SPEED 0x14440 + + + +/* port cfg bit */ +#define NGBE_CFG_PORT_CTL_PFRSTD 0x00004000U /* Phy Function Reset Done */ +#define NGBE_CFG_PORT_CTL_D_VLAN 0x00000001U /* double vlan*/ +#define NGBE_CFG_PORT_CTL_ETAG_ETYPE_VLD 0x00000002U +#define NGBE_CFG_PORT_CTL_QINQ 0x00000004U +#define NGBE_CFG_PORT_CTL_DRV_LOAD 0x00000008U +#define NGBE_CFG_PORT_CTL_NUM_VT_MASK 0x00001000U /* number of TVs */ +#define NGBE_CFG_PORT_CTL_NUM_VT_NONE 0x00000000U +#define NGBE_CFG_PORT_CTL_NUM_VT_8 0x00001000U +/* Status Bit */ +#define NGBE_CFG_PORT_ST_LINK_1000M 0x00000002U +#define NGBE_CFG_PORT_ST_LINK_100M 0x00000004U +#define NGBE_CFG_PORT_ST_LINK_10M 0x00000008U +#define NGBE_CFG_PORT_ST_LAN_ID(_r) ((0x00000300U & (_r)) >> 8) +#define NGBE_LINK_UP_TIME 90 + +/* LED CTL Bit */ + +#define NGBE_CFG_LED_CTL_LINK_10M_SEL 0x00000008U +#define NGBE_CFG_LED_CTL_LINK_100M_SEL 0x00000004U +#define NGBE_CFG_LED_CTL_LINK_1G_SEL 0x00000002U +#define NGBE_CFG_LED_CTL_LINK_OD_SHIFT 16 +/* LED modes */ +#define NGBE_LED_LINK_10M NGBE_CFG_LED_CTL_LINK_10M_SEL +#define NGBE_LED_LINK_1G NGBE_CFG_LED_CTL_LINK_1G_SEL +#define NGBE_LED_LINK_100M NGBE_CFG_LED_CTL_LINK_100M_SEL /* GPIO Registers */ -#define NGBE_GPIO_DR 0x14800 -#define NGBE_GPIO_DDR 0x14804 +#define NGBE_GPIO_DR 0x14800 +#define NGBE_GPIO_DDR 0x14804 +#define NGBE_GPIO_CTL 0x14808 +#define NGBE_GPIO_INTEN 0x14830 +#define NGBE_GPIO_INTMASK 0x14834 +#define NGBE_GPIO_INTTYPE_LEVEL 0x14838 +#define NGBE_GPIO_POLARITY 0x1483C +#define NGBE_GPIO_INTSTATUS 0x14840 +#define NGBE_GPIO_EOI 0x1484C /*GPIO bit */ -#define NGBE_GPIO_DR_0 BIT(0) /* SDP0 Data Value */ -#define NGBE_GPIO_DR_1 BIT(1) /* SDP1 Data Value */ -#define NGBE_GPIO_DDR_0 BIT(0) /* SDP0 IO direction */ -#define NGBE_GPIO_DDR_1 BIT(1) /* SDP1 IO direction */ +#define NGBE_GPIO_DR_0 0x00000001U /* SDP0 Data Value */ +#define NGBE_GPIO_DR_1 0x00000002U /* SDP1 Data Value */ +#define NGBE_GPIO_DDR_0 0x00000001U /* SDP0 IO direction */ +#define NGBE_GPIO_DDR_1 0x00000002U /* SDP1 IO direction */ +#define NGBE_GPIO_CTL_SW_MODE 0x00000000U /* SDP software mode */ +#define NGBE_GPIO_INTEN_1 0x00000002U /* SDP1 interrupt enable */ +#define NGBE_GPIO_INTEN_2 0x00000004U /* SDP2 interrupt enable */ +#define NGBE_GPIO_INTEN_3 0x00000008U /* SDP3 interrupt enable */ +#define NGBE_GPIO_INTEN_5 0x00000020U /* SDP5 interrupt enable */ +#define 
NGBE_GPIO_INTEN_6 0x00000040U /* SDP6 interrupt enable */ +#define NGBE_GPIO_INTTYPE_LEVEL_2 0x00000004U /* SDP2 interrupt type level */ +#define NGBE_GPIO_INTTYPE_LEVEL_3 0x00000008U /* SDP3 interrupt type level */ +#define NGBE_GPIO_INTTYPE_LEVEL_5 0x00000020U /* SDP5 interrupt type level */ +#define NGBE_GPIO_INTTYPE_LEVEL_6 0x00000040U /* SDP6 interrupt type level */ +#define NGBE_GPIO_INTSTATUS_1 0x00000002U /* SDP1 interrupt status */ +#define NGBE_GPIO_INTSTATUS_2 0x00000004U /* SDP2 interrupt status */ +#define NGBE_GPIO_INTSTATUS_3 0x00000008U /* SDP3 interrupt status */ +#define NGBE_GPIO_INTSTATUS_5 0x00000020U /* SDP5 interrupt status */ +#define NGBE_GPIO_INTSTATUS_6 0x00000040U /* SDP6 interrupt status */ +#define NGBE_GPIO_EOI_2 0x00000004U /* SDP2 interrupt clear */ +#define NGBE_GPIO_EOI_3 0x00000008U /* SDP3 interrupt clear */ +#define NGBE_GPIO_EOI_5 0x00000020U /* SDP5 interrupt clear */ +#define NGBE_GPIO_EOI_6 0x00000040U /* SDP6 interrupt clear */ + +/* TPH registers */ +#define NGBE_CFG_TPH_TDESC 0x14F00 /* TPH conf for Tx desc write back */ +#define NGBE_CFG_TPH_RDESC 0x14F04 /* TPH conf for Rx desc write back */ +#define NGBE_CFG_TPH_RHDR 0x14F08 /* TPH conf for writing Rx pkt header */ +#define NGBE_CFG_TPH_RPL 0x14F0C /* TPH conf for payload write access */ +/* TPH bit */ +#define NGBE_CFG_TPH_TDESC_EN 0x80000000U +#define NGBE_CFG_TPH_TDESC_PH_SHIFT 29 +#define NGBE_CFG_TPH_TDESC_ST_SHIFT 16 +#define NGBE_CFG_TPH_RDESC_EN 0x80000000U +#define NGBE_CFG_TPH_RDESC_PH_SHIFT 29 +#define NGBE_CFG_TPH_RDESC_ST_SHIFT 16 +#define NGBE_CFG_TPH_RHDR_EN 0x00008000U +#define NGBE_CFG_TPH_RHDR_PH_SHIFT 13 +#define NGBE_CFG_TPH_RHDR_ST_SHIFT 0 +#define NGBE_CFG_TPH_RPL_EN 0x80000000U +#define NGBE_CFG_TPH_RPL_PH_SHIFT 29 +#define NGBE_CFG_TPH_RPL_ST_SHIFT 16 + +/*********************** Transmit DMA registers **************************/ +/* transmit global control */ +#define NGBE_TDM_CTL 0x18000 +#define NGBE_TDM_POOL_TE 0x18004 +#define NGBE_TDM_PB_THRE 0x18020 + + + +#define NGBE_TDM_LLQ 0x18040 +#define NGBE_TDM_ETYPE_LB_L 0x18050 + +#define NGBE_TDM_ETYPE_AS_L 0x18058 +#define NGBE_TDM_MAC_AS_L 0x18060 + +#define NGBE_TDM_VLAN_AS_L 0x18070 + +#define NGBE_TDM_TCP_FLG_L 0x18078 +#define NGBE_TDM_TCP_FLG_H 0x1807C +#define NGBE_TDM_DESC_FATAL 0x180D0 +#define NGBE_TDM_VLAN_INS(_i) (0x18100 + ((_i) * 4)) /* 8 of these 0 - 7 */ +/* TDM CTL BIT */ +#define NGBE_TDM_CTL_TE 0x1 /* Transmit Enable */ +#define NGBE_TDM_CTL_PADDING 0x2 /* Padding byte number for ipsec ESP */ +#define NGBE_TDM_CTL_VT_SHIFT 16 /* VLAN EtherType */ +/* Per VF Port VLAN insertion rules */ +#define NGBE_TDM_VLAN_INS_VLANA_DEFAULT 0x40000000U /*Always use default VLAN*/ +#define NGBE_TDM_VLAN_INS_VLANA_NEVER 0x80000000U /* Never insert VLAN tag */ + +#define NGBE_TDM_RP_CTL_RST ((0x1) << 0) +#define NGBE_TDM_RP_CTL_RPEN ((0x1) << 2) +#define NGBE_TDM_RP_CTL_RLEN ((0x1) << 3) +#define NGBE_TDM_RP_RATE_MIN(v) ((0x3FFF & (v))) +#define NGBE_TDM_RP_RATE_MAX(v) ((0x3FFF & (v)) << 16) + +/* qos */ +#define NGBE_TDM_PBWARB_CTL 0x18200 +#define NGBE_TDM_VM_CREDIT_VAL(v) (0x3FF & (v)) + +/* etag */ +#define NGBE_TDM_ETAG_INS(_i) (0x18700 + ((_i) * 4)) /* 8 of these 0 - 7 */ +/* statistic */ +#define NGBE_TDM_DRP_CNT 0x18300 +#define NGBE_TDM_SEC_DRP 0x18304 +#define NGBE_TDM_PKT_CNT 0x18308 +#define NGBE_TDM_BYTE_CNT_L 0x1830C +#define NGBE_TDM_BYTE_CNT_H 0x18310 +#define NGBE_TDM_OS2BMC_CNT 0x18314 + +/**************************** Receive DMA registers **************************/ +/* receive control */ +#define 
NGBE_RDM_ARB_CTL 0x12000 +#define NGBE_RDM_POOL_RE 0x12004 + +#define NGBE_RDM_PF_QDE 0x12080 +#define NGBE_RDM_PF_HIDE 0x12090 +/* VFRE bitmask */ +#define NGBE_RDM_POOL_RE_ENABLE_ALL 0xFFFFFFFFU + +/* statistic */ +#define NGBE_RDM_DRP_PKT 0x12500 +#define NGBE_RDM_PKT_CNT 0x12504 +#define NGBE_RDM_BYTE_CNT_L 0x12508 +#define NGBE_RDM_BYTE_CNT_H 0x1250C +#define NGBE_RDM_BMC2OS_CNT 0x12510 + +/***************************** RDB registers *********************************/ +/* Flow Control Registers */ +#define NGBE_RDB_RFCV 0x19200 +#define NGBE_RDB_RFCL 0x19220 +#define NGBE_RDB_RFCH 0x19260 +#define NGBE_RDB_RFCRT 0x192A0 +#define NGBE_RDB_RFCC 0x192A4 +/* receive packet buffer */ +#define NGBE_RDB_PB_WRAP 0x19004 +#define NGBE_RDB_PB_SZ 0x19020 + +#define NGBE_RDB_PB_CTL 0x19000 +#define NGBE_RDB_PB_SZ_SHIFT 10 +#define NGBE_RDB_PB_SZ_MASK 0x000FFC00U +/* lli interrupt */ +#define NGBE_RDB_LLI_THRE 0x19080 +#define NGBE_RDB_LLI_THRE_SZ(_v) ((0xFFF & (_v))) +#define NGBE_RDB_LLI_THRE_UP(_v) ((0x7 & (_v)) << 16) +#define NGBE_RDB_LLI_THRE_UP_SHIFT 16 + +/* ring assignment */ +#define NGBE_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4)) /* [0,7] */ +#define NGBE_RDB_RSSTBL(_i) (0x19400 + ((_i) * 4)) /* [0,31] */ +#define NGBE_RDB_RSSRK(_i) (0x19480 + ((_i) * 4)) /* [0,9] */ +#define NGBE_RDB_RA_CTL 0x194F4 +#define NGBE_RDB_5T_SDP(_i) (0x19A00 + ((_i) * 4)) /*Src Dst Addr Q Filter*/ +#define NGBE_RDB_5T_CTL0(_i) (0x19C00 + ((_i) * 4)) /* Five Tuple Q Filter */ +#define NGBE_RDB_ETYPE_CLS(_i) (0x19100 + ((_i) * 4)) /* EType Q Select */ +#define NGBE_RDB_SYN_CLS 0x19130 +#define NGBE_RDB_5T_CTL1(_i) (0x19E00 + ((_i) * 4)) /*8 of these (0-7)*/ +/* VM RSS */ +#define NGBE_RDB_VMRSSRK(_i, _p) (0x1A000 + ((_i) * 4) + ((_p) * 0x40)) +#define NGBE_RDB_VMRSSTBL(_i, _p) (0x1B000 + ((_i) * 4) + ((_p) * 0x40)) +/* statistic */ +#define NGBE_RDB_MPCNT 0x19040 +#define NGBE_RDB_PKT_CNT 0x19060 +#define NGBE_RDB_REPLI_CNT 0x19064 +#define NGBE_RDB_DRP_CNT 0x19068 +#define NGBE_RDB_LXONTXC 0x1921C +#define NGBE_RDB_LXOFFTXC 0x19218 +#define NGBE_RDB_PFCMACDAL 0x19210 +#define NGBE_RDB_PFCMACDAH 0x19214 +#define NGBE_RDB_TXSWERR 0x1906C +#define NGBE_RDB_TXSWERR_TB_FREE 0x3FF +/* rdb_pl_cfg reg mask */ +#define NGBE_RDB_PL_CFG_L4HDR 0x2 +#define NGBE_RDB_PL_CFG_L3HDR 0x4 +#define NGBE_RDB_PL_CFG_L2HDR 0x8 +#define NGBE_RDB_PL_CFG_TUN_OUTER_L2HDR 0x20 +#define NGBE_RDB_PL_CFG_TUN_TUNHDR 0x10 +/* RQTC Bit Masks and Shifts */ +#define NGBE_RDB_RSS_TC_SHIFT_TC(_i) ((_i) * 4) +#define NGBE_RDB_RSS_TC_TC0_MASK (0x7 << 0) +#define NGBE_RDB_RSS_TC_TC1_MASK (0x7 << 4) +#define NGBE_RDB_RSS_TC_TC2_MASK (0x7 << 8) +#define NGBE_RDB_RSS_TC_TC3_MASK (0x7 << 12) +#define NGBE_RDB_RSS_TC_TC4_MASK (0x7 << 16) +#define NGBE_RDB_RSS_TC_TC5_MASK (0x7 << 20) +#define NGBE_RDB_RSS_TC_TC6_MASK (0x7 << 24) +#define NGBE_RDB_RSS_TC_TC7_MASK (0x7 << 28) +/* Packet Buffer Initialization */ +#define NGBE_MAX_PACKET_BUFFERS 8 +#define NGBE_RDB_PB_SZ_48KB 0x00000030U /* 48KB Packet Buffer */ +#define NGBE_RDB_PB_SZ_64KB 0x00000040U /* 64KB Packet Buffer */ +#define NGBE_RDB_PB_SZ_80KB 0x00000050U /* 80KB Packet Buffer */ +#define NGBE_RDB_PB_SZ_128KB 0x00000080U /* 128KB Packet Buffer */ +#define NGBE_RDB_PB_SZ_MAX 0x00000200U /* 512KB Packet Buffer */ + + +/* Packet buffer allocation strategies */ +enum { + PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */ +#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL + PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */ +#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED +}; + + +/* FCRTL Bit 
Masks */ +#define NGBE_RDB_RFCL_XONE 0x80000000U /* XON enable */ +#define NGBE_RDB_RFCH_XOFFE 0x80000000U /* Packet buffer fc enable */ +/* FCCFG Bit Masks */ +#define NGBE_RDB_RFCC_RFCE_802_3X 0x00000008U /* Tx link FC enable */ + +/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ +#define NGBE_RDB_5T_CTL1_SIZE_BP 0x00001000U /* Packet size bypass */ +#define NGBE_RDB_5T_CTL1_LLI 0x00100000U /* Enables low latency Int */ +#define NGBE_RDB_LLI_THRE_PRIORITY_MASK 0x00070000U /* VLAN priority mask */ +#define NGBE_RDB_LLI_THRE_PRIORITY_EN 0x00080000U /* VLAN priority enable */ + +#define NGBE_MAX_RDB_5T_CTL0_FILTERS 128 +#define NGBE_RDB_5T_CTL0_PROTOCOL_MASK 0x00000003U +#define NGBE_RDB_5T_CTL0_PROTOCOL_TCP 0x00000000U +#define NGBE_RDB_5T_CTL0_PROTOCOL_UDP 0x00000001U +#define NGBE_RDB_5T_CTL0_PROTOCOL_SCTP 2 +#define NGBE_RDB_5T_CTL0_PRIORITY_MASK 0x00000007U +#define NGBE_RDB_5T_CTL0_PRIORITY_SHIFT 2 +#define NGBE_RDB_5T_CTL0_POOL_MASK 0x0000003FU +#define NGBE_RDB_5T_CTL0_POOL_SHIFT 8 +#define NGBE_RDB_5T_CTL0_5TUPLE_MASK_MASK 0x00000007U +#define NGBE_RDB_5T_CTL0_5TUPLE_MASK_SHIFT 27 +#define NGBE_RDB_5T_CTL0_SOURCE_PORT_MASK 0x1B +#define NGBE_RDB_5T_CTL0_DEST_PORT_MASK 0x05 +#define NGBE_RDB_5T_CTL0_PROTOCOL_COMP_MASK 0x0F +#define NGBE_RDB_5T_CTL0_POOL_MASK_EN 0x40000000U +#define NGBE_RDB_5T_CTL0_QUEUE_ENABLE 0x80000000U + +#define NGBE_RDB_ETYPE_CLS_RX_QUEUE 0x007F0000U /* bits 22:16 */ +#define NGBE_RDB_ETYPE_CLS_RX_QUEUE_SHIFT 16 +#define NGBE_RDB_ETYPE_CLS_LLI 0x20000000U /* bit 29 */ +#define NGBE_RDB_ETYPE_CLS_QUEUE_EN 0x80000000U /* bit 31 */ + +/* Receive Config masks */ +#define NGBE_RDB_PB_CTL_PBEN (0x80000000) /* Enable Receiver */ +#define NGBE_RDB_PB_CTL_DISABLED 0x1 + +#define NGBE_RDB_RA_CTL_RSS_EN 0x00000004U /* RSS Enable */ +#define NGBE_RDB_RA_CTL_RSS_MASK 0xFFFF0000U +#define NGBE_RDB_RA_CTL_RSS_IPV4_TCP 0x00010000U +#define NGBE_RDB_RA_CTL_RSS_IPV4 0x00020000U +#define NGBE_RDB_RA_CTL_RSS_IPV6 0x00100000U +#define NGBE_RDB_RA_CTL_RSS_IPV6_TCP 0x00200000U +#define NGBE_RDB_RA_CTL_RSS_IPV4_UDP 0x00400000U +#define NGBE_RDB_RA_CTL_RSS_IPV6_UDP 0x00800000U + +/******************************* PSR Registers *******************************/ +/* psr control */ +#define NGBE_PSR_CTL 0x15000 +#define NGBE_PSR_VLAN_CTL 0x15088 +#define NGBE_PSR_VM_CTL 0x151B0 +#define NGBE_PSR_PKT_CNT 0x151B8 +#define NGBE_PSR_MNG_PKT_CNT 0x151BC +#define NGBE_PSR_DBG_DOP_CNT 0x151C0 +#define NGBE_PSR_MNG_DOP_CNT 0x151C4 +#define NGBE_PSR_VM_FLP_L 0x151C8 + +/* Header split receive */ +#define NGBE_PSR_CTL_SW_EN 0x00040000U +#define NGBE_PSR_CTL_PCSD 0x00002000U +#define NGBE_PSR_CTL_IPPCSE 0x00001000U +#define NGBE_PSR_CTL_BAM 0x00000400U +#define NGBE_PSR_CTL_UPE 0x00000200U +#define NGBE_PSR_CTL_MPE 0x00000100U +#define NGBE_PSR_CTL_MFE 0x00000080U +#define NGBE_PSR_CTL_MO 0x00000060U +#define NGBE_PSR_CTL_TPE 0x00000010U +#define NGBE_PSR_CTL_MO_SHIFT 5 +/* VT_CTL bitmasks */ +#define NGBE_PSR_VM_CTL_DIS_DEFPL 0x20000000U /* disable default pool */ +#define NGBE_PSR_VM_CTL_REPLEN 0x40000000U /* replication enabled */ +#define NGBE_PSR_VM_CTL_POOL_SHIFT 7 +#define NGBE_PSR_VM_CTL_POOL_MASK (0x7 << NGBE_PSR_VM_CTL_POOL_SHIFT) +/* VLAN Control Bit Masks */ +#define NGBE_PSR_VLAN_CTL_VET 0x0000FFFFU /* bits 0-15 */ +#define NGBE_PSR_VLAN_CTL_CFI 0x10000000U /* bit 28 */ +#define NGBE_PSR_VLAN_CTL_CFIEN 0x20000000U /* bit 29 */ +#define NGBE_PSR_VLAN_CTL_VFE 0x40000000U /* bit 30 */ + +/* vm L2 contorl */ +#define NGBE_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4)) +/* VMOLR bitmasks 
*/ +#define NGBE_PSR_VM_L2CTL_LBDIS 0x00000002U /* disable loopback */ +#define NGBE_PSR_VM_L2CTL_LLB 0x00000004U /* local pool loopback */ +#define NGBE_PSR_VM_L2CTL_UPE 0x00000010U /* unicast promiscuous */ +#define NGBE_PSR_VM_L2CTL_TPE 0x00000020U /* ETAG promiscuous */ +#define NGBE_PSR_VM_L2CTL_VACC 0x00000040U /* accept nomatched vlan */ +#define NGBE_PSR_VM_L2CTL_VPE 0x00000080U /* vlan promiscuous mode */ +#define NGBE_PSR_VM_L2CTL_AUPE 0x00000100U /* accept untagged packets */ +#define NGBE_PSR_VM_L2CTL_ROMPE 0x00000200U /*accept packets in MTA tbl*/ +#define NGBE_PSR_VM_L2CTL_ROPE 0x00000400U /* accept packets in UC tbl*/ +#define NGBE_PSR_VM_L2CTL_BAM 0x00000800U /* accept broadcast packets*/ +#define NGBE_PSR_VM_L2CTL_MPE 0x00001000U /* multicast promiscuous */ + +/* etype switcher 1st stage */ +#define NGBE_PSR_ETYPE_SWC(_i) (0x15128 + ((_i) * 4)) /* EType Queue Filter */ +/* ETYPE Queue Filter/Select Bit Masks */ +#define NGBE_MAX_PSR_ETYPE_SWC_FILTERS 8 +#define NGBE_PSR_ETYPE_SWC_FCOE 0x08000000U /* bit 27 */ +#define NGBE_PSR_ETYPE_SWC_TX_ANTISPOOF 0x20000000U /* bit 29 */ +#define NGBE_PSR_ETYPE_SWC_1588 0x40000000U /* bit 30 */ +#define NGBE_PSR_ETYPE_SWC_FILTER_EN 0x80000000U /* bit 31 */ +#define NGBE_PSR_ETYPE_SWC_POOL_ENABLE (1 << 26) /* bit 26 */ +#define NGBE_PSR_ETYPE_SWC_POOL_SHIFT 20 +/* + * ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters + * here!! + * + * Current filters: + * EAPOL 802.1x (0x888e): Filter 0 + * FCoE (0x8906): Filter 2 + * 1588 (0x88f7): Filter 3 + * FIP (0x8914): Filter 4 + * LLDP (0x88CC): Filter 5 + * LACP (0x8809): Filter 6 + * FC (0x8808): Filter 7 + */ +#define NGBE_PSR_ETYPE_SWC_FILTER_EAPOL 0 +#define NGBE_PSR_ETYPE_SWC_FILTER_FCOE 2 +#define NGBE_PSR_ETYPE_SWC_FILTER_1588 3 +#define NGBE_PSR_ETYPE_SWC_FILTER_FIP 4 +#define NGBE_PSR_ETYPE_SWC_FILTER_LLDP 5 +#define NGBE_PSR_ETYPE_SWC_FILTER_LACP 6 +#define NGBE_PSR_ETYPE_SWC_FILTER_FC 7 + +/* mcasst/ucast overflow tbl */ +#define NGBE_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4)) +#define NGBE_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4)) + +/* vlan tbl */ +#define NGBE_PSR_VLAN_TBL(_i) (0x16000 + ((_i) * 4)) + +/* mac switcher */ +#define NGBE_PSR_MAC_SWC_AD_L 0x16200 +#define NGBE_PSR_MAC_SWC_AD_H 0x16204 +#define NGBE_PSR_MAC_SWC_VM 0x16208 +#define NGBE_PSR_MAC_SWC_IDX 0x16210 +/* RAH */ +#define NGBE_PSR_MAC_SWC_AD_H_AD(v) (((v) & 0xFFFF)) +#define NGBE_PSR_MAC_SWC_AD_H_ADTYPE(v) (((v) & 0x1) << 30) +#define NGBE_PSR_MAC_SWC_AD_H_AV 0x80000000U +#define NGBE_CLEAR_VMDQ_ALL 0xFFFFFFFFU + +/* vlan switch */ +#define NGBE_PSR_VLAN_SWC 0x16220 +#define NGBE_PSR_VLAN_SWC_VM_L 0x16224 +#define NGBE_PSR_VLAN_SWC_IDX 0x16230 /* 32 vlan entries */ +/* VLAN pool filtering masks */ +#define NGBE_PSR_VLAN_SWC_VIEN 0x80000000U /* filter is valid */ +#define NGBE_PSR_VLAN_SWC_ENTRIES 32 +#define NGBE_PSR_VLAN_SWC_VLANID_MASK 0x00000FFFU +#define NGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ + +/* Manangbeent */ +#define NGBE_PSR_MNG_FIT_CTL 0x15820 +/* Manangbeent Bit Fields and Masks */ +#define NGBE_PSR_MNG_FIT_CTL_MPROXYE 0x40000000U /* Manangbeent Proxy Enable*/ +#define NGBE_PSR_MNG_FIT_CTL_RCV_TCO_EN 0x00020000U /* Rcv TCO packet enable */ +#define NGBE_PSR_MNG_FIT_CTL_EN_BMC2OS 0x10000000U /* Ena BMC2OS and OS2BMC + *traffic */ +#define NGBE_PSR_MNG_FIT_CTL_EN_BMC2OS_SHIFT 28 + +#define NGBE_PSR_MNG_FLEX_SEL 0x1582C +#define NGBE_PSR_MNG_FLEX_DW_L(_i) (0x15A00 + ((_i) * 16)) /* [0,15] */ +#define 
NGBE_PSR_MNG_FLEX_DW_H(_i) (0x15A04 + ((_i) * 16)) +#define NGBE_PSR_MNG_FLEX_MSK(_i) (0x15A08 + ((_i) * 16)) + +/* mirror */ +#define NGBE_PSR_MR_CTL(_i) (0x15B00 + ((_i) * 4)) /* [0,3] */ +#define NGBE_PSR_MR_VLAN_L(_i) (0x15B10 + ((_i) * 8)) +#define NGBE_PSR_MR_VM_L(_i) (0x15B30 + ((_i) * 8)) + +/* 1588 */ +#define NGBE_PSR_1588_CTL 0x15188 /* Rx Time Sync Control register - RW */ +#define NGBE_PSR_1588_STMPL 0x151E8 /* Rx timestamp Low - RO */ +#define NGBE_PSR_1588_STMPH 0x151A4 /* Rx timestamp High - RO */ +#define NGBE_PSR_1588_ATTRL 0x151A0 /* Rx timestamp attribute low - RO */ +#define NGBE_PSR_1588_ATTRH 0x151A8 /* Rx timestamp attribute high - RO */ +#define NGBE_PSR_1588_MSGTYPE 0x15120 /* RX message type register low - RW */ +/* 1588 CTL Bit */ +#define NGBE_PSR_1588_CTL_VALID 0x00000001U /* Rx timestamp valid */ +#define NGBE_PSR_1588_CTL_TYPE_MASK 0x0000000EU /* Rx type mask */ +#define NGBE_PSR_1588_CTL_TYPE_L2_V2 0x00 +#define NGBE_PSR_1588_CTL_TYPE_L4_V1 0x02 +#define NGBE_PSR_1588_CTL_TYPE_L2_L4_V2 0x04 +#define NGBE_PSR_1588_CTL_TYPE_EVENT_V2 0x0A +#define NGBE_PSR_1588_CTL_ENABLED 0x00000010U /* Rx Timestamp enabled*/ +/* 1588 msg type bit */ +#define NGBE_PSR_1588_MSGTYPE_V1_CTRLT_MASK 0x000000FFU +#define NGBE_PSR_1588_MSGTYPE_V1_SYNC_MSG 0x00 +#define NGBE_PSR_1588_MSGTYPE_V1_DELAY_REQ_MSG 0x01 +#define NGBE_PSR_1588_MSGTYPE_V1_FOLLOWUP_MSG 0x02 +#define NGBE_PSR_1588_MSGTYPE_V1_DELAY_RESP_MSG 0x03 +#define NGBE_PSR_1588_MSGTYPE_V1_MGMT_MSG 0x04 +#define NGBE_PSR_1588_MSGTYPE_V2_MSGID_MASK 0x0000FF00U +#define NGBE_PSR_1588_MSGTYPE_V2_SYNC_MSG 0x0000 +#define NGBE_PSR_1588_MSGTYPE_V2_DELAY_REQ_MSG 0x0100 +#define NGBE_PSR_1588_MSGTYPE_V2_PDELAY_REQ_MSG 0x0200 +#define NGBE_PSR_1588_MSGTYPE_V2_PDELAY_RESP_MSG 0x0300 +#define NGBE_PSR_1588_MSGTYPE_V2_FOLLOWUP_MSG 0x0800 +#define NGBE_PSR_1588_MSGTYPE_V2_DELAY_RESP_MSG 0x0900 +#define NGBE_PSR_1588_MSGTYPE_V2_PDELAY_FOLLOWUP_MSG 0x0A00 +#define NGBE_PSR_1588_MSGTYPE_V2_ANNOUNCE_MSG 0x0B00 +#define NGBE_PSR_1588_MSGTYPE_V2_SIGNALLING_MSG 0x0C00 +#define NGBE_PSR_1588_MSGTYPE_V2_MGMT_MSG 0x0D00 + +/* Wake up registers */ +#define NGBE_PSR_WKUP_CTL 0x15B80 +#define NGBE_PSR_WKUP_IPV 0x15B84 +#define NGBE_PSR_LAN_FLEX_SEL 0x15B8C +#define NGBE_PSR_WKUP_IP4TBL(_i) (0x15BC0 + ((_i) * 4)) /* [0,3] */ +#define NGBE_PSR_WKUP_IP6TBL(_i) (0x15BE0 + ((_i) * 4)) +#define NGBE_PSR_LAN_FLEX_DW_L(_i) (0x15C00 + ((_i) * 16)) /* [0,15] */ +#define NGBE_PSR_LAN_FLEX_DW_H(_i) (0x15C04 + ((_i) * 16)) +#define NGBE_PSR_LAN_FLEX_MSK(_i) (0x15C08 + ((_i) * 16)) +#define NGBE_PSR_LAN_FLEX_CTL 0x15CFC +/* Wake Up Filter Control Bit */ +#define NGBE_PSR_WKUP_CTL_LNKC 0x00000001U /* Link Status Change Wakeup Enable*/ +#define NGBE_PSR_WKUP_CTL_MAG 0x00000002U /* Magic Packet Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_EX 0x00000004U /* Directed Exact Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_MC 0x00000008U /* Directed Multicast Wakeup Enable*/ +#define NGBE_PSR_WKUP_CTL_BC 0x00000010U /* Broadcast Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_ARP 0x00000020U /* ARP Request Packet Wakeup Enable*/ +#define NGBE_PSR_WKUP_CTL_IPV4 0x00000040U /* Directed IPv4 Pkt Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_IPV6 0x00000080U /* Directed IPv6 Pkt Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_IGNORE_TCO 0x00008000U /* Ignore WakeOn TCO pkts */ +#define NGBE_PSR_WKUP_CTL_FLX0 0x00010000U /* Flexible Filter 0 Ena */ +#define NGBE_PSR_WKUP_CTL_FLX1 0x00020000U /* Flexible Filter 1 Ena */ +#define NGBE_PSR_WKUP_CTL_FLX2 0x00040000U /* Flexible Filter 2 Ena */ 
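/*
 * Illustrative sketch of how the wake-up filter control bits defined
 * above would be combined into a single NGBE_PSR_WKUP_CTL value.  A
 * minimal example only: the helper name is hypothetical and nothing
 * here is provided by this header beyond the bit masks themselves.
 */
static inline u32 ngbe_example_wol_ctl(bool wake_on_magic, bool wake_on_link)
{
	u32 wkup_ctl = 0;

	if (wake_on_magic)
		wkup_ctl |= NGBE_PSR_WKUP_CTL_MAG;	/* magic packet wake */
	if (wake_on_link)
		wkup_ctl |= NGBE_PSR_WKUP_CTL_LNKC;	/* link status change wake */

	return wkup_ctl;	/* value intended for NGBE_PSR_WKUP_CTL */
}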
+#define NGBE_PSR_WKUP_CTL_FLX3 0x00080000U /* Flexible Filter 3 Ena */ +#define NGBE_PSR_WKUP_CTL_FLX4 0x00100000U /* Flexible Filter 4 Ena */ +#define NGBE_PSR_WKUP_CTL_FLX5 0x00200000U /* Flexible Filter 5 Ena */ +#define NGBE_PSR_WKUP_CTL_FLX_FILTERS 0x000F0000U /* Mask for 4 flex filters */ +#define NGBE_PSR_WKUP_CTL_FLX_FILTERS_6 0x003F0000U /* Mask for 6 flex filters*/ +#define NGBE_PSR_WKUP_CTL_FLX_FILTERS_8 0x00FF0000U /* Mask for 8 flex filters*/ +#define NGBE_PSR_WKUP_CTL_FW_RST_WK 0x80000000U /* Ena wake on FW reset + * assertion */ +/* Mask for Ext. flex filters */ +#define NGBE_PSR_WKUP_CTL_EXT_FLX_FILTERS 0x00300000U +#define NGBE_PSR_WKUP_CTL_ALL_FILTERS 0x000F00FFU /* Mask all 4 flex filters*/ +#define NGBE_PSR_WKUP_CTL_ALL_FILTERS_6 0x003F00FFU /* Mask all 6 flex filters*/ +#define NGBE_PSR_WKUP_CTL_ALL_FILTERS_8 0x00FF00FFU /* Mask all 8 flex filters*/ +#define NGBE_PSR_WKUP_CTL_FLX_OFFSET 16 /* Offset to the Flex Filters bits*/ + +#define NGBE_PSR_MAX_SZ 0x15020 + +/****************************** TDB ******************************************/ +#define NGBE_TDB_TFCS 0x1CE00 +#define NGBE_TDB_PB_SZ 0x1CC00 + +#define NGBE_TDB_PRB_CTL 0x17010 +#define NGBE_TDB_PBRARB_CTL 0x1CD00 + +#define NGBE_TDB_PB_SZ_MAX 0x00005000U /* 20KB Packet Buffer */ +#define NGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ +#define NGBE_MAX_PB 8 +/* statistic */ +#define NGBE_TDB_OUT_PKT_CNT 0x1CF00 +#define NGBE_TDB_MNG_PKT_CNT 0x1CF04 +#define NGBE_TDB_LB_PKT_CNT 0x1CF08 +#define NGBE_TDB_MNG_LARGE_DOP_CNT 0x1CF0C + +/****************************** TSEC *****************************************/ +/* Security Control Registers */ +#define NGBE_TSEC_CTL 0x1D000 +#define NGBE_TSEC_ST 0x1D004 +#define NGBE_TSEC_BUF_AF 0x1D008 +#define NGBE_TSEC_BUF_AE 0x1D00C +#define NGBE_TSEC_MIN_IFG 0x1D020 + +/* 1588 */ +#define NGBE_TSEC_1588_CTL 0x11F00 /* Tx Time Sync Control reg */ +#define NGBE_TSEC_1588_STMPL 0x11F04 /* Tx timestamp value Low */ +#define NGBE_TSEC_1588_STMPH 0x11F08 /* Tx timestamp value High */ +#define NGBE_TSEC_1588_SYSTIML 0x11F0C /* System time register Low */ +#define NGBE_TSEC_1588_SYSTIMH 0x11F10 /* System time register High */ +#define NGBE_TSEC_1588_INC 0x11F14 /* Increment attributes reg */ +#define NGBE_TSEC_1588_INC_IV(v) ((v) & 0x7FFFFFF) + +#define NGBE_TSEC_1588_ADJL 0x11F18 /* Time Adjustment Offset reg Low */ +#define NGBE_TSEC_1588_ADJH 0x11F1C /* Time Adjustment Offset reg High*/ + +#define NGBE_TSEC_1588_INT_ST 0x11F20 +#define NGBE_TSEC_1588_INT_EN 0x11F24 + +/* 1588 fields */ +#define NGBE_TSEC_1588_CTL_VALID 0x00000001U /* Tx timestamp valid */ +#define NGBE_TSEC_1588_CTL_ENABLED 0x00000010U /* Tx timestamping enabled */ + +#define NGBE_TSEC_1588_AUX_CTL 0x11F28 +#define NGBE_TSEC_1588_TRGT_L(i) (0x11F2C + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_TRGT_H(i) (0x11F30 + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_FREQ_CLK_L(i) (0x11F3C + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_FREQ_CLK_H(i) (0x11F40 + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_AUX_STMP_L(i) (0x11F4C + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_AUX_STMP_H(i) (0x11F50 + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_SDP(n) (0x11F5C + ((n) * 4)) /* [0,3] */ + + + +/********************************* RSEC **************************************/ +/* general rsec */ +#define NGBE_RSEC_CTL 0x17000 +#define NGBE_RSEC_ST 0x17004 +/* general rsec fields */ +#define NGBE_RSEC_CTL_SECRX_DIS 0x00000001U +#define NGBE_RSEC_CTL_RX_DIS 0x00000002U +#define NGBE_RSEC_CTL_CRC_STRIP 0x00000004U 
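/*
 * Minimal sketch of how the split NGBE_TSEC_1588_SYSTIML/SYSTIMH pair
 * defined above forms one 64-bit PTP system time value.  readl() from
 * <linux/io.h> against the mapped register BAR is an assumption here,
 * as is the retry loop: whether the hardware latches the high word on a
 * low-word read is not stated in this header, so the read is simply
 * repeated if the high word changed part-way through.
 */
static inline u64 ngbe_example_read_systim(void __iomem *base)
{
	u32 hi, lo, hi2;

	do {
		hi  = readl(base + NGBE_TSEC_1588_SYSTIMH);
		lo  = readl(base + NGBE_TSEC_1588_SYSTIML);
		hi2 = readl(base + NGBE_TSEC_1588_SYSTIMH);
	} while (hi != hi2);	/* high word rolled over; read again */

	return ((u64)hi << 32) | lo;
}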
+#define NGBE_RSEC_CTL_SAVE_MAC_ERR 0x00000040U +#define NGBE_RSEC_ST_RSEC_RDY 0x00000001U +#define NGBE_RSEC_ST_RSEC_OFLD_DIS 0x00000002U +#define NGBE_RSEC_ST_ECC_RXERR 0x00000004U + +/* link sec */ +#define NGBE_RSEC_LSEC_CAP 0x17200 +#define NGBE_RSEC_LSEC_CTL 0x17204 +#define NGBE_RSEC_LSEC_SCI_L 0x17208 +#define NGBE_RSEC_LSEC_SCI_H 0x1720C +#define NGBE_RSEC_LSEC_SA0 0x17210 +#define NGBE_RSEC_LSEC_SA1 0x17214 +#define NGBE_RSEC_LSEC_PKNUM0 0x17218 +#define NGBE_RSEC_LSEC_PKNUM1 0x1721C +#define NGBE_RSEC_LSEC_KEY0(_n) 0x17220 +#define NGBE_RSEC_LSEC_KEY1(_n) 0x17230 +#define NGBE_RSEC_LSEC_UNTAG_PKT 0x17240 +#define NGBE_RSEC_LSEC_DEC_OCTET 0x17244 +#define NGBE_RSEC_LSEC_VLD_OCTET 0x17248 +#define NGBE_RSEC_LSEC_BAD_PKT 0x1724C +#define NGBE_RSEC_LSEC_NOSCI_PKT 0x17250 +#define NGBE_RSEC_LSEC_UNSCI_PKT 0x17254 +#define NGBE_RSEC_LSEC_UNCHK_PKT 0x17258 +#define NGBE_RSEC_LSEC_DLY_PKT 0x1725C +#define NGBE_RSEC_LSEC_LATE_PKT 0x17260 +#define NGBE_RSEC_LSEC_OK_PKT(_n) 0x17264 +#define NGBE_RSEC_LSEC_INV_PKT(_n) 0x17274 +#define NGBE_RSEC_LSEC_BADSA_PKT 0x1727C +#define NGBE_RSEC_LSEC_INVSA_PKT 0x17280 + +/* ipsec */ +#define NGBE_RSEC_IPS_IDX 0x17100 +#define NGBE_RSEC_IPS_IDX_WT 0x80000000U +#define NGBE_RSEC_IPS_IDX_RD 0x40000000U +#define NGBE_RSEC_IPS_IDX_TB_IDX 0x0U /* */ +#define NGBE_RSEC_IPS_IDX_TB_IP 0x00000002U +#define NGBE_RSEC_IPS_IDX_TB_SPI 0x00000004U +#define NGBE_RSEC_IPS_IDX_TB_KEY 0x00000006U +#define NGBE_RSEC_IPS_IDX_EN 0x00000001U +#define NGBE_RSEC_IPS_IP(i) (0x17104 + ((i) * 4)) +#define NGBE_RSEC_IPS_SPI 0x17114 +#define NGBE_RSEC_IPS_IP_IDX 0x17118 +#define NGBE_RSEC_IPS_KEY(i) (0x1711C + ((i) * 4)) +#define NGBE_RSEC_IPS_SALT 0x1712C +#define NGBE_RSEC_IPS_MODE 0x17130 +#define NGBE_RSEC_IPS_MODE_IPV6 0x00000010 +#define NGBE_RSEC_IPS_MODE_DEC 0x00000008 +#define NGBE_RSEC_IPS_MODE_ESP 0x00000004 +#define NGBE_RSEC_IPS_MODE_AH 0x00000002 +#define NGBE_RSEC_IPS_MODE_VALID 0x00000001 + +/************************************** ETH PHY ******************************/ +#define NGBE_XPCS_IDA_ADDR 0x13000 +#define NGBE_XPCS_IDA_DATA 0x13004 +#define NGBE_ETHPHY_IDA_ADDR 0x13008 +#define NGBE_ETHPHY_IDA_DATA 0x1300C + +/************************************** MNG ********************************/ +#define NGBE_MNG_FW_SM 0x1E000 +#define NGBE_MNG_SWFW_SYNC 0x1E008 +#define NGBE_MNG_MBOX 0x1E100 +#define NGBE_MNG_MBOX_CTL 0x1E044 + + +#define NGBE_MNG_OS2BMC_CNT 0x1E094 +#define NGBE_MNG_BMC2OS_CNT 0x1E090 + +/* Firmware Semaphore Register */ +#define NGBE_MNG_FW_SM_MODE_MASK 0xE +#define NGBE_MNG_FW_SM_TS_ENABLED 0x1 + +/* SW_FW_SYNC definitions */ +#define NGBE_MNG_SWFW_SYNC_SW_PHY 0x0001 +#define NGBE_MNG_SWFW_SYNC_SW_FLASH 0x0008 +#define NGBE_MNG_SWFW_SYNC_SW_MB 0x0004 + +#define NGBE_MNG_MBOX_CTL_SWRDY 0x1 +#define NGBE_MNG_MBOX_CTL_SWACK 0x2 +#define NGBE_MNG_MBOX_CTL_FWRDY 0x4 +#define NGBE_MNG_MBOX_CTL_FWACK 0x8 + +/************************************* ETH MAC *****************************/ +#define NGBE_MAC_TX_CFG 0x11000 +#define NGBE_MAC_RX_CFG 0x11004 +#define NGBE_MAC_PKT_FLT 0x11008 +#define NGBE_MAC_PKT_FLT_PR (0x1) /* promiscuous mode */ +#define NGBE_MAC_PKT_FLT_RA (0x80000000) /* receive all */ +#define NGBE_MAC_WDG_TIMEOUT 0x1100C +#define NGBE_MAC_TX_FLOW_CTRL 0x11070 +#define NGBE_MAC_RX_FLOW_CTRL 0x11090 +#define NGBE_MAC_INT_ST 0x110B0 +#define NGBE_MAC_INT_EN 0x110B4 +#define NGBE_MAC_ADDRESS0_HIGH 0x11300 +#define NGBE_MAC_ADDRESS0_LOW 0x11304 + +#define NGBE_MAC_TX_CFG_TE 0x00000001U +#define NGBE_MAC_TX_CFG_SPEED_MASK 0x60000000U +#define 
NGBE_MAC_TX_CFG_SPEED_1G 0x60000000U +#define NGBE_MAC_RX_CFG_RE 0x00000001U +#define NGBE_MAC_RX_CFG_JE 0x00000100U +#define NGBE_MAC_RX_CFG_LM 0x00000400U +#define NGBE_MAC_WDG_TIMEOUT_PWE 0x00000100U +#define NGBE_MAC_WDG_TIMEOUT_WTO_MASK 0x0000000FU +#define NGBE_MAC_WDG_TIMEOUT_WTO_DELTA 2 + +#define NGBE_MAC_RX_FLOW_CTRL_RFE 0x00000001U /* receive fc enable */ + +#define NGBE_MSCA 0x11200 +#define NGBE_MSCA_RA(v) ((0xFFFF & (v))) +#define NGBE_MSCA_PA(v) ((0x1F & (v)) << 16) +#define NGBE_MSCA_DA(v) ((0x1F & (v)) << 21) +#define NGBE_MSCC 0x11204 +#define NGBE_MSCC_DATA(v) ((0xFFFF & (v))) +#define NGBE_MSCC_CMD(v) ((0x3 & (v)) << 16) +enum NGBE_MSCA_CMD_value { + NGBE_MSCA_CMD_RSV = 0, + NGBE_MSCA_CMD_WRITE, + NGBE_MSCA_CMD_POST_READ, + NGBE_MSCA_CMD_READ, +}; +#define NGBE_MSCC_SADDR ((0x1U) << 18) +#define NGBE_MSCC_CR(v) ((0x8U & (v)) << 19) +#define NGBE_MSCC_BUSY ((0x1U) << 22) +#define NGBE_MDIO_CLK(v) ((0x7 & (v)) << 19) + + +/* EEE registers */ + +/* statistic */ +#define NGBE_MAC_LXOFFRXC 0x11988 +#define NGBE_MAC_PXOFFRXC 0x119DC +#define NGBE_RX_BC_FRAMES_GOOD_LOW 0x11918 +#define NGBE_RX_CRC_ERROR_FRAMES_LOW 0x11928 +#define NGBE_RX_LEN_ERROR_FRAMES_LOW 0x11978 +#define NGBE_RX_UNDERSIZE_FRAMES_GOOD 0x11938 +#define NGBE_RX_OVERSIZE_FRAMES_GOOD 0x1193C +#define NGBE_RX_FRAME_CNT_GOOD_BAD_LOW 0x11900 +#define NGBE_TX_FRAME_CNT_GOOD_BAD_LOW 0x1181C +#define NGBE_TX_MC_FRAMES_GOOD_LOW 0x1182C +#define NGBE_TX_BC_FRAMES_GOOD_LOW 0x11824 +#define NGBE_MMC_CONTROL 0x11800 +#define NGBE_MMC_CONTROL_RSTONRD 0x4 /* reset on read */ +#define NGBE_MMC_CONTROL_UP 0x700 + + +/********************************* BAR registers ***************************/ +/* Interrupt Registers */ +#define NGBE_BME_CTL 0x12020 +#define NGBE_PX_MISC_IC 0x100 +#define NGBE_PX_MISC_ICS 0x104 +#define NGBE_PX_MISC_IEN 0x108 +#define NGBE_PX_MISC_IVAR 0x4FC +#define NGBE_PX_GPIE 0x118 +#define NGBE_PX_ISB_ADDR_L 0x160 +#define NGBE_PX_ISB_ADDR_H 0x164 +#define NGBE_PX_TCP_TIMER 0x170 +#define NGBE_PX_ITRSEL 0x180 +#define NGBE_PX_IC 0x120 +#define NGBE_PX_ICS 0x130 +#define NGBE_PX_IMS 0x140 +#define NGBE_PX_IMC 0x150 +#define NGBE_PX_IVAR(_i) (0x500 + (_i) * 4) /* [0,3] */ +#define NGBE_PX_ITR(_i) (0x200 + (_i) * 4) /* [0,8] */ +#define NGBE_PX_TRANSACTION_PENDING 0x168 +#define NGBE_PX_INTA 0x110 + +/* Interrupt register bitmasks */ +/* Extended Interrupt Cause Read */ +#define NGBE_PX_MISC_IC_DEV_RST 0x00000400U /* device reset event */ +#define NGBE_PX_MISC_IC_TIMESYNC 0x00000800U /* time sync */ +#define NGBE_PX_MISC_IC_STALL 0x00001000U /* trans or recv path is + * stalled */ +#define NGBE_PX_MISC_IC_LINKSEC 0x00002000U /* Tx LinkSec require key + * exchange */ +#define NGBE_PX_MISC_IC_RX_MISS 0x00004000U /* Packet Buffer Overrun */ +#define NGBE_PX_MISC_IC_I2C 0x00010000U /* I2C interrupt */ +#define NGBE_PX_MISC_IC_ETH_EVENT 0x00020000U /* err reported by MAC except + * eth link down */ +#define NGBE_PX_MISC_IC_PHY 0x00040000U /* link up */ +#define NGBE_PX_MISC_IC_INT_ERR 0x00100000U /* integrity error */ +#define NGBE_PX_MISC_IC_SPI 0x00200000U /* SPI interface */ +#define NGBE_PX_MISC_IC_VF_MBOX 0x00800000U /* VF-PF message box */ +#define NGBE_PX_MISC_IC_GPIO 0x04000000U /* GPIO interrupt */ +#define NGBE_PX_MISC_IC_PCIE_REQ_ERR 0x08000000U /* pcie request error int */ +#define NGBE_PX_MISC_IC_OVER_HEAT 0x10000000U /* overheat detection */ +#define NGBE_PX_MISC_IC_PROBE_MATCH 0x20000000U /* probe match */ +#define NGBE_PX_MISC_IC_MNG_HOST_MBOX 0x40000000U /* mng mailbox */ +#define 
NGBE_PX_MISC_IC_TIMER 0x80000000U /* tcp timer */ + +/* Extended Interrupt Cause Set */ +#define NGBE_PX_MISC_ICS_ETH_LKDN 0x00000100U +#define NGBE_PX_MISC_ICS_DEV_RST 0x00000400U +#define NGBE_PX_MISC_ICS_TIMESYNC 0x00000800U +#define NGBE_PX_MISC_ICS_STALL 0x00001000U +#define NGBE_PX_MISC_ICS_LINKSEC 0x00002000U +#define NGBE_PX_MISC_ICS_RX_MISS 0x00004000U +#define NGBE_PX_MISC_ICS_FLOW_DIR 0x00008000U +#define NGBE_PX_MISC_ICS_I2C 0x00010000U +#define NGBE_PX_MISC_ICS_ETH_EVENT 0x00020000U +#define NGBE_PX_MISC_ICS_ETH_LK 0x00040000U +#define NGBE_PX_MISC_ICS_ETH_AN 0x00080000U +#define NGBE_PX_MISC_ICS_INT_ERR 0x00100000U +#define NGBE_PX_MISC_ICS_SPI 0x00200000U +#define NGBE_PX_MISC_ICS_VF_MBOX 0x00800000U +#define NGBE_PX_MISC_ICS_GPIO 0x04000000U +#define NGBE_PX_MISC_ICS_PCIE_REQ_ERR 0x08000000U +#define NGBE_PX_MISC_ICS_OVER_HEAT 0x10000000U +#define NGBE_PX_MISC_ICS_PROBE_MATCH 0x20000000U +#define NGBE_PX_MISC_ICS_MNG_HOST_MBOX 0x40000000U +#define NGBE_PX_MISC_ICS_TIMER 0x80000000U /* Extended Interrupt Enable Set */ -#define NGBE_PX_MISC_IEN_DEV_RST BIT(10) -#define NGBE_PX_MISC_IEN_ETH_LK BIT(18) -#define NGBE_PX_MISC_IEN_INT_ERR BIT(20) -#define NGBE_PX_MISC_IEN_GPIO BIT(26) +#define NGBE_PX_MISC_IEN_ETH_LKDN 0x00000100U +#define NGBE_PX_MISC_IEN_DEV_RST 0x00000400U +#define NGBE_PX_MISC_IEN_TIMESYNC 0x00000800U +#define NGBE_PX_MISC_IEN_STALL 0x00001000U +#define NGBE_PX_MISC_IEN_LINKSEC 0x00002000U +#define NGBE_PX_MISC_IEN_RX_MISS 0x00004000U +#define NGBE_PX_MISC_IEN_I2C 0x00010000U +#define NGBE_PX_MISC_IEN_ETH_EVENT 0x00020000U +#define NGBE_PX_MISC_IEN_ETH_LK 0x00040000U +#define NGBE_PX_MISC_IEN_ETH_AN 0x00080000U +#define NGBE_PX_MISC_IEN_INT_ERR 0x00100000U +#define NGBE_PX_MISC_IEN_SPI 0x00200000U +#define NGBE_PX_MISC_IEN_VF_MBOX 0x00800000U +#define NGBE_PX_MISC_IEN_GPIO 0x04000000U +#define NGBE_PX_MISC_IEN_PCIE_REQ_ERR 0x08000000U +#define NGBE_PX_MISC_IEN_OVER_HEAT 0x10000000U +#define NGBE_PX_MISC_IEN_PROBE_MATCH 0x20000000U +#define NGBE_PX_MISC_IEN_MNG_HOST_MBOX 0x40000000U +#define NGBE_PX_MISC_IEN_TIMER 0x80000000U + #define NGBE_PX_MISC_IEN_MASK ( \ + NGBE_PX_MISC_IEN_ETH_LKDN| \ NGBE_PX_MISC_IEN_DEV_RST | \ + NGBE_PX_MISC_IEN_ETH_EVENT | \ NGBE_PX_MISC_IEN_ETH_LK | \ + NGBE_PX_MISC_IEN_ETH_AN | \ NGBE_PX_MISC_IEN_INT_ERR | \ - NGBE_PX_MISC_IEN_GPIO) + NGBE_PX_MISC_IEN_VF_MBOX | \ + NGBE_PX_MISC_IEN_GPIO | \ + NGBE_PX_MISC_IEN_MNG_HOST_MBOX | \ + NGBE_PX_MISC_IEN_STALL | \ + NGBE_PX_MISC_IEN_PCIE_REQ_ERR | \ + NGBE_PX_MISC_IEN_TIMER) -#define NGBE_INTR_ALL 0x1FF -#define NGBE_INTR_MISC(A) BIT((A)->num_q_vectors) +/* General purpose Interrupt Enable */ +#define NGBE_PX_GPIE_MODEL 0x00000001U +#define NGBE_PX_GPIE_IMEN 0x00000002U +#define NGBE_PX_GPIE_LL_INTERVAL 0x000000F0U -#define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4)) -#define NGBE_CFG_LAN_SPEED 0x14440 -#define NGBE_CFG_PORT_ST 0x14404 +/* Interrupt Vector Allocation Registers */ +#define NGBE_PX_IVAR_REG_NUM 64 +#define NGBE_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ -/* Wake up registers */ -#define NGBE_PSR_WKUP_CTL 0x15B80 -/* Wake Up Filter Control Bit */ -#define NGBE_PSR_WKUP_CTL_LNKC BIT(0) /* Link Status Change Wakeup Enable*/ -#define NGBE_PSR_WKUP_CTL_MAG BIT(1) /* Magic Packet Wakeup Enable */ -#define NGBE_PSR_WKUP_CTL_EX BIT(2) /* Directed Exact Wakeup Enable */ -#define NGBE_PSR_WKUP_CTL_MC BIT(3) /* Directed Multicast Wakeup Enable*/ -#define NGBE_PSR_WKUP_CTL_BC BIT(4) /* Broadcast Wakeup Enable */ -#define NGBE_PSR_WKUP_CTL_ARP BIT(5) /* ARP 
Request Packet Wakeup Enable*/ -#define NGBE_PSR_WKUP_CTL_IPV4 BIT(6) /* Directed IPv4 Pkt Wakeup Enable */ -#define NGBE_PSR_WKUP_CTL_IPV6 BIT(7) /* Directed IPv6 Pkt Wakeup Enable */ - -#define NGBE_FW_EEPROM_CHECKSUM_CMD 0xE9 -#define NGBE_FW_NVM_DATA_OFFSET 3 -#define NGBE_FW_CMD_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */ -#define NGBE_FW_CMD_ST_PASS 0x80658383 -#define NGBE_FW_CMD_ST_FAIL 0x70657376 - -#define NGBE_MAX_FDIR_INDICES 7 - -#define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1) -#define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1) - -#define NGBE_ETH_LENGTH_OF_ADDRESS 6 -#define NGBE_MAX_MSIX_VECTORS 0x09 -#define NGBE_RAR_ENTRIES 32 -#define NGBE_RX_PB_SIZE 42 -#define NGBE_MC_TBL_SIZE 128 -#define NGBE_SP_VFT_TBL_SIZE 128 -#define NGBE_TDB_PB_SZ (20 * 1024) /* 160KB Packet Buffer */ - -/* TX/RX descriptor defines */ -#define NGBE_DEFAULT_TXD 512 /* default ring size */ -#define NGBE_DEFAULT_TX_WORK 256 -#define NGBE_MAX_TXD 8192 -#define NGBE_MIN_TXD 128 - -#define NGBE_DEFAULT_RXD 512 /* default ring size */ -#define NGBE_DEFAULT_RX_WORK 256 -#define NGBE_MAX_RXD 8192 -#define NGBE_MIN_RXD 128 - -extern char ngbe_driver_name[]; +#define NGBE_MAX_INT_RATE 500000 +#define NGBE_MIN_INT_RATE 980 +#define NGBE_MAX_EITR 0x00007FFCU +#define NGBE_MIN_EITR 4 +#define NGBE_PX_ITR_ITR_INT_MASK 0x00000FF8U +#define NGBE_PX_ITR_LLI_CREDIT 0x001f0000U +#define NGBE_PX_ITR_LLI_MOD 0x00008000U +#define NGBE_PX_ITR_CNT_WDIS 0x80000000U +#define NGBE_PX_ITR_ITR_CNT 0x0FE00000U + +/* transmit DMA Registers */ +#define NGBE_PX_TR_BAL(_i) (0x03000 + ((_i) * 0x40)) /* [0, 7] */ +#define NGBE_PX_TR_BAH(_i) (0x03004 + ((_i) * 0x40)) +#define NGBE_PX_TR_WP(_i) (0x03008 + ((_i) * 0x40)) +#define NGBE_PX_TR_RP(_i) (0x0300C + ((_i) * 0x40)) +#define NGBE_PX_TR_CFG(_i) (0x03010 + ((_i) * 0x40)) +/* Transmit Config masks */ +#define NGBE_PX_TR_CFG_ENABLE (1) /* Ena specific Tx Queue */ +#define NGBE_PX_TR_CFG_TR_SIZE_SHIFT 1 /* tx desc number per ring */ +#define NGBE_PX_TR_CFG_SWFLSH (1 << 26) /* Tx Desc. 
wr-bk flushing */ +#define NGBE_PX_TR_CFG_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ +#define NGBE_PX_TR_CFG_THRE_SHIFT 8 + +#define NGBE_PX_TR_RPn(q_per_pool, vf_number, vf_q_index) \ + (NGBE_PX_TR_RP((q_per_pool)*(vf_number) + (vf_q_index))) + +#define NGBE_PX_TR_WPn(q_per_pool, vf_number, vf_q_index) \ + (NGBE_PX_TR_WP((q_per_pool)*(vf_number) + (vf_q_index))) + +/* Receive DMA Registers */ +#define NGBE_PX_RR_BAL(_i) (0x01000 + ((_i) * 0x40)) /* [0, 7] */ +#define NGBE_PX_RR_BAH(_i) (0x01004 + ((_i) * 0x40)) +#define NGBE_PX_RR_WP(_i) (0x01008 + ((_i) * 0x40)) +#define NGBE_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40)) +#define NGBE_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40)) +/* PX_RR_CFG bit definitions */ +#define NGBE_PX_RR_CFG_RR_SIZE_SHIFT 1 +#define NGBE_PX_RR_CFG_BSIZEPKT_SHIFT 2 /* so many KBs */ +#define NGBE_PX_RR_CFG_BSIZEHDRSIZE_SHIFT 6 /* 64byte resolution (>> 6) + * + at bit 8 offset (<< 12) + * = (<< 6) + */ +#define NGBE_PX_RR_CFG_DROP_EN 0x40000000U +#define NGBE_PX_RR_CFG_VLAN 0x80000000U +#define NGBE_PX_RR_CFG_RSC 0x20000000U +#define NGBE_PX_RR_CFG_CNTAG 0x10000000U +#define NGBE_PX_RR_CFG_RSC_CNT_MD 0x08000000U +#define NGBE_PX_RR_CFG_SPLIT_MODE 0x04000000U +#define NGBE_PX_RR_CFG_STALL 0x02000000U +#define NGBE_PX_RR_CFG_MAX_RSCBUF_1 0x00000000U +#define NGBE_PX_RR_CFG_MAX_RSCBUF_4 0x00800000U +#define NGBE_PX_RR_CFG_MAX_RSCBUF_8 0x01000000U +#define NGBE_PX_RR_CFG_MAX_RSCBUF_16 0x01800000U +#define NGBE_PX_RR_CFG_RR_THER 0x00070000U +#define NGBE_PX_RR_CFG_RR_THER_SHIFT 16 + +#define NGBE_PX_RR_CFG_RR_HDR_SZ 0x0000F000U +#define NGBE_PX_RR_CFG_RR_BUF_SZ 0x00000F00U +#define NGBE_PX_RR_CFG_RR_SZ 0x0000007EU +#define NGBE_PX_RR_CFG_RR_EN 0x00000001U + +/* statistic */ +#define NGBE_PX_MPRC(_i) (0x1020 + ((_i) * 64)) /* [0,7] */ +#define NGBE_PX_BPRC(_i) (0x1024 + ((_i) * 64)) + + +#define NGBE_PX_MPTC(_i) (0x3020 + ((_i) * 64)) /* [0,7] */ +#define NGBE_PX_BPTC(_i) (0x3024 + ((_i) * 64)) + +#define NGBE_VX_GPRC 0x01014 +#define NGBE_VX_GORC_LSB 0x01018 +#define NGBE_VX_GORC_MSB 0x0101C +#define NGBE_VX_MPRC 0x01020 +#define NGBE_VX_BPRC 0x01024 + +#define NGBE_VX_GPTC 0x03014 +#define NGBE_VX_GOTC_LSB 0x03018 +#define NGBE_VX_GOTC_MSB 0x0301C +#define NGBE_VX_MPTC 0x03020 +#define NGBE_VX_BPTC 0x03024 + + + +#define NGBE_PX_GPRC 0x12504 + +#define NGBE_PX_GPTC 0x18308 + +#define NGBE_PX_GORC_LSB 0x12508 +#define NGBE_PX_GORC_MSB 0x1250C + +#define NGBE_PX_GOTC_LSB 0x1830C +#define NGBE_PX_GOTC_MSB 0x18310 + +/*************************** Flash region definition *************************/ +/* EEC Register */ +#define NGBE_EEC_SK 0x00000001U /* EEPROM Clock */ +#define NGBE_EEC_CS 0x00000002U /* EEPROM Chip Select */ +#define NGBE_EEC_DI 0x00000004U /* EEPROM Data In */ +#define NGBE_EEC_DO 0x00000008U /* EEPROM Data Out */ +#define NGBE_EEC_FWE_MASK 0x00000030U /* FLASH Write Enable */ +#define NGBE_EEC_FWE_DIS 0x00000010U /* Disable FLASH writes */ +#define NGBE_EEC_FWE_EN 0x00000020U /* Enable FLASH writes */ +#define NGBE_EEC_FWE_SHIFT 4 +#define NGBE_EEC_REQ 0x00000040U /* EEPROM Access Request */ +#define NGBE_EEC_GNT 0x00000080U /* EEPROM Access Grant */ +#define NGBE_EEC_PRES 0x00000100U /* EEPROM Present */ +#define NGBE_EEC_ARD 0x00000200U /* EEPROM Auto Read Done */ +#define NGBE_EEC_FLUP 0x00800000U /* Flash update command */ +#define NGBE_EEC_SEC1VAL 0x02000000U /* Sector 1 Valid */ +#define NGBE_EEC_FLUDONE 0x04000000U /* Flash update done */ +/* EEPROM Addressing bits based on type (0-small, 1-large) */ +#define NGBE_EEC_ADDR_SIZE 0x00000400U +#define 
NGBE_EEC_SIZE 0x00007800U /* EEPROM Size */ +#define NGBE_EERD_MAX_ADDR 0x00003FFFU /* EERD alows 14 bits for addr. */ + +#define NGBE_EEC_SIZE_SHIFT 11 +#define NGBE_EEPROM_WORD_SIZE_SHIFT 6 +#define NGBE_EEPROM_OPCODE_BITS 8 + +/* FLA Register */ +#define NGBE_FLA_LOCKED 0x00000040U + +/* Part Number String Length */ +#define NGBE_PBANUM_LENGTH 32 + +/* Checksum and EEPROM pointers */ +#define NGBE_PBANUM_PTR_GUARD 0xFAFA +#define NGBE_CHECKSUM_CAP_ST_PASS 0x80658383 +#define NGBE_CHECKSUM_CAP_ST_FAIL 0x70657376 +#define NGBE_ERR_ST 0xffffffff +#define NGBE_EEPROM_CHECKSUM 0x2F +#define NGBE_EEPROM_SUM 0xBABA +#define NGBE_OPTION_ROM_PTR 0x05 +#define NGBE_SHADOW_RAM_SIZE 0x4000 +#define NGBE_PCIE_CONFIG_SIZE 0x08 +#define NGBE_EEPROM_LAST_WORD 0x800 +#define NGBE_FW_PTR 0x0F +#define NGBE_SW_REGION_PTR 0x28 + +#define NGBE_CALSUM_COMMAND 0xE9 +#define NGBE_CALSUM_CAP_STATUS 0x10224 +#define NGBE_EEPROM_VERSION_STORE_REG 0x1022C +#define NGBE_SAN_MAC_ADDR_PTR 0x18 +#define NGBE_DEVICE_CAPS 0x1C +#define NGBE_EEPROM_VERSION_L 0x1D +#define NGBE_EEPROM_VERSION_H 0x1E + +#define NGBE_MAX_MSIX_VECTORS_EMERALD 0x09 + +/* MSI-X capability fields masks */ +#define NGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF + +/* EEPROM Commands - SPI */ +#define NGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ +#define NGBE_EEPROM_STATUS_RDY_SPI 0x01 +#define NGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define NGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define NGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ +#define NGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ +/* EEPROM reset Write Enable latch */ +#define NGBE_EEPROM_WRDI_OPCODE_SPI 0x04 +#define NGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ +#define NGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ +#define NGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define NGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define NGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Read Register */ +#define NGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */ +#define NGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */ +#define NGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */ +#define NGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define NGBE_NVM_POLL_WRITE 1 /* Flag for polling for wr complete */ +#define NGBE_NVM_POLL_READ 0 /* Flag for polling for rd complete */ + +#define NVM_INIT_CTRL_3 0x38 +#define NVM_INIT_CTRL_3_LPLU 0x8 + +#define NGBE_ETH_LENGTH_OF_ADDRESS 6 + +#define NGBE_EEPROM_PAGE_SIZE_MAX 128 +#define NGBE_EEPROM_RD_BUFFER_MAX_COUNT 256 /* words rd in burst */ +#define NGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* words wr in burst */ +#define NGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */ +#define NGBE_EEPROM_CCD_BIT 2 + +#ifndef NGBE_EEPROM_GRANT_ATTEMPTS +#define NGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM attempts to gain grant */ +#endif + +#ifndef NGBE_EERD_EEWR_ATTEMPTS +/* Number of 5 microseconds we wait for EERD read and + * EERW write to complete */ +#define NGBE_EERD_EEWR_ATTEMPTS 100000 +#endif + +#ifndef NGBE_FLUDONE_ATTEMPTS +/* # attempts we wait for flush update to complete */ +#define NGBE_FLUDONE_ATTEMPTS 20000 +#endif + +#define NGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */ +#define NGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */ +#define NGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */ +#define NGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* 
LAN Disable Select */ + +#define NGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 +#define NGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 +#define NGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 +#define NGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 +#define NGBE_FW_LESM_PARAMETERS_PTR 0x2 +#define NGBE_FW_LESM_STATE_1 0x1 +#define NGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ +#define NGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 +#define NGBE_FW_PATCH_VERSION_4 0x7 +#define NGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ +#define NGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ +#define NGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */ +#define NGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ +#define NGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ +#define NGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x17 /* Alt. SAN MAC block */ +#define NGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt SAN MAC capability */ +#define NGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt SAN MAC 0 offset */ +#define NGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt SAN MAC 1 offset */ +#define NGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt WWNN prefix offset */ +#define NGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt WWPN prefix offset */ +#define NGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt SAN MAC exists */ +#define NGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt WWN base exists */ +#define NGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */ +#define NGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */ +#define NGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */ + +/******************************** PCI Bus Info *******************************/ +#define NGBE_PCI_DEVICE_STATUS 0xAA +#define NGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 +#define NGBE_PCI_LINK_STATUS 0xB2 +#define NGBE_PCI_DEVICE_CONTROL2 0xC8 +#define NGBE_PCI_LINK_WIDTH 0x3F0 +#define NGBE_PCI_LINK_WIDTH_1 0x10 +#define NGBE_PCI_LINK_WIDTH_2 0x20 +#define NGBE_PCI_LINK_WIDTH_4 0x40 +#define NGBE_PCI_LINK_WIDTH_8 0x80 +#define NGBE_PCI_LINK_SPEED 0xF +#define NGBE_PCI_LINK_SPEED_2500 0x1 +#define NGBE_PCI_LINK_SPEED_5000 0x2 +#define NGBE_PCI_LINK_SPEED_8000 0x3 +#define NGBE_PCI_HEADER_TYPE_REGISTER 0x0E +#define NGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define NGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 + +#define NGBE_PCIDEVCTRL2_RELAX_ORDER_OFFSET 4 +#define NGBE_PCIDEVCTRL2_RELAX_ORDER_MASK \ + (0x0001 << NGBE_PCIDEVCTRL2_RELAX_ORDER_OFFSET) +#define NGBE_PCIDEVCTRL2_RELAX_ORDER_ENABLE \ + (0x01 << NGBE_PCIDEVCTRL2_RELAX_ORDER_OFFSET) + +#define NGBE_PCIDEVCTRL2_TIMEO_MASK 0xf +#define NGBE_PCIDEVCTRL2_16_32ms_def 0x0 +#define NGBE_PCIDEVCTRL2_50_100us 0x1 +#define NGBE_PCIDEVCTRL2_1_2ms 0x2 +#define NGBE_PCIDEVCTRL2_16_32ms 0x5 +#define NGBE_PCIDEVCTRL2_65_130ms 0x6 +#define NGBE_PCIDEVCTRL2_260_520ms 0x9 +#define NGBE_PCIDEVCTRL2_1_2s 0xa +#define NGBE_PCIDEVCTRL2_4_8s 0xd +#define NGBE_PCIDEVCTRL2_17_34s 0xe + + +/******************* Receive Descriptor bit definitions **********************/ +#define NGBE_RXD_IPSEC_STATUS_SECP 0x00020000U +#define NGBE_RXD_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000U +#define NGBE_RXD_IPSEC_ERROR_INVALID_LENGTH 0x10000000U +#define NGBE_RXD_IPSEC_ERROR_AUTH_FAILED 0x18000000U +#define NGBE_RXD_IPSEC_ERROR_BIT_MASK 0x18000000U + +#define NGBE_RXD_NEXTP_MASK 0x000FFFF0U /* Next Descriptor Index */ +#define NGBE_RXD_NEXTP_SHIFT 0x00000004U +#define NGBE_RXD_STAT_MASK 0x000fffffU /* Stat/NEXTP: bit 0-19 */ +#define NGBE_RXD_STAT_DD 0x00000001U /* Done */ +#define NGBE_RXD_STAT_EOP 0x00000002U /* End of Packet */ +#define 
NGBE_RXD_STAT_CLASS_ID_MASK 0x0000001CU +#define NGBE_RXD_STAT_CLASS_ID_TC_RSS 0x00000000U +#define NGBE_RXD_STAT_CLASS_ID_SYN 0x00000008U +#define NGBE_RXD_STAT_CLASS_ID_5_TUPLE 0x0000000CU +#define NGBE_RXD_STAT_CLASS_ID_L2_ETYPE 0x00000010U +#define NGBE_RXD_STAT_VP 0x00000020U /* IEEE VLAN Pkt */ +#define NGBE_RXD_STAT_UDPCS 0x00000040U /* UDP xsum calculated */ +#define NGBE_RXD_STAT_L4CS 0x00000080U /* L4 xsum calculated */ +#define NGBE_RXD_STAT_IPCS 0x00000100U /* IP xsum calculated */ +#define NGBE_RXD_STAT_PIF 0x00000200U /* passed in-exact filter */ +#define NGBE_RXD_STAT_OUTERIPCS 0x00000400U /* Cloud IP xsum calculated*/ +#define NGBE_RXD_STAT_VEXT 0x00000800U /* 1st VLAN found */ +#define NGBE_RXD_STAT_LLINT 0x00002000U /* Pkt caused Low Latency + * Int */ +#define NGBE_RXD_STAT_TS 0x00004000U /* IEEE1588 Time Stamp */ +#define NGBE_RXD_STAT_SECP 0x00008000U /* Security Processing */ +#define NGBE_RXD_STAT_LB 0x00010000U /* Loopback Status */ +#define NGBE_RXD_STAT_FCEOFS 0x00020000U /* FCoE EOF/SOF Stat */ +#define NGBE_RXD_STAT_FCSTAT 0x000C0000U /* FCoE Pkt Stat */ +#define NGBE_RXD_STAT_FCSTAT_NOMTCH 0x00000000U /* 00: No Ctxt Match */ +#define NGBE_RXD_STAT_FCSTAT_NODDP 0x00040000U /* 01: Ctxt w/o DDP */ +#define NGBE_RXD_STAT_FCSTAT_FCPRSP 0x00080000U /* 10: Recv. FCP_RSP */ +#define NGBE_RXD_STAT_FCSTAT_DDP 0x000C0000U /* 11: Ctxt w/ DDP */ + +#define NGBE_RXD_ERR_MASK 0xfff00000U /* RDESC.ERRORS mask */ +#define NGBE_RXD_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define NGBE_RXD_ERR_FCEOFE 0x80000000U /* FCEOFe/IPE */ +#define NGBE_RXD_ERR_HBO 0x00800000U /*Header Buffer Overflow */ +#define NGBE_RXD_ERR_OUTERIPER 0x04000000U /* CRC IP Header error */ +#define NGBE_RXD_ERR_SECERR_MASK 0x18000000U +#define NGBE_RXD_ERR_RXE 0x20000000U /* Any MAC Error */ +#define NGBE_RXD_ERR_TCPE 0x40000000U /* TCP/UDP Checksum Error */ +#define NGBE_RXD_ERR_IPE 0x80000000U /* IP Checksum Error */ + +#define NGBE_RXDPS_HDRSTAT_HDRSP 0x00008000U +#define NGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FFU + +#define NGBE_RXD_RSSTYPE_MASK 0x0000000FU +#define NGBE_RXD_TPID_MASK 0x000001C0U +#define NGBE_RXD_TPID_SHIFT 6 +#define NGBE_RXD_HDRBUFLEN_MASK 0x00007FE0U +#define NGBE_RXD_RSCCNT_MASK 0x001E0000U +#define NGBE_RXD_RSCCNT_SHIFT 17 +#define NGBE_RXD_HDRBUFLEN_SHIFT 5 +#define NGBE_RXD_SPLITHEADER_EN 0x00001000U +#define NGBE_RXD_SPH 0x8000 + +/* RSS Hash results */ +#define NGBE_RXD_RSSTYPE_NONE 0x00000000U +#define NGBE_RXD_RSSTYPE_IPV4_TCP 0x00000001U +#define NGBE_RXD_RSSTYPE_IPV4 0x00000002U +#define NGBE_RXD_RSSTYPE_IPV6_TCP 0x00000003U +#define NGBE_RXD_RSSTYPE_IPV4_SCTP 0x00000004U +#define NGBE_RXD_RSSTYPE_IPV6 0x00000005U +#define NGBE_RXD_RSSTYPE_IPV6_SCTP 0x00000006U +#define NGBE_RXD_RSSTYPE_IPV4_UDP 0x00000007U +#define NGBE_RXD_RSSTYPE_IPV6_UDP 0x00000008U + +/** + * receive packet type + * PTYPE:8 = TUN:2 + PKT:2 + TYP:4 + **/ +/* TUN */ +#define NGBE_PTYPE_TUN_IPV4 (0x80) +#define NGBE_PTYPE_TUN_IPV6 (0xC0) + +/* PKT for TUN */ +#define NGBE_PTYPE_PKT_IPIP (0x00) /* IP+IP */ +#define NGBE_PTYPE_PKT_IG (0x10) /* IP+GRE */ +#define NGBE_PTYPE_PKT_IGM (0x20) /* IP+GRE+MAC */ +#define NGBE_PTYPE_PKT_IGMV (0x30) /* IP+GRE+MAC+VLAN */ +/* PKT for !TUN */ +#define NGBE_PTYPE_PKT_MAC (0x10) +#define NGBE_PTYPE_PKT_IP (0x20) +#define NGBE_PTYPE_PKT_FCOE (0x30) + +/* TYP for PKT=mac */ +#define NGBE_PTYPE_TYP_MAC (0x01) +#define NGBE_PTYPE_TYP_TS (0x02) /* time sync */ +#define NGBE_PTYPE_TYP_FIP (0x03) +#define NGBE_PTYPE_TYP_LLDP (0x04) +#define NGBE_PTYPE_TYP_CNM (0x05) +#define 
NGBE_PTYPE_TYP_EAPOL (0x06) +#define NGBE_PTYPE_TYP_ARP (0x07) +/* TYP for PKT=ip */ +#define NGBE_PTYPE_PKT_IPV6 (0x08) +#define NGBE_PTYPE_TYP_IPFRAG (0x01) +#define NGBE_PTYPE_TYP_IP (0x02) +#define NGBE_PTYPE_TYP_UDP (0x03) +#define NGBE_PTYPE_TYP_TCP (0x04) +#define NGBE_PTYPE_TYP_SCTP (0x05) +/* TYP for PKT=fcoe */ +#define NGBE_PTYPE_PKT_VFT (0x08) +#define NGBE_PTYPE_TYP_FCOE (0x00) +#define NGBE_PTYPE_TYP_FCDATA (0x01) +#define NGBE_PTYPE_TYP_FCRDY (0x02) +#define NGBE_PTYPE_TYP_FCRSP (0x03) +#define NGBE_PTYPE_TYP_FCOTHER (0x04) + +/* Packet type non-ip values */ +enum ngbe_l2_ptypes { + NGBE_PTYPE_L2_ABORTED = (NGBE_PTYPE_PKT_MAC), + NGBE_PTYPE_L2_MAC = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_MAC), + NGBE_PTYPE_L2_TS = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_TS), + NGBE_PTYPE_L2_FIP = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_FIP), + NGBE_PTYPE_L2_LLDP = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_LLDP), + NGBE_PTYPE_L2_CNM = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_CNM), + NGBE_PTYPE_L2_EAPOL = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_EAPOL), + NGBE_PTYPE_L2_ARP = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_ARP), + + NGBE_PTYPE_L2_IPV4_FRAG = (NGBE_PTYPE_PKT_IP | + NGBE_PTYPE_TYP_IPFRAG), + NGBE_PTYPE_L2_IPV4 = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_TYP_IP), + NGBE_PTYPE_L2_IPV4_UDP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_TYP_UDP), + NGBE_PTYPE_L2_IPV4_TCP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_TYP_TCP), + NGBE_PTYPE_L2_IPV4_SCTP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_TYP_SCTP), + NGBE_PTYPE_L2_IPV6_FRAG = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6 | + NGBE_PTYPE_TYP_IPFRAG), + NGBE_PTYPE_L2_IPV6 = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6 | + NGBE_PTYPE_TYP_IP), + NGBE_PTYPE_L2_IPV6_UDP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6 | + NGBE_PTYPE_TYP_UDP), + NGBE_PTYPE_L2_IPV6_TCP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6 | + NGBE_PTYPE_TYP_TCP), + NGBE_PTYPE_L2_IPV6_SCTP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6 | + NGBE_PTYPE_TYP_SCTP), + + NGBE_PTYPE_L2_FCOE = (NGBE_PTYPE_PKT_FCOE | NGBE_PTYPE_TYP_FCOE), + NGBE_PTYPE_L2_FCOE_FCDATA = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_TYP_FCDATA), + NGBE_PTYPE_L2_FCOE_FCRDY = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_TYP_FCRDY), + NGBE_PTYPE_L2_FCOE_FCRSP = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_TYP_FCRSP), + NGBE_PTYPE_L2_FCOE_FCOTHER = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_TYP_FCOTHER), + NGBE_PTYPE_L2_FCOE_VFT = (NGBE_PTYPE_PKT_FCOE | NGBE_PTYPE_PKT_VFT), + NGBE_PTYPE_L2_FCOE_VFT_FCDATA = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_PKT_VFT | NGBE_PTYPE_TYP_FCDATA), + NGBE_PTYPE_L2_FCOE_VFT_FCRDY = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_PKT_VFT | NGBE_PTYPE_TYP_FCRDY), + NGBE_PTYPE_L2_FCOE_VFT_FCRSP = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_PKT_VFT | NGBE_PTYPE_TYP_FCRSP), + NGBE_PTYPE_L2_FCOE_VFT_FCOTHER = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_PKT_VFT | NGBE_PTYPE_TYP_FCOTHER), + + NGBE_PTYPE_L2_TUN4_MAC = (NGBE_PTYPE_TUN_IPV4 | NGBE_PTYPE_PKT_IGM), + NGBE_PTYPE_L2_TUN6_MAC = (NGBE_PTYPE_TUN_IPV6 | NGBE_PTYPE_PKT_IGM), +}; + +#define NGBE_RXD_PKTTYPE(_rxd) \ + ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF) +#define NGBE_PTYPE_TUN(_pt) ((_pt) & 0xC0) +#define NGBE_PTYPE_PKT(_pt) ((_pt) & 0x30) +#define NGBE_PTYPE_TYP(_pt) ((_pt) & 0x0F) +#define NGBE_PTYPE_TYPL4(_pt) ((_pt) & 0x07) + +#define NGBE_RXD_IPV6EX(_rxd) \ + ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 6) & 0x1) + +/* Security Processing bit Indication */ +#define NGBE_RXD_LNKSEC_STATUS_SECP 0x00020000U +#define NGBE_RXD_LNKSEC_ERROR_NO_SA_MATCH 0x08000000U +#define NGBE_RXD_LNKSEC_ERROR_REPLAY_ERROR 0x10000000U +#define 
NGBE_RXD_LNKSEC_ERROR_BIT_MASK 0x18000000U +#define NGBE_RXD_LNKSEC_ERROR_BAD_SIG 0x18000000U + +/* Masks to determine if packets should be dropped due to frame errors */ +#define NGBE_RXD_ERR_FRAME_ERR_MASK NGBE_RXD_ERR_RXE + +/*********************** Adv Transmit Descriptor Config Masks ****************/ +#define NGBE_TXD_DTALEN_MASK 0x0000FFFFU /* Data buf length(bytes) */ +#define NGBE_TXD_MAC_LINKSEC 0x00040000U /* Insert LinkSec */ +#define NGBE_TXD_MAC_TSTAMP 0x00080000U /* IEEE1588 time stamp */ +#define NGBE_TXD_IPSEC_SA_INDEX_MASK 0x000003FFU /* IPSec SA index */ +#define NGBE_TXD_IPSEC_ESP_LEN_MASK 0x000001FFU /* IPSec ESP length */ +#define NGBE_TXD_DTYP_MASK 0x00F00000U /* DTYP mask */ +#define NGBE_TXD_DTYP_CTXT 0x00100000U /* Adv Context Desc */ +#define NGBE_TXD_DTYP_DATA 0x00000000U /* Adv Data Descriptor */ +#define NGBE_TXD_EOP 0x01000000U /* End of Packet */ +#define NGBE_TXD_IFCS 0x02000000U /* Insert FCS */ +#define NGBE_TXD_LINKSEC 0x04000000U /* enable linksec */ +#define NGBE_TXD_RS 0x08000000U /* Report Status */ +#define NGBE_TXD_ECU 0x10000000U /* DDP hdr type or iSCSI */ +#define NGBE_TXD_QCN 0x20000000U /* cntag insertion enable */ +#define NGBE_TXD_VLE 0x40000000U /* VLAN pkt enable */ +#define NGBE_TXD_TSE 0x80000000U /* TCP Seg enable */ +#define NGBE_TXD_STAT_DD 0x00000001U /* Descriptor Done */ +#define NGBE_TXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define NGBE_TXD_CC 0x00000080U /* Check Context */ +#define NGBE_TXD_IPSEC 0x00000100U /* enable ipsec esp */ +#define NGBE_TXD_IIPCS 0x00000400U +#define NGBE_TXD_EIPCS 0x00000800U +#define NGBE_TXD_L4CS 0x00000200U +#define NGBE_TXD_PAYLEN_SHIFT 13 /* Adv desc PAYLEN shift */ +#define NGBE_TXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define NGBE_TXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define NGBE_TXD_TAG_TPID_SEL_SHIFT 11 +#define NGBE_TXD_IPSEC_TYPE_SHIFT 14 +#define NGBE_TXD_ENC_SHIFT 15 + +#define NGBE_TXD_TUCMD_IPSEC_TYPE_ESP 0x00004000U /* IPSec Type ESP */ +#define NGBE_TXD_TUCMD_IPSEC_ENCRYPT_EN 0x00008000/* ESP Encrypt Enable */ +#define NGBE_TXD_TUCMD_FCOE 0x00010000U /* FCoE Frame Type */ +#define NGBE_TXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ +#define NGBE_TXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ +#define NGBE_TXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ +#define NGBE_TXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation End */ +#define NGBE_TXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation Start */ +#define NGBE_TXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ +#define NGBE_TXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ +#define NGBE_TXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ +#define NGBE_TXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ +#define NGBE_TXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define NGBE_TXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +#define NGBE_TXD_OUTER_IPLEN_SHIFT 12 /* Adv ctxt OUTERIPLEN shift */ +#define NGBE_TXD_TUNNEL_LEN_SHIFT 21 /* Adv ctxt TUNNELLEN shift */ +#define NGBE_TXD_TUNNEL_TYPE_SHIFT 11 /* Adv Tx Desc Tunnel Type shift */ +#define NGBE_TXD_TUNNEL_DECTTL_SHIFT 27 /* Adv ctxt DECTTL shift */ +#define NGBE_TXD_TUNNEL_UDP (0x0ULL << NGBE_TXD_TUNNEL_TYPE_SHIFT) +#define NGBE_TXD_TUNNEL_GRE (0x1ULL << NGBE_TXD_TUNNEL_TYPE_SHIFT) + + +/************ ngbe_type.h ************/ +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define NGBE_REQ_TX_DESCRIPTOR_MULTIPLE 128 +#define NGBE_REQ_RX_DESCRIPTOR_MULTIPLE 128 +#define NGBE_REQ_TX_BUFFER_GRANULARITY 1024 + +/* Vlan-specific macros */ +#define 
NGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ +#define NGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */ +#define NGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ +#define NGBE_TX_DESC_SPECIAL_PRI_SHIFT NGBE_RX_DESC_SPECIAL_PRI_SHIFT + +/* Transmit Descriptor */ +union ngbe_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Receive Descriptor */ +union ngbe_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen */ + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Context descriptors */ +struct ngbe_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/************************* Flow Directory HASH *******************************/ +/* Software ATR hash keys */ +#define NGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 +#define NGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 + +/* Software ATR input stream values and masks */ +#define NGBE_ATR_HASH_MASK 0x7fff +#define NGBE_ATR_L4TYPE_MASK 0x3 +#define NGBE_ATR_L4TYPE_UDP 0x1 +#define NGBE_ATR_L4TYPE_TCP 0x2 +#define NGBE_ATR_L4TYPE_SCTP 0x3 +#define NGBE_ATR_L4TYPE_IPV6_MASK 0x4 +#define NGBE_ATR_L4TYPE_TUNNEL_MASK 0x10 +enum ngbe_atr_flow_type { + NGBE_ATR_FLOW_TYPE_IPV4 = 0x0, + NGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, + NGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, + NGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, + NGBE_ATR_FLOW_TYPE_IPV6 = 0x4, + NGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, + NGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, + NGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, + NGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10, + NGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11, + NGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12, + NGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13, + NGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14, + NGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15, + NGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16, + NGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17, +}; + +/* Flow Director ATR input struct. 
*/ +union ngbe_atr_input { + /* + * Byte layout in order, all values with MSB first: + * + * vm_pool - 1 byte + * flow_type - 1 byte + * vlan_id - 2 bytes + * src_ip - 16 bytes + * inner_mac - 6 bytes + * cloud_mode - 2 bytes + * tni_vni - 4 bytes + * dst_ip - 16 bytes + * src_port - 2 bytes + * dst_port - 2 bytes + * flex_bytes - 2 bytes + * bkt_hash - 2 bytes + */ + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + __be32 dst_ip[4]; + __be32 src_ip[4]; + __be16 src_port; + __be16 dst_port; + __be16 flex_bytes; + __be16 bkt_hash; + } formatted; + __be32 dword_stream[11]; +}; + +/* Flow Director compressed ATR hash input struct */ +union ngbe_atr_hash_dword { + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + } formatted; + __be32 ip; + struct { + __be16 src; + __be16 dst; + } port; + __be16 flex_bytes; + __be32 dword; +}; + + +/****************** Manageablility Host Interface defines ********************/ +#define NGBE_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */ +#define NGBE_HI_MAX_BLOCK_DWORD_LENGTH 64 /* Num of dwords in range */ +#define NGBE_HI_COMMAND_TIMEOUT 5000 /* Process HI command limit */ +#define NGBE_HI_FLASH_ERASE_TIMEOUT 5000 /* Process Erase command limit */ +#define NGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */ +#define NGBE_HI_FLASH_VERIFY_TIMEOUT 60000 /* Process Apply command limit */ +#define NGBE_HI_PHY_MGMT_REQ_TIMEOUT 2000 /* Wait up to 2 seconds */ + +/* CEM Support */ +#define FW_CEM_HDR_LEN 0x4 +#define FW_CEM_CMD_DRIVER_INFO 0xDD +#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 +#define FW_CEM_CMD_RESERVED 0X0 +#define FW_CEM_UNUSED_VER 0x0 +#define FW_CEM_MAX_RETRIES 3 +#define FW_CEM_RESP_STATUS_SUCCESS 0x1 +#define FW_READ_SHADOW_RAM_CMD 0x31 +#define FW_READ_SHADOW_RAM_LEN 0x6 +#define FW_WRITE_SHADOW_RAM_CMD 0x33 +#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */ +#define FW_SHADOW_RAM_DUMP_CMD 0x36 +#define FW_SHADOW_RAM_DUMP_LEN 0 +#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */ +#define FW_NVM_DATA_OFFSET 3 +#define FW_MAX_READ_BUFFER_SIZE 244 +#define FW_DISABLE_RXEN_CMD 0xDE +#define FW_DISABLE_RXEN_LEN 0x1 +#define FW_PHY_MGMT_REQ_CMD 0x20 +#define FW_RESET_CMD 0xDF +#define FW_RESET_LEN 0x2 +#define FW_SETUP_MAC_LINK_CMD 0xE0 +#define FW_SETUP_MAC_LINK_LEN 0x2 +#define FW_FLASH_UPGRADE_START_CMD 0xE3 +#define FW_FLASH_UPGRADE_START_LEN 0x1 +#define FW_FLASH_UPGRADE_WRITE_CMD 0xE4 +#define FW_FLASH_UPGRADE_VERIFY_CMD 0xE5 +#define FW_FLASH_UPGRADE_VERIFY_LEN 0x4 +#define FW_EEPROM_CHECK_STATUS 0xE9 +#define FW_PHY_LED_CONF 0xF1 +#define FW_PHY_SIGNAL 0xF0 + + +/* Host Interface Command Structures */ +struct ngbe_hic_hdr { + u8 cmd; + u8 buf_len; + union { + u8 cmd_resv; + u8 ret_status; + } cmd_or_resp; + u8 checksum; +}; + +struct ngbe_hic_hdr2_req { + u8 cmd; + u8 buf_lenh; + u8 buf_lenl; + u8 checksum; +}; + +struct ngbe_hic_hdr2_rsp { + u8 cmd; + u8 buf_lenl; + u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ + u8 checksum; +}; + +union ngbe_hic_hdr2 { + struct ngbe_hic_hdr2_req req; + struct ngbe_hic_hdr2_rsp rsp; +}; + +struct ngbe_hic_drv_info { + struct ngbe_hic_hdr hdr; + u8 port_num; + u8 ver_sub; + u8 ver_build; + u8 ver_min; + u8 ver_maj; + u8 pad; /* end spacing to ensure length is mult. of dword */ + u16 pad2; /* end spacing to ensure length is mult. 
of dword2 */ +}; + +/* These need to be dword aligned */ +struct ngbe_hic_read_shadow_ram { + union ngbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ngbe_hic_write_shadow_ram { + union ngbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ngbe_hic_disable_rxen { + struct ngbe_hic_hdr hdr; + u8 port_number; + u8 pad2; + u16 pad3; +}; + +struct ngbe_hic_reset { + struct ngbe_hic_hdr hdr; + u16 lan_id; + u16 reset_type; +}; + +struct ngbe_hic_phy_cfg { + struct ngbe_hic_hdr hdr; + u8 lan_id; + u8 phy_mode; + u16 phy_speed; +}; + +enum ngbe_module_id { + NGBE_MODULE_EEPROM = 0, + NGBE_MODULE_FIRMWARE, + NGBE_MODULE_HARDWARE, + NGBE_MODULE_PCIE +}; + +struct ngbe_hic_upg_start { + struct ngbe_hic_hdr hdr; + u8 module_id; + u8 pad2; + u16 pad3; +}; + +struct ngbe_hic_upg_write { + struct ngbe_hic_hdr hdr; + u8 data_len; + u8 eof_flag; + u16 check_sum; + u32 data[62]; +}; + +enum ngbe_upg_flag { + NGBE_RESET_NONE = 0, + NGBE_RESET_FIRMWARE, + NGBE_RELOAD_EEPROM, + NGBE_RESET_LAN +}; + +struct ngbe_hic_upg_verify { + struct ngbe_hic_hdr hdr; + u32 action_flag; +}; + +struct ngbe_hic_write_lldp{ + struct ngbe_hic_hdr hdr; + u8 func; + u8 pad2; + u16 pad3; +}; + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define NGBE_PCI_MASTER_DISABLE_TIMEOUT 800 + +/* Check whether address is multicast. This is little-endian specific check.*/ +#define NGBE_IS_MULTICAST(Address) \ + (bool)(((u8 *)(Address))[0] & ((u8)0x01)) + +/* Check whether an address is broadcast. */ +#define NGBE_IS_BROADCAST(Address) \ + ((((u8 *)(Address))[0] == ((u8)0xff)) && \ + (((u8 *)(Address))[1] == ((u8)0xff))) + +/* DCB registers */ +#define NGBE_DCB_MAX_TRAFFIC_CLASS 8 + +/* Power Manangbeent */ +/* DMA Coalescing configuration */ +struct ngbe_dmac_config { + u16 watchdog_timer; /* usec units */ + bool fcoe_en; + u32 link_speed; + u8 fcoe_tc; + u8 num_tcs; +}; + + +/* Autonegotiation advertised speeds */ +typedef u32 ngbe_autoneg_advertised; +/* Link speed */ +#define NGBE_LINK_SPEED_UNKNOWN 0 +#define NGBE_LINK_SPEED_100_FULL 1 +#define NGBE_LINK_SPEED_1GB_FULL 2 +#define NGBE_LINK_SPEED_10_FULL 8 +#define NGBE_LINK_SPEED_AUTONEG (NGBE_LINK_SPEED_100_FULL | \ + NGBE_LINK_SPEED_1GB_FULL | \ + NGBE_LINK_SPEED_10_FULL) + +/* Physical layer type */ +typedef u32 ngbe_physical_layer; +#define NGBE_PHYSICAL_LAYER_UNKNOWN 0 +#define NGBE_PHYSICAL_LAYER_1000BASE_T 0x0002 +#define NGBE_PHYSICAL_LAYER_100BASE_TX 0x0004 +#define NGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008 +#define NGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200 +#define NGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400 +#define NGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000 +#define NGBE_PHYSICAL_LAYER_1000BASE_SX 0x4000 + + +/* Special PHY Init Routine */ +#define NGBE_PHY_INIT_OFFSET_NL 0x002B +#define NGBE_PHY_INIT_END_NL 0xFFFF +#define NGBE_CONTROL_MASK_NL 0xF000 +#define NGBE_DATA_MASK_NL 0x0FFF +#define NGBE_CONTROL_SHIFT_NL 12 +#define NGBE_DELAY_NL 0 +#define NGBE_DATA_NL 1 +#define NGBE_CONTROL_NL 0x000F +#define NGBE_CONTROL_EOL_NL 0x0FFF +#define NGBE_CONTROL_SOL_NL 0x0000 + +/* ethtool */ +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 + + +/* Flow Control Data Sheet defined values + * Calculation and defines taken from 802.1bb Annex O + */ + +/* BitTimes (BT) conversion */ +#define NGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024)) +#define NGBE_B2BT(BT) (BT * 8) + +/* Calculate Delay to respond to PFC */ +#define NGBE_PFC_D 672 + +/* Calculate Cable 
Delay */ +#define NGBE_CABLE_DC 5556 /* Delay Copper */ +#define NGBE_CABLE_DO 5000 /* Delay Optical */ + +/* Calculate Interface Delay X540 */ +#define NGBE_PHY_DC 25600 /* Delay 10G BASET */ +#define NGBE_MAC_DC 8192 /* Delay Copper XAUI interface */ +#define NGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */ + +#define NGBE_ID_X540 (NGBE_MAC_DC + NGBE_XAUI_DC + NGBE_PHY_DC) + +/* Calculate Interface Delay */ +#define NGBE_PHY_D 12800 +#define NGBE_MAC_D 4096 +#define NGBE_XAUI_D (2 * 1024) + +#define NGBE_ID (NGBE_MAC_D + NGBE_XAUI_D + NGBE_PHY_D) + +/* Calculate Delay incurred from higher layer */ +#define NGBE_HD 6144 + +/* Calculate PCI Bus delay for low thresholds */ +#define NGBE_PCI_DELAY 10000 + +/* Calculate X540 delay value in bit times */ +#define NGBE_DV_X540(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (NGBE_B2BT(_max_frame_link) + \ + NGBE_PFC_D + \ + (2 * NGBE_CABLE_DC) + \ + (2 * NGBE_ID_X540) + \ + NGBE_HD) / 25 + 1) + \ + 2 * NGBE_B2BT(_max_frame_tc)) + + +/* Calculate delay value in bit times */ +#define NGBE_DV(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (NGBE_B2BT(_max_frame_link) + \ + NGBE_PFC_D + \ + (2 * NGBE_CABLE_DC) + \ + (2 * NGBE_ID) + \ + NGBE_HD) / 25 + 1) + \ + 2 * NGBE_B2BT(_max_frame_tc)) + +/* Calculate low threshold delay values */ +#define NGBE_LOW_DV_X540(_max_frame_tc) \ + (2 * NGBE_B2BT(_max_frame_tc) + \ + (36 * NGBE_PCI_DELAY / 25) + 1) + +#define NGBE_LOW_DV(_max_frame_tc) \ + (2 * NGBE_LOW_DV_X540(_max_frame_tc)) + + +/* + * Unavailable: The FCoE Boot Option ROM is not present in the flash. + * Disabled: Present; boot order is not set for any targets on the port. + * Enabled: Present; boot order is set for at least one target on the port. + */ +enum ngbe_fcoe_boot_status { + ngbe_fcoe_bootstatus_disabled = 0, + ngbe_fcoe_bootstatus_enabled = 1, + ngbe_fcoe_bootstatus_unavailable = 0xFFFF +}; + +enum ngbe_eeprom_type { + ngbe_eeprom_uninitialized = 0, + ngbe_eeprom_spi, + ngbe_flash, + ngbe_eeprom_none /* No NVM support */ +}; + +enum ngbe_phy_type { + ngbe_phy_unknown = 0, + ngbe_phy_none, + ngbe_phy_internal, + ngbe_phy_m88e1512, + ngbe_phy_m88e1512_sfi, + ngbe_phy_m88e1512_unknown, + ngbe_phy_yt8521s, + ngbe_phy_yt8521s_sfi, + ngbe_phy_sfp_passive_tyco, + ngbe_phy_sfp_passive_unknown, + ngbe_phy_sfp_active_unknown, + ngbe_phy_sfp_avago, + ngbe_phy_sfp_ftl, + ngbe_phy_sfp_ftl_active, + ngbe_phy_sfp_unknown, + ngbe_phy_sfp_intel, + ngbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/ + ngbe_phy_internal_yt8521s_sfi, + ngbe_phy_generic +}; + +/* + * SFP+ module type IDs: + * + * ID Module Type + * ============= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CU_CORE0 + * 4 SFP_DA_CU_CORE1 + * 5 SFP_SR/LR_CORE0 + * 6 SFP_SR/LR_CORE1 + */ +enum ngbe_sfp_type { + ngbe_sfp_type_da_cu = 0, + ngbe_sfp_type_sr = 1, + ngbe_sfp_type_lr = 2, + ngbe_sfp_type_da_cu_core0 = 3, + ngbe_sfp_type_da_cu_core1 = 4, + ngbe_sfp_type_srlr_core0 = 5, + ngbe_sfp_type_srlr_core1 = 6, + ngbe_sfp_type_da_act_lmt_core0 = 7, + ngbe_sfp_type_da_act_lmt_core1 = 8, + ngbe_sfp_type_1g_cu_core0 = 9, + ngbe_sfp_type_1g_cu_core1 = 10, + ngbe_sfp_type_1g_sx_core0 = 11, + ngbe_sfp_type_1g_sx_core1 = 12, + ngbe_sfp_type_1g_lx_core0 = 13, + ngbe_sfp_type_1g_lx_core1 = 14, + ngbe_sfp_type_not_present = 0xFFFE, + ngbe_sfp_type_unknown = 0xFFFF +}; + +enum ngbe_media_type { + ngbe_media_type_unknown = 0, + ngbe_media_type_fiber, + ngbe_media_type_copper, + ngbe_media_type_virtual +}; + +/* Flow Control Settings */ +enum ngbe_fc_mode { + ngbe_fc_none = 0, + 
ngbe_fc_rx_pause, + ngbe_fc_tx_pause, + ngbe_fc_full, + ngbe_fc_default +}; + +/* Smart Speed Settings */ +#define NGBE_SMARTSPEED_MAX_RETRIES 3 +enum ngbe_smart_speed { + ngbe_smart_speed_auto = 0, + ngbe_smart_speed_on, + ngbe_smart_speed_off +}; + +/* PCI bus types */ +enum ngbe_bus_type { + ngbe_bus_type_unknown = 0, + ngbe_bus_type_pci, + ngbe_bus_type_pcix, + ngbe_bus_type_pci_express, + ngbe_bus_type_internal, + ngbe_bus_type_reserved +}; + +/* PCI bus speeds */ +enum ngbe_bus_speed { + ngbe_bus_speed_unknown = 0, + ngbe_bus_speed_33 = 33, + ngbe_bus_speed_66 = 66, + ngbe_bus_speed_100 = 100, + ngbe_bus_speed_120 = 120, + ngbe_bus_speed_133 = 133, + ngbe_bus_speed_2500 = 2500, + ngbe_bus_speed_5000 = 5000, + ngbe_bus_speed_8000 = 8000, + ngbe_bus_speed_reserved +}; + +/* PCI bus widths */ +enum ngbe_bus_width { + ngbe_bus_width_unknown = 0, + ngbe_bus_width_pcie_x1 = 1, + ngbe_bus_width_pcie_x2 = 2, + ngbe_bus_width_pcie_x4 = 4, + ngbe_bus_width_pcie_x8 = 8, + ngbe_bus_width_32 = 32, + ngbe_bus_width_64 = 64, + ngbe_bus_width_reserved +}; + +struct ngbe_addr_filter_info { + u32 num_mc_addrs; + u32 rar_used_count; + u32 mta_in_use; + u32 overflow_promisc; + bool user_set_promisc; +}; + +/* Bus parameters */ +struct ngbe_bus_info { + enum pci_bus_speed speed; + enum pcie_link_width width; + enum ngbe_bus_type type; + + u16 func; + u16 lan_id; +}; + +/* Flow control parameters */ +struct ngbe_fc_info { + u32 high_water; /* Flow Ctrl High-water */ + u32 low_water; /* Flow Ctrl Low-water */ + u16 pause_time; /* Flow Control Pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + bool disable_fc_autoneg; /* Do not autonegotiate FC */ + bool fc_was_autonegged; /* Is current_mode the result of autonegging? 
*/ + enum ngbe_fc_mode current_mode; /* FC mode in effect */ + enum ngbe_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +/* Statistics counters collected by the MAC */ +struct ngbe_hw_stats { + u64 crcerrs; + u64 illerrc; + u64 errbc; + u64 mspdc; + u64 mpctotal; + u64 mpc[8]; + u64 mlfc; + u64 mrfc; + u64 rlec; + u64 lxontxc; + u64 lxonrxc; + u64 lxofftxc; + u64 lxoffrxc; + u64 pxontxc[8]; + u64 pxonrxc[8]; + u64 pxofftxc[8]; + u64 pxoffrxc[8]; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc[8]; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mngprc; + u64 mngpdc; + u64 mngptc; + u64 tor; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 xec; + u64 qprc[16]; + u64 qptc[16]; + u64 qbrc[16]; + u64 qbtc[16]; + u64 qprdc[16]; + u64 pxon2offc[8]; + u64 fccrc; + u64 fclast; + u64 fcoerpdc; + u64 fcoeprc; + u64 fcoeptc; + u64 fcoedwrc; + u64 fcoedwtc; + u64 fcoe_noddp; + u64 fcoe_noddp_ext_buff; + u64 ldpcec; + u64 pcrc8ec; + u64 b2ospc; + u64 b2ogprc; + u64 o2bgptc; + u64 o2bspc; +}; + +/* forward declaration */ +struct ngbe_hw; + +/* iterator type for walking multicast address lists */ +typedef u8* (*ngbe_mc_addr_itr) (struct ngbe_hw *hw, u8 **mc_addr_ptr, + u32 *vmdq); + +/* Function pointer table */ +struct ngbe_eeprom_operations { + int (*init_params)(struct ngbe_hw *); + int (*read)(struct ngbe_hw *, u16, u16 *); + int (*read_buffer)(struct ngbe_hw *, u16, u16, u16 *); + int (*read32)(struct ngbe_hw *, u16, u32 *); + int (*write)(struct ngbe_hw *, u16, u16); + int (*write_buffer)(struct ngbe_hw *, u16, u16, u16 *); + int (*validate_checksum)(struct ngbe_hw *, u16 *); + int (*update_checksum)(struct ngbe_hw *); + int (*calc_checksum)(struct ngbe_hw *); + int (*eeprom_chksum_cap_st)(struct ngbe_hw *, u16, u32 *); + int (*phy_led_oem_chk)(struct ngbe_hw *, u32 *); + int (*phy_signal_set)(struct ngbe_hw *); +}; + +struct ngbe_flash_operations { + int (*init_params)(struct ngbe_hw *); + int (*read_buffer)(struct ngbe_hw *, u32, u32, u32 *); + int (*write_buffer)(struct ngbe_hw *, u32, u32, u32 *); + int (*check_led_oem)(struct ngbe_hw *); +}; + +struct ngbe_mac_operations { + int (*init_hw)(struct ngbe_hw *); + int (*reset_hw)(struct ngbe_hw *); + int (*start_hw)(struct ngbe_hw *); + int (*clear_hw_cntrs)(struct ngbe_hw *); + enum ngbe_media_type (*get_media_type)(struct ngbe_hw *); + int (*get_mac_addr)(struct ngbe_hw *, u8 *); + int (*get_device_caps)(struct ngbe_hw *, u16 *); + int (*stop_adapter)(struct ngbe_hw *); + int (*get_bus_info)(struct ngbe_hw *); + void (*set_lan_id)(struct ngbe_hw *); + int (*enable_rx_dma)(struct ngbe_hw *, u32); + int (*disable_sec_rx_path)(struct ngbe_hw *); + int (*enable_sec_rx_path)(struct ngbe_hw *); + int (*acquire_swfw_sync)(struct ngbe_hw *, u32); + void (*release_swfw_sync)(struct ngbe_hw *, u32); + + /* Link */ + void (*disable_tx_laser)(struct ngbe_hw *); + void (*enable_tx_laser)(struct ngbe_hw *); + void (*flap_tx_laser)(struct ngbe_hw *); + int (*setup_link)(struct ngbe_hw *, u32, bool); + int (*setup_mac_link)(struct ngbe_hw *, u32, bool); + int (*check_link)(struct ngbe_hw *, u32 *, bool *, bool); + int (*get_link_capabilities)(struct ngbe_hw *, u32 *, + bool *); + void (*set_rate_select_speed)(struct ngbe_hw *, u32); + + /* Packet Buffer manipulation */ + void (*setup_rxpba)(struct ngbe_hw *, int, u32, int); + + /* LED */ + 
int (*led_on)(struct ngbe_hw *, u32); + int (*led_off)(struct ngbe_hw *, u32); + + /* RAR, Multicast, VLAN */ + int (*set_rar)(struct ngbe_hw *, u32, u8 *, u64, u32); + int (*clear_rar)(struct ngbe_hw *, u32); + int (*insert_mac_addr)(struct ngbe_hw *, u8 *, u32); + int (*set_vmdq)(struct ngbe_hw *, u32, u32); + int (*set_vmdq_san_mac)(struct ngbe_hw *, u32); + int (*clear_vmdq)(struct ngbe_hw *, u32, u32); + int (*init_rx_addrs)(struct ngbe_hw *); + int (*update_uc_addr_list)(struct ngbe_hw *, u8 *, u32, + ngbe_mc_addr_itr); + int (*update_mc_addr_list)(struct ngbe_hw *, u8 *, u32, + ngbe_mc_addr_itr, bool clear); + int (*enable_mc)(struct ngbe_hw *); + int (*disable_mc)(struct ngbe_hw *); + int (*clear_vfta)(struct ngbe_hw *); + int (*set_vfta)(struct ngbe_hw *, u32, u32, bool); + int (*set_vlvf)(struct ngbe_hw *, u32, u32, bool, bool *); + int (*init_uta_tables)(struct ngbe_hw *); + void (*set_mac_anti_spoofing)(struct ngbe_hw *, bool, int); + void (*set_vlan_anti_spoofing)(struct ngbe_hw *, bool, int); + + /* Flow Control */ + int (*fc_enable)(struct ngbe_hw *); + int (*setup_fc)(struct ngbe_hw *); + + /* Manageability interface */ + int (*set_fw_drv_ver)(struct ngbe_hw *, u8, u8, u8, u8); + int (*get_thermal_sensor_data)(struct ngbe_hw *); + int (*init_thermal_sensor_thresh)(struct ngbe_hw *hw); + void (*get_rtrup2tc)(struct ngbe_hw *hw, u8 *map); + void (*disable_rx)(struct ngbe_hw *hw); + void (*enable_rx)(struct ngbe_hw *hw); + void (*set_source_address_pruning)(struct ngbe_hw *, bool, + unsigned int); + void (*set_ethertype_anti_spoofing)(struct ngbe_hw *, bool, int); + int (*dmac_config)(struct ngbe_hw *hw); + int (*setup_eee)(struct ngbe_hw *hw, bool enable_eee); +}; + +struct ngbe_phy_operations { + int (*identify)(struct ngbe_hw *); + int (*identify_sfp)(struct ngbe_hw *); + int (*init)(struct ngbe_hw *); + int (*reset)(struct ngbe_hw *); + int (*read)(struct ngbe_hw *, int, int); + int (*write)(struct ngbe_hw *, int, int, u16); + int (*read_reg)(struct ngbe_hw *, u32, u32, u16 *); + int (*write_reg)(struct ngbe_hw *, u32, u32, u16); + int (*read_reg_mdi)(struct ngbe_hw *, u32, u32, u16 *); + int (*write_reg_mdi)(struct ngbe_hw *, u32, u32, u16); + u32 (*setup_link)(struct ngbe_hw *, u32, bool); + int (*phy_suspend)(struct ngbe_hw *hw); + int (*phy_resume)(struct ngbe_hw *hw); + u32 (*phy_led_ctrl)(struct ngbe_hw *); + int (*setup_internal_link)(struct ngbe_hw *); + u32 (*setup_link_speed)(struct ngbe_hw *, u32, bool); + int (*check_link)(struct ngbe_hw *, u32 *, bool *); + int (*check_overtemp)(struct ngbe_hw *); + int (*check_event)(struct ngbe_hw *); + int (*get_adv_pause)(struct ngbe_hw *, u8 *); + int (*get_lp_adv_pause)(struct ngbe_hw *, u8 *); + int (*set_adv_pause)(struct ngbe_hw *, u16); + int (*setup_once)(struct ngbe_hw *); +}; + +struct ngbe_eeprom_info { + struct ngbe_eeprom_operations ops; + enum ngbe_eeprom_type type; + u32 semaphore_delay; + u16 word_size; + u16 address_bits; + u16 word_page_size; + u16 ctrl_word_3; + u16 sw_region_offset; +}; + +struct ngbe_flash_info { + struct ngbe_flash_operations ops; + u32 semaphore_delay; + u32 dword_size; + u16 address_bits; +}; + +#define NGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 +struct ngbe_mac_info { + struct ngbe_mac_operations ops; + u8 addr[NGBE_ETH_LENGTH_OF_ADDRESS]; + u8 perm_addr[NGBE_ETH_LENGTH_OF_ADDRESS]; + u8 san_addr[NGBE_ETH_LENGTH_OF_ADDRESS]; + /* prefix for World Wide Node Name (WWNN) */ + u16 wwnn_prefix; + /* prefix for World Wide Port Name (WWPN) */ + u16 wwpn_prefix; +#define NGBE_MAX_MTA 128 
+#define NGBE_MAX_VFTA_ENTRIES 128 + u32 mta_shadow[NGBE_MAX_MTA]; + int mc_filter_type; + u32 mcft_size; + u32 vft_shadow[NGBE_MAX_VFTA_ENTRIES]; + u32 vft_size; + u32 num_rar_entries; + u32 rar_highwater; + u32 rx_pb_size; + u32 max_tx_queues; + u32 max_rx_queues; + u32 orig_sr_pcs_ctl2; + u32 orig_sr_pma_mmd_ctl1; + u32 orig_sr_an_mmd_ctl; + u32 orig_sr_an_mmd_adv_reg2; + u32 orig_vr_xs_or_pcs_mmd_digi_ctl1; + u8 san_mac_rar_index; + bool get_link_status; + u16 max_msix_vectors; + bool arc_subsystem_valid; + bool orig_link_settings_stored; + bool autotry_restart; + u8 flags; + struct ngbe_thermal_sensor_data thermal_sensor_data; + bool thermal_sensor_enabled; + struct ngbe_dmac_config dmac_config; + bool set_lben; + bool autoneg; +}; + +struct ngbe_phy_info { + struct ngbe_phy_operations ops; + enum ngbe_phy_type type; + u32 addr; + u32 id; + enum ngbe_sfp_type sfp_type; + bool sfp_setup_needed; + u32 revision; + enum ngbe_media_type media_type; + u32 phy_semaphore_mask; + u8 lan_id; /* to be delete */ + ngbe_autoneg_advertised autoneg_advertised; + enum ngbe_smart_speed smart_speed; + bool smart_speed_active; + bool multispeed_fiber; + bool reset_if_overtemp; + ngbe_physical_layer link_mode; + u32 force_speed; +}; + +#include "ngbe_mbx.h" + +struct ngbe_mbx_operations { + void (*init_params)(struct ngbe_hw *hw); + int (*read)(struct ngbe_hw *, u32 *, u16, u16); + int (*write)(struct ngbe_hw *, u32 *, u16, u16); + int (*read_posted)(struct ngbe_hw *, u32 *, u16, u16); + int (*write_posted)(struct ngbe_hw *, u32 *, u16, u16); + int (*check_for_msg)(struct ngbe_hw *, u16); + int (*check_for_ack)(struct ngbe_hw *, u16); + int (*check_for_rst)(struct ngbe_hw *, u16); +}; + +struct ngbe_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct ngbe_mbx_info { + struct ngbe_mbx_operations ops; + struct ngbe_mbx_stats stats; + u32 timeout; + u32 udelay; + u32 v2p_mailbox; + u16 size; +}; + +enum ngbe_reset_type { + NGBE_LAN_RESET = 0, + NGBE_SW_RESET, + NGBE_GLOBAL_RESET +}; + +enum ngbe_link_status { + NGBE_LINK_STATUS_NONE = 0, + NGBE_LINK_STATUS_KX, + NGBE_LINK_STATUS_KX4 +}; + +enum em_mac_type { + em_mac_type_unknown = 0, + em_mac_type_mdi, + em_mac_type_rgmii +}; + +struct ngbe_hw { + u8 IOMEM *hw_addr; + void *back; + struct ngbe_mac_info mac; + struct ngbe_addr_filter_info addr_ctrl; + struct ngbe_fc_info fc; + struct ngbe_phy_info phy; + struct ngbe_eeprom_info eeprom; + struct ngbe_flash_info flash; + struct ngbe_bus_info bus; + struct ngbe_mbx_info mbx; + enum em_mac_type mac_type; + + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + bool adapter_stopped; + int api_version; + enum ngbe_reset_type reset_type; + bool force_full_reset; + bool allow_unsupported_sfp; + bool wol_enabled; + enum ngbe_link_status link_status; + u16 tpid[8]; + bool gpio_ctl; + bool ncsi_enabled; + u8 restart_an; + u16 oem_ssid; + u16 oem_svid; + spinlock_t phy_lock; +}; + +/* Error Codes */ +#define NGBE_OK 0 +#define NGBE_ERR 100 +#define NGBE_NOT_IMPLEMENTED 0x7FFFFFFF +/* (-NGBE_ERR, NGBE_ERR): reserved for non-ngbe defined error code */ +#define NGBE_ERR_NOSUPP -(NGBE_ERR+0) +#define NGBE_ERR_EEPROM -(NGBE_ERR+1) +#define NGBE_ERR_EEPROM_CHECKSUM -(NGBE_ERR+2) +#define NGBE_ERR_PHY -(NGBE_ERR+3) +#define NGBE_ERR_CONFIG -(NGBE_ERR+4) +#define NGBE_ERR_PARAM -(NGBE_ERR+5) +#define NGBE_ERR_MAC_TYPE -(NGBE_ERR+6) +#define NGBE_ERR_UNKNOWN_PHY -(NGBE_ERR+7) +#define NGBE_ERR_LINK_SETUP -(NGBE_ERR+8) +#define 
NGBE_ERR_ADAPTER_STOPPED -(NGBE_ERR+9) +#define NGBE_ERR_INVALID_MAC_ADDR -(NGBE_ERR+10) +#define NGBE_ERR_DEVICE_NOT_SUPPORTED -(NGBE_ERR+11) +#define NGBE_ERR_MASTER_REQUESTS_PENDING -(NGBE_ERR+12) +#define NGBE_ERR_INVALID_LINK_SETTINGS -(NGBE_ERR+13) +#define NGBE_ERR_AUTONEG_NOT_COMPLETE -(NGBE_ERR+14) +#define NGBE_ERR_RESET_FAILED -(NGBE_ERR+15) +#define NGBE_ERR_SWFW_SYNC -(NGBE_ERR+16) +#define NGBE_ERR_PHY_ADDR_INVALID -(NGBE_ERR+17) +#define NGBE_ERR_I2C -(NGBE_ERR+18) +#define NGBE_ERR_SFP_NOT_SUPPORTED -(NGBE_ERR+19) +#define NGBE_ERR_SFP_NOT_PRESENT -(NGBE_ERR+20) +#define NGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -(NGBE_ERR+21) +#define NGBE_ERR_NO_SAN_ADDR_PTR -(NGBE_ERR+22) +#define NGBE_ERR_FDIR_REINIT_FAILED -(NGBE_ERR+23) +#define NGBE_ERR_EEPROM_VERSION -(NGBE_ERR+24) +#define NGBE_ERR_NO_SPACE -(NGBE_ERR+25) +#define NGBE_ERR_OVERTEMP -(NGBE_ERR+26) +#define NGBE_ERR_UNDERTEMP -(NGBE_ERR+27) +#define NGBE_ERR_FC_NOT_NEGOTIATED -(NGBE_ERR+28) +#define NGBE_ERR_FC_NOT_SUPPORTED -(NGBE_ERR+29) +#define NGBE_ERR_SFP_SETUP_NOT_COMPLETE -(NGBE_ERR+30) +#define NGBE_ERR_PBA_SECTION -(NGBE_ERR+31) +#define NGBE_ERR_INVALID_ARGUMENT -(NGBE_ERR+32) +#define NGBE_ERR_HOST_INTERFACE_COMMAND -(NGBE_ERR+33) +#define NGBE_ERR_OUT_OF_MEM -(NGBE_ERR+34) +#define NGBE_ERR_FEATURE_NOT_SUPPORTED -(NGBE_ERR+36) +#define NGBE_ERR_EEPROM_PROTECTED_REGION -(NGBE_ERR+37) +#define NGBE_ERR_FDIR_CMD_INCOMPLETE -(NGBE_ERR+38) +#define NGBE_ERR_FLASH_LOADING_FAILED -(NGBE_ERR+39) +#define NGBE_ERR_XPCS_POWER_UP_FAILED -(NGBE_ERR+40) +#define NGBE_ERR_FW_RESP_INVALID -(NGBE_ERR+41) +#define NGBE_ERR_PHY_INIT_NOT_DONE -(NGBE_ERR+42) +#define NGBE_ERR_TIMEOUT -(NGBE_ERR+43) +#define NGBE_ERR_TOKEN_RETRY -(NGBE_ERR+44) +#define NGBE_ERR_REGISTER -(NGBE_ERR+45) +#define NGBE_ERR_MBX -(NGBE_ERR+46) +#define NGBE_ERR_MNG_ACCESS_FAILED -(NGBE_ERR+47) +#define NGBE_ERR_PHY_TYPE -(NGBE_ERR+48) +#define NGBE_ERR_PHY_TIMEOUT -(NGBE_ERR+49) + +/** + * register operations + **/ +/* read register */ +#define NGBE_DEAD_READ_RETRIES 10 +#define NGBE_DEAD_READ_REG 0xdeadbeefU +#define NGBE_DEAD_READ_REG64 0xdeadbeefdeadbeefULL + +#define NGBE_FAILED_READ_REG 0xffffffffU +#define NGBE_FAILED_READ_REG64 0xffffffffffffffffULL + +#define NGBE_LLDP_REG 0xf1000 +#define NGBE_LLDP_ON 0x0000000f + +static inline bool NGBE_REMOVED(void __iomem *addr) +{ + return unlikely(!addr); +} + +static inline u32 +ngbe_rd32(u8 __iomem *base) +{ + return readl(base); +} + +static inline u32 +rd32(struct ngbe_hw *hw, u32 reg) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + u32 val = NGBE_FAILED_READ_REG; + + if (unlikely(!base)) + return val; + + val = ngbe_rd32(base + reg); + + return val; +} +#define rd32a(a, reg, offset) ( \ + rd32((a), (reg) + ((offset) << 2))) + +static inline u32 +rd32m(struct ngbe_hw *hw, u32 reg, u32 mask) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + u32 val = NGBE_FAILED_READ_REG; + + if (unlikely(!base)) + return val; + + val = ngbe_rd32(base + reg); + if (unlikely(val == NGBE_FAILED_READ_REG)) + return val; + + return val & mask; +} + +/* write register */ +static inline void +ngbe_wr32(u8 __iomem *base, u32 val) +{ + writel(val, base); +} + +static inline void +wr32(struct ngbe_hw *hw, u32 reg, u32 val) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + + if (unlikely(!base)) + return; + + ngbe_wr32(base + reg, val); +} +#define wr32a(a, reg, off, val) \ + wr32((a), (reg) + ((off) << 2), (val)) + +static inline void +wr32m(struct ngbe_hw *hw, u32 reg, u32 mask, u32 field) +{ + u8 __iomem *base =
READ_ONCE(hw->hw_addr); + u32 val; + + if (unlikely(!base)) + return; + + val = ngbe_rd32(base + reg); + if (unlikely(val == NGBE_FAILED_READ_REG)) + return; + + val = ((val & ~mask) | (field & mask)); + ngbe_wr32(base + reg, val); +} + +/* poll register */ +#define NGBE_MDIO_TIMEOUT 1000 +#define NGBE_I2C_TIMEOUT 1000 +#define NGBE_SPI_TIMEOUT 1000 +static inline int +po32m(struct ngbe_hw *hw, u32 reg, + u32 mask, u32 field, int usecs, int count) +{ + int loop; + + loop = (count ? count : (usecs + 9) / 10); + usecs = (loop ? (usecs + loop - 1) / loop : 0); + + count = loop; + do { + u32 value = rd32(hw, reg); + if ((value & mask) == (field & mask)) { + break; + } + + if (loop-- <= 0) + break; + + usec_delay(usecs); + } while (true); + + return (count - loop <= count ? 0 : NGBE_ERR_TIMEOUT); +} + +#define NGBE_WRITE_FLUSH(H) rd32(H, NGBE_MIS_PWR) #endif /* _NGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile index 7507f762edfe54de6dfdd27ea61ebcd0f380bffb..0ed2cc1350fb35b8f7927dfb45ddbd05c867da56 100644 --- a/drivers/net/ethernet/wangxun/txgbe/Makefile +++ b/drivers/net/ethernet/wangxun/txgbe/Makefile @@ -9,4 +9,24 @@ obj-$(CONFIG_TXGBE) += txgbe.o txgbe-objs := txgbe_main.o \ txgbe_hw.o \ txgbe_phy.o \ - txgbe_ethtool.o + txgbe_ethtool.o \ + txgbe_bp.o \ + txgbe_dcb_nl.o \ + txgbe_dcb.o \ + txgbe_debugfs.o \ + txgbe_fcoe.o \ + txgbe_mbx.o \ + txgbe_mtd.o \ + txgbe_param.o \ + txgbe_ptp.o \ + txgbe_procfs.o \ + txgbe_sriov.o \ + txgbe_sysfs.o \ + txgbe_xsk.o \ + txgbe_lib.o \ + txgbe_pcierr.o \ + txgbe_e56.o \ + txgbe_e56_bp.o \ + txgbe_aml.o \ + txgbe_aml40.o \ + txgbe_kcompat.o diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h new file mode 100644 index 0000000000000000000000000000000000000000..464db2d3ecfd533c2190c418a3c87fdc18283192 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h @@ -0,0 +1,1605 @@ +/* + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ */ + + +#ifndef _TXGBE_H_ +#define _TXGBE_H_ + +#ifndef TXGBE_NO_LRO +#include +#else +#include +#endif + +#include +#include +#include + +#ifdef SIOCETHTOOL +#include +#endif +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) +#include +#endif + +#include "txgbe_dcb.h" + +#include "txgbe_kcompat.h" + +#ifdef HAVE_NDO_BUSY_POLL +#include +#define BP_EXTENDED_STATS +#endif + +#ifdef HAVE_SCTP +#include +#endif + +#ifdef HAVE_INCLUDE_LINUX_MDIO_H +#include +#endif + +#if IS_ENABLED(CONFIG_FCOE) +#include "txgbe_fcoe.h" +#endif /* CONFIG_FCOE */ + +#include + +#ifdef HAVE_PTP_1588_CLOCK +#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#include +#endif /* HAVE_INCLUDE_TIMECOUNTER_H */ +#include +#include +#include +#endif /* HAVE_PTP_1588_CLOCK */ + +/* Ether Types */ +#define TXGBE_ETH_P_LLDP 0x88CC +#define TXGBE_ETH_P_CNM 0x22E7 + +#ifdef HAVE_XDP_SUPPORT +DECLARE_STATIC_KEY_FALSE(txgbe_xdp_locking_key); +#endif + +#ifndef XDP_PACKET_HEADROOM +#define XDP_PACKET_HEADROOM 256 +#endif + +/* TX/RX descriptor defines */ +#if defined(DEFAULT_TXD) || defined(DEFAULT_TX_WORK) +#define TXGBE_DEFAULT_TXD DEFAULT_TXD +#define TXGBE_DEFAULT_TX_WORK DEFAULT_TX_WORK +#else +#define TXGBE_DEFAULT_TXD 1024 +#define TXGBE_DEFAULT_TX_WORK 256 +#endif +#define TXGBE_MAX_TXD 8192 +#define TXGBE_MIN_TXD 128 +#define TXGBE_MAX_TX_WORK 65535 + +#if (PAGE_SIZE < 8192) +#define TXGBE_DEFAULT_RXD 512 +#define TXGBE_DEFAULT_RX_WORK 256 +#else +#define TXGBE_DEFAULT_RXD 256 +#define TXGBE_DEFAULT_RX_WORK 128 +#endif + +#define TXGBE_MAX_RXD 8192 +#define TXGBE_MIN_RXD 128 + +#define TXGBE_ETH_P_LLDP 0x88CC + +/* flow control */ +#define TXGBE_MIN_FCRTL 0x40 +#define TXGBE_MAX_FCRTL 0x7FF80 +#define TXGBE_MIN_FCRTH 0x600 +#define TXGBE_MAX_FCRTH 0x7FFF0 +#if defined(DEFAULT_FCPAUSE) +#define TXGBE_DEFAULT_FCPAUSE DEFAULT_FCPAUSE /*0x3800*/ +#else +#define TXGBE_DEFAULT_FCPAUSE 0xFFFF +#endif +#define TXGBE_MIN_FCPAUSE 0 +#define TXGBE_MAX_FCPAUSE 0xFFFF + +/* Supported Rx Buffer Sizes */ +#define TXGBE_RXBUFFER_256 256 /* Used for skb receive header */ +#define TXGBE_RXBUFFER_2K 2048 +#define TXGBE_RXBUFFER_3K 3072 +#define TXGBE_RXBUFFER_4K 4096 +#define TXGBE_RXBUFFER_1536 1536 +#define TXGBE_RXBUFFER_7K 7168 +#define TXGBE_RXBUFFER_8K 8192 +#define TXGBE_RXBUFFER_15K 15360 +#define TXGBE_MAX_RXBUFFER 16384 /* largest size for single descriptor */ + +#define TXGBE_BP_M_NULL 0 +#define TXGBE_BP_M_SFI 1 +#define TXGBE_BP_M_KR 2 +#define TXGBE_BP_M_KX4 3 +#define TXGBE_BP_M_KX 4 +#define TXGBE_BP_M_NAUTO 0 +#define TXGBE_BP_M_AUTO 1 + + +/* + * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we + * reserve 64 more, and skb_shared_info adds an additional 320 bytes more, + * this adds up to 448 bytes of extra data. + * + * Since netdev_alloc_skb now allocates a page fragment we can use a value + * of 256 and the resultant skb will have a truesize of 960 or less. + */ +#define TXGBE_RX_HDR_SIZE TXGBE_RXBUFFER_256 + +#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) + +/* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ +#define TXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#ifdef HAVE_STRUCT_DMA_ATTRS +#define TXGBE_RX_DMA_ATTR NULL +#else +#define TXGBE_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +#endif + +/* assume the kernel supports 8021p to avoid stripping vlan tags */ +#ifdef TXGBE_DISABLE_8021P_SUPPORT +#ifndef HAVE_8021P_SUPPORT +#define HAVE_8021P_SUPPORT +#endif +#endif /* TXGBE_DISABLE_8021P_SUPPORT */ + +enum txgbe_tx_flags { + /* cmd_type flags */ + TXGBE_TX_FLAGS_HW_VLAN = 0x01, + TXGBE_TX_FLAGS_TSO = 0x02, + TXGBE_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + TXGBE_TX_FLAGS_CC = 0x08, + TXGBE_TX_FLAGS_IPV4 = 0x10, + TXGBE_TX_FLAGS_CSUM = 0x20, + TXGBE_TX_FLAGS_OUTER_IPV4 = 0x100, + TXGBE_TX_FLAGS_LINKSEC = 0x200, + TXGBE_TX_FLAGS_IPSEC = 0x400, + + /* software defined flags */ + TXGBE_TX_FLAGS_SW_VLAN = 0x40, + TXGBE_TX_FLAGS_FCOE = 0x80, +}; + +/* VLAN info */ +#define TXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 +#define TXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 +#define TXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 +#define TXGBE_TX_FLAGS_VLAN_SHIFT 16 + +#define TXGBE_MAX_RX_DESC_POLL 10 + +#define TXGBE_MAX_VF_MC_ENTRIES 30 +#define TXGBE_MAX_VF_FUNCTIONS 64 +#define MAX_EMULATION_MAC_ADDRS 16 +#define TXGBE_MAX_PF_MACVLANS 15 +#define TXGBE_VF_DEVICE_ID 0x1000 + +/* must account for pools assigned to VFs. */ +#ifdef CONFIG_PCI_IOV +#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset) +#else +#define VMDQ_P(p) (p) +#endif + +#define TXGBE_VF_MAX_TX_QUEUES 4 + +struct vf_data_storage { + struct pci_dev *vfdev; + u8 IOMEM *b4_addr; + u32 b4_buf[16]; + unsigned char vf_mac_addresses[ETH_ALEN]; + u16 vf_mc_hashes[TXGBE_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + u16 default_vf_vlan_id; + u16 vlans_enabled; + bool clear_to_send; + bool pf_set_mac; + u16 pf_vlan; /* When set, guest VLAN config not allowed. */ + u16 pf_qos; + __be16 vlan_proto; + u16 min_tx_rate; + u16 max_tx_rate; + u16 vlan_count; + u8 spoofchk_enabled; + int link_enable; + int link_state; + +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN + bool rss_query_enabled; +#endif + u8 trusted; + int xcast_mode; + unsigned int vf_api; + u16 ft_filter_idx[TXGBE_MAX_RDB_5T_CTL0_FILTERS]; + u16 queue_max_tx_rate[TXGBE_VF_MAX_TX_QUEUES]; +}; + +struct vf_macvlans { + struct list_head l; + int vf; + bool free; + bool is_macvlan; + u8 vf_macvlan[ETH_ALEN]; +}; + +#ifndef TXGBE_NO_LRO +#define TXGBE_LRO_MAX 32 /*Maximum number of LRO descriptors*/ +#define TXGBE_LRO_GLOBAL 10 + +struct txgbe_lro_stats { + u32 flushed; + u32 coal; +}; + +/* + * txgbe_lro_header - header format to be aggregated by LRO + * @iph: IP header without options + * @tcp: TCP header + * @ts: Optional TCP timestamp data in TCP options + * + * This structure relies on the check above that verifies that the header + * is IPv4 and does not contain any options. 
+ */ +struct txgbe_lrohdr { + struct iphdr iph; + struct tcphdr th; + __be32 ts[0]; +}; + +struct txgbe_lro_list { + struct sk_buff_head active; + struct txgbe_lro_stats stats; +}; + +#endif /* TXGBE_NO_LRO */ +#define TXGBE_MAX_TXD_PWR 14 +#define TXGBE_MAX_DATA_PER_TXD (1 << TXGBE_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), TXGBE_MAX_DATA_PER_TXD) +#ifndef MAX_SKB_FRAGS +#define DESC_NEEDED 4 +#elif (MAX_SKB_FRAGS < 16) +#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) +#else +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#endif + +#define DESC_RESERVED 96 +#define DESC_RESERVED_AML 192 + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +struct txgbe_tx_buffer { + union txgbe_tx_desc *next_to_watch; + u32 next_eop; + unsigned long time_stamp; + union { + struct sk_buff *skb; + /* XDP uses address ptr on irq_clean */ +#ifdef HAVE_XDP_FRAME_STRUCT + struct xdp_frame *xdpf; +#else + void *data; +#endif + }; + unsigned int bytecount; + unsigned short gso_segs; + __be16 protocol; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + void *va; + u32 tx_flags; +}; + +struct txgbe_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + dma_addr_t page_dma; + union{ + struct { + struct page *page; + unsigned int page_offset; + u16 pagecnt_bias; + }; +#ifdef HAVE_AF_XDP_ZC_SUPPORT + struct { +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + void *addr; + u64 handle; +#else + bool discard; + struct xdp_buff *xdp; +#endif + }; +#endif + }; +#endif +}; + +struct txgbe_queue_stats { + u64 packets; + u64 bytes; +#ifdef BP_EXTENDED_STATS + u64 yields; + u64 misses; + u64 cleaned; +#endif /* BP_EXTENDED_STATS */ +}; + +struct txgbe_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; +}; + +struct txgbe_rx_queue_stats { + u64 rsc_count; + u64 rsc_flush; + u64 non_eop_descs; + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; + u64 csum_good_cnt; + u64 csum_err; +}; + +#define TXGBE_TS_HDR_LEN 8 +enum txgbe_ring_state_t { +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + __TXGBE_RX_3K_BUFFER, + __TXGBE_RX_BUILD_SKB_ENABLED, +#endif + __TXGBE_TX_FDIR_INIT_DONE, + __TXGBE_TX_XPS_INIT_DONE, + __TXGBE_TX_DETECT_HANG, + __TXGBE_HANG_CHECK_ARMED, + __TXGBE_RX_HS_ENABLED, + __TXGBE_RX_RSC_ENABLED, + __TXGBE_TX_XDP_RING, +#if IS_ENABLED(CONFIG_FCOE) + __TXGBE_RX_FCOE, +#endif +#ifdef HAVE_AF_XDP_ZC_SUPPORT + __TXGBE_TX_DISABLED, +#endif +}; + +struct txgbe_fwd_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct net_device *vdev; + struct txgbe_adapter *adapter; + unsigned int tx_base_queue; + unsigned int rx_base_queue; + int index; /* pool index on PF */ +}; + +#define ring_uses_build_skb(ring) \ + test_bit(__TXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state) + +#define ring_is_hs_enabled(ring) \ + test_bit(__TXGBE_RX_HS_ENABLED, &(ring)->state) +#define set_ring_hs_enabled(ring) \ + set_bit(__TXGBE_RX_HS_ENABLED, &(ring)->state) +#define clear_ring_hs_enabled(ring) \ + clear_bit(__TXGBE_RX_HS_ENABLED, &(ring)->state) +#define check_for_tx_hang(ring) \ + test_bit(__TXGBE_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__TXGBE_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__TXGBE_TX_DETECT_HANG, &(ring)->state) +#ifndef TXGBE_NO_HW_RSC +#define ring_is_rsc_enabled(ring) \ + test_bit(__TXGBE_RX_RSC_ENABLED, &(ring)->state) +#else +#define 
ring_is_rsc_enabled(ring) false +#endif +#define set_ring_rsc_enabled(ring) \ + set_bit(__TXGBE_RX_RSC_ENABLED, &(ring)->state) +#define clear_ring_rsc_enabled(ring) \ + clear_bit(__TXGBE_RX_RSC_ENABLED, &(ring)->state) + +#define ring_is_xdp(ring) \ + test_bit(__TXGBE_TX_XDP_RING, &(ring)->state) +#define set_ring_xdp(ring) \ + set_bit(__TXGBE_TX_XDP_RING, &(ring)->state) +#define clear_ring_xdp(ring) \ + clear_bit(__TXGBE_TX_XDP_RING, &(ring)->state) + +struct txgbe_ring { + struct txgbe_ring *next; /* pointer to next ring in q_vector */ + struct txgbe_q_vector *q_vector; /* backpointer to host q_vector */ + struct net_device *netdev; /* netdev ring belongs to */ + struct device *dev; /* device for DMA mapping */ + struct bpf_prog *xdp_prog; + struct txgbe_fwd_adapter *accel; + void *desc; /* descriptor ring memory */ + union { + struct txgbe_tx_buffer *tx_buffer_info; + struct txgbe_rx_buffer *rx_buffer_info; + }; + spinlock_t tx_lock; /* used in XDP mode */ + unsigned long state; + u8 __iomem *tail; + dma_addr_t dma; /* phys. address of descriptor ring */ + unsigned int size; /* length in bytes */ + + u16 count; /* amount of descriptors */ + + u8 queue_index; /* needed for multiqueue queue management */ + u8 reg_idx; /* holds the special value that gets + * the hardware register offset + * associated with this ring, which is + * different for DCB and RSS modes + */ + u16 next_to_use; + u16 next_to_clean; + u16 next_to_free; + u16 rx_offset; + +#ifdef HAVE_PTP_1588_CLOCK + unsigned long last_rx_timestamp; + +#endif + u16 rx_buf_len; + union { +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + union { + u16 next_to_alloc; + u16 next_rs_idx; + }; +#endif + struct { + u8 atr_sample_rate; + u8 atr_count; + }; + }; + +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_AF_XDP_ZC_SUPPORT + u16 xdp_tx_active; +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#endif /* HAVE_XDP_SUPPORT */ + + u8 dcb_tc; + struct txgbe_queue_stats stats; +#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync syncp; +#endif + union { + struct txgbe_tx_queue_stats tx_stats; + struct txgbe_rx_queue_stats rx_stats; + }; +#ifdef HAVE_XDP_BUFF_RXQ + struct xdp_rxq_info xdp_rxq; +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#ifdef HAVE_NETDEV_BPF_XSK_POOL + struct xsk_buff_pool *xsk_pool; +#else + struct xdp_umem *xsk_pool; +#endif +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + struct zero_copy_allocator zca; /* ZC allocator anchor */ +#endif +#endif +#endif + dma_addr_t headwb_dma; + u32 *headwb_mem; +} ____cacheline_internodealigned_in_smp; + +enum txgbe_ring_f_enum { + RING_F_NONE = 0, + RING_F_VMDQ, /* SR-IOV uses the same ring feature */ + RING_F_RSS, + RING_F_FDIR, +#if IS_ENABLED(CONFIG_FCOE) + RING_F_FCOE, +#endif /* CONFIG_FCOE */ + RING_F_ARRAY_SIZE /* must be last in enum set */ +}; + +#define TXGBE_MAX_DCB_INDICES 8 +#define TXGBE_MAX_XDP_RSS_INDICES 32 +#define TXGBE_MAX_RSS_INDICES 63 +#define TXGBE_MAX_VMDQ_INDICES 64 +#define TXGBE_MAX_FDIR_INDICES 63 +#if IS_ENABLED(CONFIG_FCOE) +#define TXGBE_MAX_FCOE_INDICES 8 +#define MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + TXGBE_MAX_FCOE_INDICES) +#define MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + TXGBE_MAX_FCOE_INDICES) +#else +#define MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) +#define MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) +#endif /* CONFIG_FCOE */ +#define MAX_XDP_QUEUES 32 + +#define TXGBE_MAX_L2A_QUEUES 4 +#define TXGBE_BAD_L2A_QUEUE 3 + +#define TXGBE_MAX_MACVLANS 32 +#define TXGBE_MAX_DCBMACVLANS 8 + +struct txgbe_ring_feature { + u16 limit; /* upper limit on feature indices */ + u16 indices; /* current value of
indices */ + u16 mask; /* Mask used for feature to ring mapping */ + u16 offset; /* offset to start of feature */ +}; + +#define TXGBE_VMDQ_8Q_MASK 0x78 +#define TXGBE_VMDQ_4Q_MASK 0x7C +#define TXGBE_VMDQ_2Q_MASK 0x7E + +#define TXGBE_RSS_64Q_MASK 0x3F +#define TXGBE_RSS_16Q_MASK 0xF +#define TXGBE_RSS_8Q_MASK 0x7 +#define TXGBE_RSS_4Q_MASK 0x3 +#define TXGBE_RSS_2Q_MASK 0x1 +#define TXGBE_RSS_DISABLED_MASK 0x0 + +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + + +#if (PAGE_SIZE < 8192) +#define TXGBE_MAX_2K_FRAME_BUILD_SKB (TXGBE_RXBUFFER_1536 - NET_IP_ALIGN) +#define TXGBE_2K_TOO_SMALL_WITH_PADDING \ +((NET_SKB_PAD + TXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(TXGBE_RXBUFFER_2K)) + +static inline int txgbe_compute_pad(int rx_buf_len) +{ + int page_size, pad_size; + + page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); + pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len; + + return pad_size; +} + +static inline int txgbe_skb_pad(void) +{ + int rx_buf_len; + + /* If a 2K buffer cannot handle a standard Ethernet frame then + * optimize padding for a 3K buffer instead of a 1.5K buffer. + * + * For a 3K buffer we need to add enough padding to allow for + * tailroom due to NET_IP_ALIGN possibly shifting us out of + * cache-line alignment. + */ + if (TXGBE_2K_TOO_SMALL_WITH_PADDING) + rx_buf_len = TXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN); + else + rx_buf_len = TXGBE_RXBUFFER_1536; + + /* if needed make room for NET_IP_ALIGN */ + rx_buf_len -= NET_IP_ALIGN; + + return txgbe_compute_pad(rx_buf_len); +} + +#define TXGBE_SKB_PAD txgbe_skb_pad() +#else +#define TXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#endif + +/* + * FCoE requires that all Rx buffers be over 2200 bytes in length. Since + * this is twice the size of a half page we need to double the page order + * for FCoE enabled Rx queues. + */ +static inline unsigned int txgbe_rx_bufsz(struct txgbe_ring __maybe_unused *ring) +{ +#if MAX_SKB_FRAGS < 8 + return ALIGN(TXGBE_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024); +#else + if (test_bit(__TXGBE_RX_3K_BUFFER, &ring->state)) + return TXGBE_RXBUFFER_3K; +#if (PAGE_SIZE < 8192) + if (ring_uses_build_skb(ring)) + return TXGBE_MAX_2K_FRAME_BUILD_SKB; +#endif + return TXGBE_RXBUFFER_2K; +#endif +} + +static inline unsigned int txgbe_rx_pg_order(struct txgbe_ring __maybe_unused *ring) +{ +#if (PAGE_SIZE < 8192) + if (test_bit(__TXGBE_RX_3K_BUFFER, &ring->state)) + return 1; +#endif + return 0; +} +#define txgbe_rx_pg_size(_ring) (PAGE_SIZE << txgbe_rx_pg_order(_ring)) + +static inline unsigned int txgbe_rx_offset(struct txgbe_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? TXGBE_SKB_PAD : 0; +} + + +#endif +struct txgbe_ring_container { + struct txgbe_ring *ring; /* pointer to linked list of rings */ + unsigned long next_update; /* jiffies value of last update */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +/* iterator for handling rings in ring container */ +#define txgbe_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + +#define MAX_RX_PACKET_BUFFERS ((adapter->flags & TXGBE_FLAG_DCB_ENABLED) \ + ? 8 : 1) +#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS + +/* MAX_MSIX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. 
+ */ +struct txgbe_q_vector { + struct txgbe_adapter *adapter; + int cpu; /* CPU for DCA */ + u16 v_idx; /* index of q_vector within array, also used for + * finding the bit in EICR and friends that + * represents the vector for this ring */ + u16 itr; /* Interrupt throttle rate written to EITR */ + struct txgbe_ring_container rx, tx; + + struct napi_struct napi; +#ifndef HAVE_NETDEV_NAPI_LIST + struct net_device poll_dev; +#endif +#ifdef HAVE_IRQ_AFFINITY_HINT + cpumask_t affinity_mask; +#endif +#ifndef TXGBE_NO_LRO + struct txgbe_lro_list lrolist; /* LRO list for queue vector*/ +#endif + int numa_node; + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 17]; + bool netpoll_rx; + +#ifdef HAVE_NDO_BUSY_POLL + atomic_t state; +#endif /* HAVE_NDO_BUSY_POLL */ + + /* for dynamic allocation of rings associated with this q_vector */ + struct txgbe_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + +#ifdef HAVE_NDO_BUSY_POLL +enum txgbe_qv_state_t { + TXGBE_QV_STATE_IDLE = 0, + TXGBE_QV_STATE_NAPI, + TXGBE_QV_STATE_POLL, + TXGBE_QV_STATE_DISABLE +}; + +static inline void txgbe_qv_init_lock(struct txgbe_q_vector *q_vector) +{ + /* reset state to idle */ + atomic_set(&q_vector->state, TXGBE_QV_STATE_IDLE); +} + +/* called from the device poll routine to get ownership of a q_vector */ +static inline bool txgbe_qv_lock_napi(struct txgbe_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, TXGBE_QV_STATE_IDLE, + TXGBE_QV_STATE_NAPI); +#ifdef BP_EXTENDED_STATS + if (rc != TXGBE_QV_STATE_IDLE) + q_vector->tx.ring->stats.yields++; +#endif + + return rc == TXGBE_QV_STATE_IDLE; +} + +/* returns true is someone tried to get the qv while napi had it */ +static inline void txgbe_qv_unlock_napi(struct txgbe_q_vector *q_vector) +{ + WARN_ON(atomic_read(&q_vector->state) != TXGBE_QV_STATE_NAPI); + + /* flush any outstanding Rx frames */ + if (q_vector->napi.gro_list) + napi_gro_flush(&q_vector->napi, false); + + /* reset state to idle */ + atomic_set(&q_vector->state, TXGBE_QV_STATE_IDLE); +} + +/* called from txgbe_low_latency_poll() */ +static inline bool txgbe_qv_lock_poll(struct txgbe_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, TXGBE_QV_STATE_IDLE, + TXGBE_QV_STATE_POLL); +#ifdef BP_EXTENDED_STATS + if (rc != TXGBE_QV_STATE_IDLE) + q_vector->tx.ring->stats.yields++; +#endif + return rc == TXGBE_QV_STATE_IDLE; +} + +/* returns true if someone tried to get the qv while it was locked */ +static inline void txgbe_qv_unlock_poll(struct txgbe_q_vector *q_vector) +{ + WARN_ON(atomic_read(&q_vector->state) != TXGBE_QV_STATE_POLL); + + /* reset state to idle */ + atomic_set(&q_vector->state, TXGBE_QV_STATE_IDLE); +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool txgbe_qv_busy_polling(struct txgbe_q_vector *q_vector) +{ + return atomic_read(&q_vector->state) == TXGBE_QV_STATE_POLL; +} + +/* false if QV is currently owned */ +static inline bool txgbe_qv_disable(struct txgbe_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, TXGBE_QV_STATE_IDLE, + TXGBE_QV_STATE_DISABLE); + + return rc == TXGBE_QV_STATE_IDLE; +} + +#endif /* HAVE_NDO_BUSY_POLL */ +#ifdef TXGBE_HWMON + +#define TXGBE_HWMON_TYPE_TEMP 0 +#define TXGBE_HWMON_TYPE_ALARMTHRESH 1 +#define TXGBE_HWMON_TYPE_DALARMTHRESH 2 + +struct hwmon_attr { + struct device_attribute dev_attr; + struct txgbe_hw *hw; + struct txgbe_thermal_diode_data *sensor; + char name[19]; +}; + +struct hwmon_buff { + struct device *device; + struct 
hwmon_attr *hwmon_list; + unsigned int n_hwmon; +}; +#endif /* TXGBE_HWMON */ + +/* + * microsecond values for various ITR rates shifted by 2 to fit itr register + * with the first 3 bits reserved 0 + */ +#define TXGBE_MIN_RSC_ITR 24 +#define TXGBE_100K_ITR 40 +#define TXGBE_20K_ITR 200 +#define TXGBE_16K_ITR 248 +#define TXGBE_12K_ITR 336 + +#define TXGBE_ITR_ADAPTIVE_MIN_INC 2 +#define TXGBE_ITR_ADAPTIVE_MIN_USECS 10 +#define TXGBE_ITR_ADAPTIVE_MAX_USECS 84 +#define TXGBE_ITR_ADAPTIVE_LATENCY 0x80 +#define TXGBE_ITR_ADAPTIVE_BULK 0x00 +#define TXGBE_ITR_ADAPTIVE_MASK_USECS (TXGBE_ITR_ADAPTIVE_LATENCY - \ + TXGBE_ITR_ADAPTIVE_MIN_INC) + +/* txgbe_test_staterr - tests bits in Rx descriptor status and error fields */ +static inline __le32 txgbe_test_staterr(union txgbe_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +/* txgbe_desc_unused - calculate if we have unused descriptors */ +static inline u16 txgbe_desc_unused(struct txgbe_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; +} + +#define TXGBE_RX_DESC(R, i) \ + (&(((union txgbe_rx_desc *)((R)->desc))[i])) +#define TXGBE_TX_DESC(R, i) \ + (&(((union txgbe_tx_desc *)((R)->desc))[i])) +#define TXGBE_TX_CTXTDESC(R, i) \ + (&(((struct txgbe_tx_context_desc *)((R)->desc))[i])) + +#define TXGBE_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */ +#if IS_ENABLED(CONFIG_FCOE) +/* use 3K as the baby jumbo frame size for FCoE */ +#define TXGBE_FCOE_JUMBO_FRAME_SIZE 3072 +#endif /* CONFIG_FCOE */ + +#define TCP_TIMER_VECTOR 0 +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR + TCP_TIMER_VECTOR) + +#define TXGBE_MAX_MSIX_Q_VECTORS_SAPPHIRE 64 + +struct txgbe_mac_addr { + u8 addr[ETH_ALEN]; + u16 state; /* bitmask */ + u64 pools; +}; + +#define TXGBE_MAC_STATE_DEFAULT 0x1 +#define TXGBE_MAC_STATE_MODIFIED 0x2 +#define TXGBE_MAC_STATE_IN_USE 0x4 + +#ifdef TXGBE_PROCFS +struct txgbe_therm_proc_data { + struct txgbe_hw *hw; + struct txgbe_thermal_diode_data *sensor_data; +}; +#endif + +/* + * Only for array allocations in our adapter struct. + * we can actually assign 64 queue vectors based on our extended-extended + * interrupt registers. 
+ */ +#define MAX_MSIX_Q_VECTORS TXGBE_MAX_MSIX_Q_VECTORS_SAPPHIRE +#define MAX_MSIX_COUNT TXGBE_MAX_MSIX_VECTORS_SAPPHIRE + +#define MIN_MSIX_Q_VECTORS 1 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) + +/* default to trying for four seconds */ +#define TXGBE_TRY_LINK_TIMEOUT (4 * HZ) +#define TXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */ + +/** + * txgbe_adapter.flag + **/ +#define TXGBE_FLAG_MSI_CAPABLE (u32)(1 << 0) +#define TXGBE_FLAG_MSI_ENABLED (u32)(1 << 1) +#define TXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2) +#define TXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3) +#ifndef TXGBE_NO_LLI +#define TXGBE_FLAG_LLI_PUSH (u32)(1 << 4) +#endif + +#define TXGBE_FLAG_TPH_ENABLED (u32)(1 << 6) +#define TXGBE_FLAG_TPH_CAPABLE (u32)(1 << 7) +#define TXGBE_FLAG_TPH_ENABLED_DATA (u32)(1 << 8) + +#define TXGBE_FLAG_MQ_CAPABLE (u32)(1 << 9) +#define TXGBE_FLAG_DCB_ENABLED (u32)(1 << 10) +#define TXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 11) +#define TXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 12) +#define TXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 13) +#define TXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 14) +#define TXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 15) +#define TXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 16) +#if IS_ENABLED(CONFIG_FCOE) +#define TXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 17) +#define TXGBE_FLAG_FCOE_ENABLED (u32)(1 << 18) +#endif /* CONFIG_FCOE */ +#define TXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 19) +#define TXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 20) +#define TXGBE_FLAG_SRIOV_REPLICATION_ENABLE (u32)(1 << 21) +#define TXGBE_FLAG_SRIOV_L2SWITCH_ENABLE (u32)(1 << 22) +#define TXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE (u32)(1 << 23) +#define TXGBE_FLAG_RX_HWTSTAMP_ENABLED (u32)(1 << 24) +#define TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE (u32)(1 << 25) +#define TXGBE_FLAG_VXLAN_OFFLOAD_ENABLE (u32)(1 << 26) +#define TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER (u32)(1 << 27) +#define TXGBE_FLAG_NEED_ETH_PHY_RESET (u32)(1 << 28) +#define TXGBE_FLAG_RX_HS_ENABLED (u32)(1 << 30) +#define TXGBE_FLAG_LINKSEC_ENABLED (u32)(1 << 31) +#define TXGBE_FLAG_IPSEC_ENABLED (u32)(1 << 5) + +/* preset defaults */ +#define TXGBE_FLAGS_SP_INIT (TXGBE_FLAG_MSI_CAPABLE \ + | TXGBE_FLAG_MSIX_CAPABLE \ + | TXGBE_FLAG_MQ_CAPABLE \ + | TXGBE_FLAG_SRIOV_CAPABLE) + +/** + * txgbe_adapter.flag2 + **/ +#ifndef TXGBE_NO_HW_RSC +#define TXGBE_FLAG2_RSC_CAPABLE (1U << 0) +#define TXGBE_FLAG2_RSC_ENABLED (1U << 1) +#else +#define TXGBE_FLAG2_RSC_CAPABLE (0U) +#define TXGBE_FLAG2_RSC_ENABLED (0U) +#endif +#define TXGBE_FLAG2_TEMP_SENSOR_CAPABLE (1U << 3) +#define TXGBE_FLAG2_TEMP_SENSOR_EVENT (1U << 4) +#define TXGBE_FLAG2_SEARCH_FOR_SFP (1U << 5) +#define TXGBE_FLAG2_SFP_NEEDS_RESET (1U << 6) +#define TXGBE_FLAG2_PF_RESET_REQUESTED (1U << 7) +#define TXGBE_FLAG2_FDIR_REQUIRES_REINIT (1U << 8) +#define TXGBE_FLAG2_RSS_FIELD_IPV4_UDP (1U << 9) +#define TXGBE_FLAG2_RSS_FIELD_IPV6_UDP (1U << 10) +#define TXGBE_FLAG2_RSS_ENABLED (1U << 12) +#define TXGBE_FLAG2_PTP_PPS_ENABLED (1U << 11) + +#define TXGBE_FLAG2_EEE_CAPABLE (1U << 14) +#define TXGBE_FLAG2_EEE_ENABLED (1U << 15) +#define TXGBE_FLAG2_VXLAN_REREG_NEEDED (1U << 16) +#define TXGBE_FLAG2_VLAN_PROMISC (1U << 17) +#define TXGBE_FLAG2_DEV_RESET_REQUESTED (1U << 18) +#define TXGBE_FLAG2_RESET_INTR_RECEIVED (1U << 19) +#define TXGBE_FLAG2_GLOBAL_RESET_REQUESTED (1U << 20) +#define TXGBE_FLAG2_CLOUD_SWITCH_ENABLED (1U << 21) +#define TXGBE_FLAG2_MNG_REG_ACCESS_DISABLED (1U << 22) +#define KR (1U << 23) +#define TXGBE_FLAG2_KR_TRAINING (1U << 24) +#define TXGBE_FLAG2_KR_AUTO (1U << 25) +#define 
TXGBE_FLAG2_LINK_DOWN (1U << 26) +#define TXGBE_FLAG2_KR_PRO_DOWN (1U << 27) +#define TXGBE_FLAG2_KR_PRO_REINIT (1U << 28) +#define TXGBE_FLAG2_ECC_ERR_RESET (1U << 29) +#define TXGBE_FLAG2_RX_LEGACY (1U << 30) +#define TXGBE_FLAG2_PCIE_NEED_RECOVER (1U << 31) +/* amlite: new SW-FW mbox */ +//#define TXGBE_FLAG2_SWFW_MBOX_REPLY (1U << 30) +#define TXGBE_FLAG2_SERVICE_RUNNING (1U << 13) + +/* amlite: dma reset */ +#define TXGBE_FLAG2_DMA_RESET_REQUESTED (1U << 2) + +#define TXGBE_FLAG2_PCIE_NEED_Q_RESET (1U << 30) + +#define TXGBE_FLAG3_PHY_EVENT (1U << 0) +#define TXGBE_FLAG3_TEMP_SENSOR_INPROGRESS (1U << 1) + +#define TXGBE_SET_FLAG(_input, _flag, _result) \ + ((_flag <= _result) ? \ + ((u32)(_input & _flag) * (_result / _flag)) : \ + ((u32)(_input & _flag) / (_flag / _result))) + +enum txgbe_isb_idx { + TXGBE_ISB_HEADER, + TXGBE_ISB_MISC, + TXGBE_ISB_VEC0, + TXGBE_ISB_VEC1, + TXGBE_ISB_MAX +}; +#define TXGBE_PHY_FEC_RS (1U) +#define TXGBE_PHY_FEC_BASER (1U << 1) +#define TXGBE_PHY_FEC_OFF (1U << 2) +#define TXGBE_PHY_FEC_AUTO (TXGBE_PHY_FEC_OFF | TXGBE_PHY_FEC_BASER |\ + TXGBE_PHY_FEC_RS) + +/* board specific private data structure */ +struct txgbe_adapter { +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) ||\ + defined(NETIF_F_HW_VLAN_STAG_TX) +#ifdef HAVE_VLAN_RX_REGISTER + struct vlan_group *vlgrp; /* must be first, see txgbe_receive_skb */ +#else /* HAVE_VLAN_RX_REGISTER */ + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; +#endif /* HAVE_VLAN_RX_REGISTER */ +#endif + /* OS defined structs */ + struct net_device *netdev; + struct bpf_prog *xdp_prog; + struct pci_dev *pdev; + + unsigned long state; + u32 bp_link_mode; + u32 curbp_link_mode; + /* Some features need tri-state capability, + * thus the additional *_CAPABLE flags. 
+ */ + u32 flags; + u32 flags2; + u32 flags3; + u8 tx_unidir_mode; + u8 an73_mode; + u8 backplane_an; + u8 an73; + u8 autoneg; + u16 ffe_main; + u16 ffe_pre; + u16 ffe_post; + u8 ffe_set; + u16 fec_mode; + u8 backplane_mode; + u8 backplane_auto; + struct phytxeq aml_txeq; + bool an_done; + u32 fsm; + + bool cloud_mode; + + /* Tx fast path data */ + int num_tx_queues; + u16 tx_itr_setting; + u16 tx_work_limit; + + /* Rx fast path data */ + int num_rx_queues; + u16 rx_itr_setting; + u16 rx_work_limit; + + unsigned int num_vmdqs; /* does not include pools assigned to VFs */ + unsigned int queues_per_pool; + + bool lro_before_xdp; + u16 old_rss_limit; + /* XDP */ + int num_xdp_queues; + struct txgbe_ring *xdp_ring[MAX_XDP_QUEUES]; + unsigned long *af_xdp_zc_qps; + + /* TX */ + struct txgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; + + u64 restart_queue; + u64 lsc_int; + u32 tx_timeout_count; + + /* RX */ + struct txgbe_ring *rx_ring[MAX_RX_QUEUES]; + int num_rx_pools; /* does not include pools assigned to VFs */ + int num_rx_queues_per_pool; + u64 hw_csum_rx_error; + u64 hw_csum_rx_good; + u64 hw_rx_no_dma_resources; + u64 rsc_total_count; + u64 rsc_total_flush; + u64 non_eop_descs; + u32 alloc_rx_page_failed; + u32 alloc_rx_buff_failed; + + struct txgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; + +#ifdef HAVE_DCBNL_IEEE + struct ieee_pfc *txgbe_ieee_pfc; + struct ieee_ets *txgbe_ieee_ets; +#endif + struct txgbe_dcb_config dcb_cfg; + struct txgbe_dcb_config temp_dcb_cfg; + u8 dcb_set_bitmap; + u8 dcbx_cap; +#ifndef HAVE_MQPRIO + u8 dcb_tc; +#endif + enum txgbe_fc_mode last_lfc_mode; + + int num_q_vectors; /* current number of q_vectors for device */ + int max_q_vectors; /* upper limit of q_vectors for device */ + struct txgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE]; + struct msix_entry *msix_entries; + +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats net_stats; +#endif +#ifndef TXGBE_NO_LRO + struct txgbe_lro_stats lro_stats; +#endif + +#ifdef ETHTOOL_TEST + u64 test_icr; + struct txgbe_ring test_tx_ring; + struct txgbe_ring test_rx_ring; +#endif + + /* structs defined in txgbe_hw.h */ + struct txgbe_hw hw; + u16 msg_enable; + struct txgbe_hw_stats stats; +#ifndef TXGBE_NO_LLI + u32 lli_port; + u32 lli_size; + u32 lli_etype; + u32 lli_vlan_pri; +#endif /* TXGBE_NO_LLI */ + + u32 *config_space; + u64 tx_busy; + unsigned int tx_ring_count; + unsigned int xdp_ring_count; + unsigned int rx_ring_count; + + u32 link_speed; + u32 speed; + bool link_up; + unsigned long sfp_poll_time; + unsigned long link_check_timeout; + struct mutex e56_lock; + + struct timer_list service_timer; + struct work_struct service_task; + struct work_struct sfp_sta_task; + struct work_struct temp_task; +#ifdef POLL_LINK_STATUS + struct timer_list link_check_timer; +#endif + struct hlist_head fdir_filter_list; + unsigned long fdir_overflow; /* number of times ATR was backed off */ + union txgbe_atr_input fdir_mask; + int fdir_filter_count; + u32 fdir_pballoc; + u32 atr_sample_rate; + spinlock_t fdir_perfect_lock; + + struct txgbe_etype_filter_info etype_filter_info; + struct txgbe_5tuple_filter_info ft_filter_info; + +#if IS_ENABLED(CONFIG_FCOE) + struct txgbe_fcoe fcoe; +#endif /* CONFIG_FCOE */ + u8 __iomem *io_addr; /* Mainly for iounmap use */ + u32 wol; + + u16 bd_number; +#ifdef HAVE_BRIDGE_ATTRIBS + u16 bridge_mode; +#endif + u8 fec_link_mode; + u8 cur_fec_link; + bool link_valid; + u32 etrack_id; + char eeprom_id[32]; + char fl_version[16]; + char fw_version[64]; + bool 
netdev_registered; + u32 interrupt_event; +#ifdef HAVE_ETHTOOL_SET_PHYS_ID + u32 led_reg; +#endif + +#ifdef HAVE_PTP_1588_CLOCK + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned long last_overflow_check; + unsigned long last_rx_ptp_check; + spinlock_t tmreg_lock; + struct cyclecounter hw_cc; + struct timecounter hw_tc; + u32 base_incval; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + void (*ptp_setup_sdp) (struct txgbe_adapter *); + u64 pps_edge_start; + u64 pps_edge_end; + u64 sec_to_cc; + u8 pps_enabled; +#endif /* HAVE_PTP_1588_CLOCK */ + + DECLARE_BITMAP(active_vfs, TXGBE_MAX_VF_FUNCTIONS); + unsigned int num_vfs; + unsigned int max_vfs; + struct vf_data_storage *vfinfo; + int vf_rate_link_speed; + struct vf_macvlans vf_mvs; + struct vf_macvlans *mv_list; +#ifdef CONFIG_PCI_IOV + u32 timer_event_accumulator; + u32 vferr_refcount; +#endif + struct txgbe_mac_addr *mac_table; +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) + __le16 vxlan_port; +#endif /* HAVE_UDP_ENC_RX_OFFLAD || HAVE_VXLAN_RX_OFFLOAD */ +#ifdef HAVE_UDP_ENC_RX_OFFLOAD + __le16 geneve_port; +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ +#ifdef TXGBE_SYSFS +#ifdef TXGBE_HWMON + struct hwmon_buff txgbe_hwmon_buff; +#endif /* TXGBE_HWMON */ +#else /* TXGBE_SYSFS */ +#ifdef TXGBE_PROCFS + struct proc_dir_entry *eth_dir; + struct proc_dir_entry *info_dir; + u64 old_lsc; + struct proc_dir_entry *therm_dir; + struct txgbe_therm_proc_data therm_data; +#endif /* TXGBE_PROCFS */ +#endif /* TXGBE_SYSFS */ + +#ifdef HAVE_TXGBE_DEBUG_FS + struct dentry *txgbe_dbg_adapter; +#endif /*HAVE_TXGBE_DEBUG_FS*/ + u8 default_up; +#ifdef HAVE_TX_MQ +#ifndef HAVE_NETDEV_SELECT_QUEUE + unsigned int indices; +#endif /* !HAVE_NETDEV_SELECT_QUEUE*/ +#endif /* HAVE_TX_MQ */ + unsigned long fwd_bitmask; /* bitmask indicating in use pools */ + unsigned long tx_timeout_last_recovery; + u32 tx_timeout_recovery_level; + +#define TXGBE_MAX_RETA_ENTRIES 128 + u8 rss_indir_tbl[TXGBE_MAX_RETA_ENTRIES]; +#define TXGBE_RSS_KEY_SIZE 40 + u32 rss_key[TXGBE_RSS_KEY_SIZE / sizeof(u32)]; + + void *ipsec; + + /* misc interrupt status block */ + dma_addr_t isb_dma; + u32 *isb_mem; + u32 isb_tag[TXGBE_ISB_MAX]; + + u64 eth_priv_flags; +#define TXGBE_ETH_PRIV_FLAG_LLDP BIT(0) +#define TXGBE_ETH_PRIV_FLAG_LEGACY_RX BIT(1) + +#ifdef HAVE_AF_XDP_ZC_SUPPORT + /* AF_XDP zero-copy */ +#ifdef HAVE_NETDEV_BPF_XSK_POOL + struct xsk_buff_pool **xsk_pools; +#else + struct xdp_umem **xsk_pools; +#endif /* HAVE_NETDEV_BPF_XSK_POOL */ + u16 num_xsk_pools_used; + u16 num_xsk_pools; +#endif + bool cmplt_to_dis; + u8 i2c_eeprom[512]; + u32 eeprom_len; + u32 eeprom_type; + + /* amlite: new SW-FW mbox */ +/* u32 swfw_mbox_buf[64]; */ + u8 swfw_index; + u8 desc_reserved; + + int amlite_temp; + + int vlan_rate_link_speed; + DECLARE_BITMAP(limited_vlans, 4096); + int active_vlan_limited; + int queue_rate_limit[64]; // From back to front +}; + +static inline u32 txgbe_misc_isb(struct txgbe_adapter *adapter, + enum txgbe_isb_idx idx) +{ + u32 cur_tag = 0; + u32 cur_diff = 0; + + cur_tag = adapter->isb_mem[TXGBE_ISB_HEADER]; + cur_diff = cur_tag - adapter->isb_tag[idx]; + + adapter->isb_tag[idx] = cur_tag; + + return adapter->isb_mem[idx]; +} + +static inline u8 txgbe_max_rss_indices(struct txgbe_adapter *adapter) +{ + if (adapter->xdp_prog) + return TXGBE_MAX_XDP_RSS_INDICES; + return 
TXGBE_MAX_RSS_INDICES; +} + +struct txgbe_fdir_filter { + struct hlist_node fdir_node; + union txgbe_atr_input filter; + u16 sw_idx; + u64 action; +}; + +enum txgbe_state_t { + __TXGBE_TESTING, + __TXGBE_RESETTING, + __TXGBE_DOWN, + __TXGBE_HANGING, + __TXGBE_DISABLED, + __TXGBE_REMOVING, + __TXGBE_SERVICE_SCHED, + __TXGBE_SERVICE_INITED, + __TXGBE_IN_SFP_INIT, +#ifdef HAVE_PTP_1588_CLOCK + __TXGBE_PTP_RUNNING, + __TXGBE_PTP_TX_IN_PROGRESS, +#endif + __TXGBE_SWFW_BUSY, +}; + +struct txgbe_cb { +#ifdef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + union { /* Union defining head/tail partner */ + struct sk_buff *head; + struct sk_buff *tail; + }; +#endif + dma_addr_t dma; +#ifndef TXGBE_NO_LRO + __be32 tsecr; /* timestamp echo response */ + u32 tsval; /* timestamp value in host order */ + u32 next_seq; /* next expected sequence number */ + u16 free; /* 65521 minus total size */ + u16 mss; /* size of data portion of packet */ +#endif /* TXGBE_NO_LRO */ +#ifdef HAVE_VLAN_RX_REGISTER + u16 vid; /* VLAN tag */ +#endif + u16 append_cnt; /* number of skb's appended */ +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + bool page_released; + bool dma_released; +#endif +}; +#define TXGBE_CB(skb) ((struct txgbe_cb *)(skb)->cb) + +/* ESX txgbe CIM IOCTL definition */ + +#ifdef TXGBE_SYSFS +void txgbe_sysfs_exit(struct txgbe_adapter *adapter); +int txgbe_sysfs_init(struct txgbe_adapter *adapter); +#endif /* TXGBE_SYSFS */ +#ifdef TXGBE_PROCFS +void txgbe_procfs_exit(struct txgbe_adapter *adapter); +int txgbe_procfs_init(struct txgbe_adapter *adapter); +int txgbe_procfs_topdir_init(void); +void txgbe_procfs_topdir_exit(void); +#endif /* TXGBE_PROCFS */ + +extern struct dcbnl_rtnl_ops dcbnl_ops; +int txgbe_copy_dcb_cfg(struct txgbe_adapter *adapter, int tc_max); + +u8 txgbe_dcb_txq_to_tc(struct txgbe_adapter *adapter, u8 index); + +/* needed by txgbe_main.c */ +int txgbe_validate_mac_addr(u8 *mc_addr); +void txgbe_check_options(struct txgbe_adapter *adapter); +void txgbe_assign_netdev_ops(struct net_device *netdev); + +/* needed by txgbe_ethtool.c */ +extern char txgbe_driver_name[]; +extern const char txgbe_driver_version[]; + +void txgbe_service_event_schedule(struct txgbe_adapter *adapter); +void txgbe_irq_disable(struct txgbe_adapter *adapter); +void txgbe_irq_enable(struct txgbe_adapter *adapter, bool queues, bool flush); +int txgbe_open(struct net_device *netdev); +int txgbe_close(struct net_device *netdev); +void txgbe_up(struct txgbe_adapter *adapter); +void txgbe_down(struct txgbe_adapter *adapter); +void txgbe_reinit_locked(struct txgbe_adapter *adapter); +void txgbe_reset(struct txgbe_adapter *adapter); +void txgbe_set_ethtool_ops(struct net_device *netdev); +int txgbe_setup_rx_resources(struct txgbe_ring *); +int txgbe_setup_tx_resources(struct txgbe_ring *); +void txgbe_free_rx_resources(struct txgbe_ring *); +void txgbe_free_tx_resources(struct txgbe_ring *); +void txgbe_configure_rx_ring(struct txgbe_adapter *, + struct txgbe_ring *); +void txgbe_configure_tx_ring(struct txgbe_adapter *, + struct txgbe_ring *); +void txgbe_update_stats(struct txgbe_adapter *adapter); +int txgbe_init_interrupt_scheme(struct txgbe_adapter *adapter); +void txgbe_reset_interrupt_capability(struct txgbe_adapter *adapter); +void txgbe_set_interrupt_capability(struct txgbe_adapter *adapter); +void txgbe_clear_interrupt_scheme(struct txgbe_adapter *adapter); +bool txgbe_is_txgbe(struct pci_dev *pcidev); +netdev_tx_t txgbe_xmit_frame_ring(struct sk_buff *, + struct txgbe_adapter *, + struct txgbe_ring *); +void 
txgbe_unmap_and_free_tx_resource(struct txgbe_ring *, + struct txgbe_tx_buffer *); +bool txgbe_alloc_rx_buffers(struct txgbe_ring *rx_ring, u16 cleaned_count); +void txgbe_configure_rscctl(struct txgbe_adapter *adapter, + struct txgbe_ring *); +void txgbe_clear_rscctl(struct txgbe_adapter *adapter, + struct txgbe_ring *); +void txgbe_clear_vxlan_port(struct txgbe_adapter *); +void txgbe_set_rx_mode(struct net_device *netdev); +int txgbe_write_mc_addr_list(struct net_device *netdev); +int txgbe_setup_tc(struct net_device *dev, u8 tc); +void txgbe_tx_ctxtdesc(struct txgbe_ring *, u32, u32, u32, u32); +void txgbe_do_reset(struct net_device *netdev); +void txgbe_write_eitr(struct txgbe_q_vector *q_vector); +int txgbe_poll(struct napi_struct *napi, int budget); +void txgbe_disable_rx_queue(struct txgbe_adapter *adapter, + struct txgbe_ring *); +void txgbe_vlan_strip_enable(struct txgbe_adapter *adapter); +void txgbe_vlan_strip_disable(struct txgbe_adapter *adapter); +#ifdef ETHTOOL_OPS_COMPAT +int ethtool_ioctl(struct ifreq *ifr); +#endif +void txgbe_print_tx_hang_status(struct txgbe_adapter *adapter); + +#if IS_ENABLED(CONFIG_FCOE) +void txgbe_configure_fcoe(struct txgbe_adapter *adapter); +int txgbe_fso(struct txgbe_ring *tx_ring, + struct txgbe_tx_buffer *first, + u8 *hdr_len); +int txgbe_fcoe_ddp(struct txgbe_adapter *adapter, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb); +int txgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc); +#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET +int txgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc); +#endif /* HAVE_NETDEV_OPS_FCOE_DDP_TARGET */ +int txgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); +int txgbe_setup_fcoe_ddp_resources(struct txgbe_adapter *adapter); +void txgbe_free_fcoe_ddp_resources(struct txgbe_adapter *adapter); +#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE +int txgbe_fcoe_enable(struct net_device *netdev); +int txgbe_fcoe_disable(struct net_device *netdev); +#else +int txgbe_fcoe_ddp_enable(struct txgbe_adapter *adapter); +void txgbe_fcoe_ddp_disable(struct txgbe_adapter *adapter); +#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */ +#if IS_ENABLED(CONFIG_DCB) +#ifdef HAVE_DCBNL_OPS_GETAPP +u8 txgbe_fcoe_getapp(struct net_device *netdev); +#endif /* HAVE_DCBNL_OPS_GETAPP */ +u8 txgbe_fcoe_setapp(struct txgbe_adapter *adapter, u8 up); +#endif /* CONFIG_DCB */ +u8 txgbe_fcoe_get_tc(struct txgbe_adapter *adapter); +#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN +int txgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); +#endif +#endif /* CONFIG_FCOE */ + +#ifdef HAVE_TXGBE_DEBUG_FS +void txgbe_dbg_adapter_init(struct txgbe_adapter *adapter); +void txgbe_dbg_adapter_exit(struct txgbe_adapter *adapter); +void txgbe_dbg_init(void); +void txgbe_dbg_exit(void); +#endif /* HAVE_TXGBE_DEBUG_FS */ +void txgbe_dump(struct txgbe_adapter *adapter); +void txgbe_setup_reta(struct txgbe_adapter *adapter); + +static inline struct netdev_queue *txring_txq(const struct txgbe_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} + +#if IS_ENABLED(CONFIG_DCB) +#ifdef HAVE_DCBNL_IEEE +s32 txgbe_dcb_hw_ets(struct txgbe_hw *hw, struct ieee_ets *ets, int max_frame); +#endif /* HAVE_DCBNL_IEEE */ +#endif /* CONFIG_DCB */ + +int txgbe_wol_supported(struct txgbe_adapter *adapter); +int txgbe_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd); +int txgbe_write_uc_addr_list(struct net_device *netdev, int pool); +void 
txgbe_full_sync_mac_table(struct txgbe_adapter *adapter); +int txgbe_add_mac_filter(struct txgbe_adapter *adapter, + const u8 *addr, u16 pool); +int txgbe_del_mac_filter(struct txgbe_adapter *adapter, + const u8 *addr, u16 pool); +int txgbe_available_rars(struct txgbe_adapter *adapter); +#ifndef HAVE_VLAN_RX_REGISTER +void txgbe_vlan_mode(struct net_device *, u32); +#endif + +#ifdef HAVE_PTP_1588_CLOCK +void txgbe_ptp_init(struct txgbe_adapter *adapter); +void txgbe_ptp_stop(struct txgbe_adapter *adapter); +void txgbe_ptp_suspend(struct txgbe_adapter *adapter); +void txgbe_ptp_overflow_check(struct txgbe_adapter *adapter); +void txgbe_ptp_rx_hang(struct txgbe_adapter *adapter); +void txgbe_ptp_rx_hwtstamp(struct txgbe_adapter *adapter, struct sk_buff *skb); +int txgbe_ptp_set_ts_config(struct txgbe_adapter *adapter, struct ifreq *ifr); +int txgbe_ptp_get_ts_config(struct txgbe_adapter *adapter, struct ifreq *ifr); +void txgbe_ptp_start_cyclecounter(struct txgbe_adapter *adapter); +void txgbe_ptp_reset(struct txgbe_adapter *adapter); +void txgbe_ptp_check_pps_event(struct txgbe_adapter *adapter); +#endif /* HAVE_PTP_1588_CLOCK */ +#ifdef CONFIG_PCI_IOV +void txgbe_sriov_reinit(struct txgbe_adapter *adapter); +#endif + +void txgbe_set_rx_drop_en(struct txgbe_adapter *adapter); + +u32 txgbe_rss_indir_tbl_entries(struct txgbe_adapter *adapter); +void txgbe_store_reta(struct txgbe_adapter *adapter); +void txgbe_store_vfreta(struct txgbe_adapter *adapter); + +int txgbe_setup_isb_resources(struct txgbe_adapter *adapter); +void txgbe_free_isb_resources(struct txgbe_adapter *adapter); +void txgbe_configure_isb(struct txgbe_adapter *adapter); + +void txgbe_clean_tx_ring(struct txgbe_ring *tx_ring); +void txgbe_clean_rx_ring(struct txgbe_ring *rx_ring); +u32 txgbe_tx_cmd_type(u32 tx_flags); +void txgbe_free_headwb_resources(struct txgbe_ring *ring); +u16 txgbe_frac_to_bi(u16 frac, u16 denom, int max_bits); +int txgbe_link_mbps(struct txgbe_adapter *adapter); + +int txgbe_find_nth_limited_vlan(struct txgbe_adapter *adapter, int vlan); +void txgbe_del_vlan_limit(struct txgbe_adapter *adapter, int vlan); +void txgbe_set_vlan_limit(struct txgbe_adapter *adapter, int vlan, int rate_limit); +void txgbe_check_vlan_rate_limit(struct txgbe_adapter *adapter); + +/** + * interrupt masking operations. each bit in PX_ICn correspond to a interrupt. 
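+ * (bit i corresponds to queue vector i; the misc cause vector uses bit num_q_vectors, see TXGBE_INTR_MISC below)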
+ * disable a interrupt by writing to PX_IMS with the corresponding bit=1 + * enable a interrupt by writing to PX_IMC with the corresponding bit=1 + * trigger a interrupt by writing to PX_ICS with the corresponding bit=1 + **/ +#define TXGBE_INTR_ALL (~0ULL) +#define TXGBE_INTR_MISC(A) (1ULL << (A)->num_q_vectors) +#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1) +#define TXGBE_INTR_Q(i) (1ULL << (i)) +static inline void txgbe_intr_enable(struct txgbe_hw *hw, u64 qmask) +{ + u32 mask; + + mask = (qmask & 0xFFFFFFFF); + if (mask) + wr32(hw, TXGBE_PX_IMC(0), mask); + mask = (qmask >> 32); + if (mask) + wr32(hw, TXGBE_PX_IMC(1), mask); + + /* skip the flush */ +} + +static inline void txgbe_intr_disable(struct txgbe_hw *hw, u64 qmask) +{ + u32 mask; + + mask = (qmask & 0xFFFFFFFF); + if (mask) + wr32(hw, TXGBE_PX_IMS(0), mask); + mask = (qmask >> 32); + if (mask) + wr32(hw, TXGBE_PX_IMS(1), mask); + + /* skip the flush */ +} + +static inline void txgbe_intr_trigger(struct txgbe_hw *hw, u64 qmask) +{ + u32 mask; + + mask = (qmask & 0xFFFFFFFF); + if (mask) + wr32(hw, TXGBE_PX_ICS(0), mask); + mask = (qmask >> 32); + if (mask) + wr32(hw, TXGBE_PX_ICS(1), mask); + + /* skip the flush */ +} + +#define TXGBE_RING_SIZE(R) ((R)->count < TXGBE_MAX_TXD ? (R)->count / 128 : 0) + +#endif /* _TXGBE_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c new file mode 100644 index 0000000000000000000000000000000000000000..a1a9751db0581cefa41f6a42eeaa9d6cba5185de --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c @@ -0,0 +1,463 @@ +#include "txgbe_type.h" +#include "txgbe_hw.h" +#include "txgbe_aml.h" +#include "txgbe_e56.h" +#include "txgbe_e56_bp.h" +#include "txgbe_phy.h" + +#include "txgbe.h" + +/** + * txgbe_get_media_type_aml - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +static enum txgbe_media_type txgbe_get_media_type_aml(struct txgbe_hw *hw) +{ + u8 device_type = hw->subsystem_device_id & 0xF0; + enum txgbe_media_type media_type; + + switch (device_type) { + case TXGBE_ID_KR_KX_KX4: + media_type = txgbe_media_type_backplane; + break; + case TXGBE_ID_SFP: + media_type = txgbe_media_type_fiber; + break; + default: + media_type = txgbe_media_type_unknown; + break; + } + + return media_type; +} + +/** + * txgbe_setup_mac_link_aml - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +static s32 txgbe_setup_mac_link_aml(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + u32 link_capabilities = TXGBE_LINK_SPEED_UNKNOWN; + u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN; + struct txgbe_adapter *adapter = hw->back; + bool link_up = false; + bool autoneg = false; + s32 ret_status = 0; + int i = 0; + s32 status = 0; + u32 value = 0; + + /* Check to see if speed passed in is supported. 
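+	 * Speeds not advertised by get_link_capabilities are masked off below; if nothing remains, the request fails with TXGBE_ERR_LINK_SETUP.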
*/ + status = TCALL(hw, mac.ops.get_link_capabilities, + &link_capabilities, &autoneg); + if (status) + goto out; + speed &= link_capabilities; + + if (speed == TXGBE_LINK_SPEED_UNKNOWN) { + status = TXGBE_ERR_LINK_SETUP; + goto out; + } + + if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1 || + txgbe_is_backplane(hw)) { + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + if (!adapter->backplane_an) { + if ((link_speed == speed) && link_up) + goto out; + } else { + if (link_up && adapter->an_done) + goto out; + } + mutex_lock(&adapter->e56_lock); + txgbe_e56_set_phylinkmode(adapter, 25, hw->bypassCtle); + mutex_unlock(&adapter->e56_lock); + return 0; + } + + value = rd32(hw, TXGBE_GPIO_EXT); + if (value & (TXGBE_SFP1_MOD_ABS_LS | TXGBE_SFP1_RX_LOS_LS)) + goto out; + + for (i = 0; i < 4; i++) { + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + if (link_up) + break; + msleep(250); + } + + if (speed == TXGBE_LINK_SPEED_25GB_FULL) + adapter->cur_fec_link = txgbe_get_cur_fec_mode(hw); + + if ((link_speed == speed) && link_up && + !(speed == TXGBE_LINK_SPEED_25GB_FULL && + !(adapter->fec_link_mode & adapter->cur_fec_link))) + goto out; + + if (speed == TXGBE_LINK_SPEED_25GB_FULL && + link_speed == TXGBE_LINK_SPEED_25GB_FULL) { + txgbe_e56_fec_mode_polling(hw, &link_up); + + if (link_up) + goto out; + } + + mutex_lock(&adapter->e56_lock); + ret_status = txgbe_set_link_to_amlite(hw, speed); + mutex_unlock(&adapter->e56_lock); + + if (ret_status == TXGBE_ERR_PHY_INIT_NOT_DONE) + goto out; + + if (ret_status == TXGBE_ERR_TIMEOUT) { + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + goto out; + } + + if (speed == TXGBE_LINK_SPEED_25GB_FULL) { + txgbe_e56_fec_mode_polling(hw, &link_up); + } else { + for (i = 0; i < 4; i++) { + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + if (link_up) + goto out; + msleep(250); + } + } + +out: + return status; +} + +/** + * txgbe_get_link_capabilities_aml - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + * + * Determines the link capabilities by reading the AUTOC register. 
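+ * For AML devices the advertised speeds follow the detected SFP/DAC module type.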
+ **/ +static s32 txgbe_get_link_capabilities_aml(struct txgbe_hw *hw, + u32 *speed, + bool *autoneg) +{ + struct txgbe_adapter *adapter = hw->back; + s32 status = 0; + + if (hw->phy.multispeed_fiber) { + *speed = TXGBE_LINK_SPEED_10GB_FULL | + TXGBE_LINK_SPEED_25GB_FULL; + *autoneg = true; + } else if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) { + + if (hw->phy.fiber_suppport_speed == + TXGBE_LINK_SPEED_10GB_FULL && AUTO <= 1) { + adapter->backplane_an = false; + *autoneg = false; + } else { + *autoneg = true; + } + *speed = hw->phy.fiber_suppport_speed; + } else if (hw->phy.sfp_type == txgbe_sfp_type_25g_sr_core0 || + hw->phy.sfp_type == txgbe_sfp_type_25g_sr_core1 || + hw->phy.sfp_type == txgbe_sfp_type_25g_lr_core0 || + hw->phy.sfp_type == txgbe_sfp_type_25g_lr_core1) { + *speed = TXGBE_LINK_SPEED_25GB_FULL; + *autoneg = false; + } else if (hw->phy.sfp_type == txgbe_sfp_type_25g_aoc_core0 || + hw->phy.sfp_type == txgbe_sfp_type_25g_aoc_core1) { + *speed = TXGBE_LINK_SPEED_25GB_FULL; + *autoneg = false; + } else { + /* SFP */ + if (hw->phy.sfp_type == txgbe_sfp_type_not_present) + *speed = TXGBE_LINK_SPEED_25GB_FULL; + else + *speed = TXGBE_LINK_SPEED_10GB_FULL; + *autoneg = true; + } + + return status; +} + +/** + * txgbe_check_mac_link_aml - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +static s32 txgbe_check_mac_link_aml(struct txgbe_hw *hw, u32 *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + u32 links_reg = 0; + u32 i; + + if (link_up_wait_to_complete) { + for (i = 0; i < TXGBE_LINK_UP_TIME; i++) { + links_reg = rd32(hw, + TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + } + } else { + links_reg = rd32(hw, TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) { + *link_up = true; + } else { + *link_up = false; + } + } + + if ((hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core0) || + (hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core1)) { + *link_up = hw->f2c_mod_status; + + if (*link_up) { + /* recover led configure when link up */ + wr32(hw, TXGBE_CFG_LED_CTL, 0); + } else { + /* over write led when link down */ + TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_UP | TXGBE_AMLITE_LED_LINK_25G | + TXGBE_AMLITE_LED_LINK_10G | TXGBE_AMLITE_LED_LINK_ACTIVE); + } + } + + if (*link_up) { + if ((links_reg & TXGBE_CFG_PORT_ST_AML_LINK_25G) == + TXGBE_CFG_PORT_ST_AML_LINK_25G) + *speed = TXGBE_LINK_SPEED_25GB_FULL; + else if ((links_reg & TXGBE_CFG_PORT_ST_AML_LINK_10G) == + TXGBE_CFG_PORT_ST_AML_LINK_10G) + *speed = TXGBE_LINK_SPEED_10GB_FULL; + } else { + *speed = TXGBE_LINK_SPEED_UNKNOWN; + } + + return 0; +} + +/** + * txgbe_setup_mac_link_multispeed_fiber_aml - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the MAC and/or PHY register and restarts link. 
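+ * Supported speeds are tried one at a time, 25G first and then 10G, falling back to the highest speed attempted if neither links.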
+ **/ +static s32 txgbe_setup_mac_link_multispeed_fiber_aml(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + u32 highest_link_speed = TXGBE_LINK_SPEED_UNKNOWN; + u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN; + struct txgbe_adapter *adapter = hw->back; + bool autoneg, link_up = false; + u32 speedcnt = 0; + s32 status = 0; + + /* Mask off requested but non-supported speeds */ + status = TCALL(hw, mac.ops.get_link_capabilities, + &link_speed, &autoneg); + if (status != 0) + return status; + + speed &= link_speed; + + /* Try each speed one by one, highest priority first. We do this in + * software because 10Gb fiber doesn't support speed autonegotiation. + */ + if (speed & TXGBE_LINK_SPEED_25GB_FULL) { + speedcnt++; + highest_link_speed = TXGBE_LINK_SPEED_25GB_FULL; + + /* If we already have link at this speed, just jump out */ + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + + adapter->cur_fec_link = txgbe_get_cur_fec_mode(hw); + + if ((link_speed == TXGBE_LINK_SPEED_25GB_FULL) && link_up && + adapter->fec_link_mode & adapter->cur_fec_link) + goto out; + + /* Allow module to change analog characteristics (10G->25G) */ + msec_delay(40); + + status = TCALL(hw, mac.ops.setup_mac_link, + TXGBE_LINK_SPEED_25GB_FULL, + autoneg_wait_to_complete); + if (status != 0) + return status; + + /*aml wait link in setup,no need to repeatly wait*/ + /* If we have link, just jump out */ + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + + if (link_up) + goto out; + } + + if (speed & TXGBE_LINK_SPEED_10GB_FULL) { + speedcnt++; + if (highest_link_speed == TXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = TXGBE_LINK_SPEED_10GB_FULL; + + /* If we already have link at this speed, just jump out */ + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + + if ((link_speed == TXGBE_LINK_SPEED_10GB_FULL) && link_up) + goto out; + + /* Allow module to change analog characteristics (25G->10G) */ + msec_delay(40); + + status = TCALL(hw, mac.ops.setup_mac_link, + TXGBE_LINK_SPEED_10GB_FULL, + autoneg_wait_to_complete); + if (status != 0) + return status; + + /*aml wait link in setup,no need to repeatly wait*/ + /* If we have link, just jump out */ + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + + if (link_up) { + adapter->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG; + goto out; + } + } + + /* We didn't get link. Configure back to the highest speed we tried, + * (if there was more than one). We call ourselves back with just the + * single highest speed that the user requested. 
+ */ + if (speedcnt > 1) + status = txgbe_setup_mac_link_multispeed_fiber_aml(hw, + highest_link_speed, + autoneg_wait_to_complete); + +out: + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & TXGBE_LINK_SPEED_25GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_25GB_FULL; + + if (speed & TXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_10GB_FULL; + + return status; +} + + +static void txgbe_init_mac_link_ops_aml(struct txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + + if (mac->ops.get_media_type(hw) == txgbe_media_type_fiber) { + mac->ops.disable_tx_laser = + txgbe_disable_tx_laser_multispeed_fiber; + mac->ops.enable_tx_laser = + txgbe_enable_tx_laser_multispeed_fiber; + mac->ops.flap_tx_laser = txgbe_flap_tx_laser_multispeed_fiber; + + if (hw->phy.multispeed_fiber) { + /* Set up dual speed SFP+ support */ + mac->ops.setup_link = txgbe_setup_mac_link_multispeed_fiber_aml; + mac->ops.setup_mac_link = txgbe_setup_mac_link_aml; + mac->ops.set_rate_select_speed = + txgbe_set_hard_rate_select_speed; + } else { + mac->ops.setup_link = txgbe_setup_mac_link_aml; + mac->ops.set_rate_select_speed = + txgbe_set_hard_rate_select_speed; + } + } +} + +static s32 txgbe_setup_sfp_modules_aml(struct txgbe_hw *hw) +{ + s32 ret_val = 0; + + DEBUGFUNC("txgbe_setup_sfp_modules_aml"); + + if (hw->phy.sfp_type != txgbe_sfp_type_unknown) { + txgbe_init_mac_link_ops_aml(hw); + + hw->phy.ops.reset = NULL; + } + + return ret_val; +} + +/** + * txgbe_init_phy_ops - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. 
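+ * Installed as phy.ops.init by txgbe_init_ops_aml().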
+ * + **/ +static s32 txgbe_init_phy_ops_aml(struct txgbe_hw *hw) +{ + s32 ret_val = 0; + + txgbe_init_i2c(hw); + wr32(hw, TXGBE_MAC_MDIO_CLAUSE_22_PORT, + TXGBE_MAC_MDIO_CLAUSE_ALL_PRTCL22); + + /* Identify the PHY or SFP module */ + ret_val = TCALL(hw, phy.ops.identify); + if (ret_val == TXGBE_ERR_SFP_NOT_SUPPORTED) + goto init_phy_ops_out; + + /* Setup function pointers based on detected SFP module and speeds */ + txgbe_init_mac_link_ops_aml(hw); + if (hw->phy.sfp_type != txgbe_sfp_type_unknown) + hw->phy.ops.reset = NULL; + +init_phy_ops_out: + return ret_val; +} + +s32 txgbe_init_ops_aml(struct txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + struct txgbe_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + ret_val = txgbe_init_ops_generic(hw); + + /* PHY */ + phy->ops.init = txgbe_init_phy_ops_aml; + + /* MAC */ + mac->ops.get_media_type = txgbe_get_media_type_aml; + mac->ops.setup_sfp = txgbe_setup_sfp_modules_aml; + + /* LINK */ + mac->ops.get_link_capabilities = txgbe_get_link_capabilities_aml; + mac->ops.setup_link = txgbe_setup_mac_link_aml; + mac->ops.check_link = txgbe_check_mac_link_aml; + + return ret_val; +} + diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h new file mode 100644 index 0000000000000000000000000000000000000000..4a65d60e204a85ec3eebd5a06d129c7de2df5110 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h @@ -0,0 +1,7 @@ +#ifndef _TXGBE_AML_H_ +#define _TXGBE_AML_H_ + +s32 txgbe_init_ops_aml(struct txgbe_hw *hw); + +#endif /* _TXGBE_AML_H_ */ + diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml40.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml40.c new file mode 100644 index 0000000000000000000000000000000000000000..996c565e95e1ab3a6357754c033c66b7dcfe433e --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml40.c @@ -0,0 +1,293 @@ +#include "txgbe_type.h" +#include "txgbe_hw.h" +#include "txgbe_aml40.h" +#include "txgbe_e56.h" +#include "txgbe_e56_bp.h" +#include "txgbe_phy.h" + +#include "txgbe.h" + +/** + * txgbe_get_media_type_aml40 - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +enum txgbe_media_type txgbe_get_media_type_aml40(struct txgbe_hw *hw) +{ + u8 device_type = hw->subsystem_device_id & 0xF0; + enum txgbe_media_type media_type; + + switch (device_type) { + case TXGBE_ID_KR_KX_KX4: + media_type = txgbe_media_type_backplane; + break; + case TXGBE_ID_SFP: + media_type = txgbe_media_type_fiber_qsfp; + break; + default: + media_type = txgbe_media_type_unknown; + break; + } + + return media_type; +} + +/** + * txgbe_setup_mac_link_aml - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +static s32 txgbe_setup_mac_link_aml40(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + u32 link_capabilities = TXGBE_LINK_SPEED_UNKNOWN; + u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN; + struct txgbe_adapter *adapter = hw->back; + bool link_up = false; + bool autoneg = false; + s32 ret_status = 0; + int i = 0; + s32 status = 0; + + /* Check to see if speed passed in is supported. 
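+	 * Only speeds reported by get_link_capabilities are kept; an empty result aborts with TXGBE_ERR_LINK_SETUP.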
*/ + status = TCALL(hw, mac.ops.get_link_capabilities, + &link_capabilities, &autoneg); + if (status) + goto out; + + speed &= link_capabilities; + + if (speed == TXGBE_LINK_SPEED_UNKNOWN) { + status = TXGBE_ERR_LINK_SETUP; + goto out; + } + + if (hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core0 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core1 || + txgbe_is_backplane(hw)) { + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + if (!adapter->backplane_an) { + if ((link_speed == speed) && link_up) + goto out; + } else { + if (link_up && adapter->an_done) + goto out; + } + mutex_lock(&adapter->e56_lock); + txgbe_e56_set_phylinkmode(adapter, 40, hw->bypassCtle); + mutex_unlock(&adapter->e56_lock); + goto out; + } + + for (i = 0; i < 4; i++) { + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + if (link_up) + break; + msleep(250); + } + + if ((link_speed == speed) && link_up) + goto out; + + mutex_lock(&adapter->e56_lock); + ret_status = txgbe_set_link_to_amlite(hw, speed); + mutex_unlock(&adapter->e56_lock); + + if (ret_status == TXGBE_ERR_TIMEOUT) + adapter->link_valid = false; + + for (i = 0; i < 4; i++) { + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + if (link_up) + goto out; + msleep(250); + } + + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + +out: + return status; +} + +/** + * txgbe_get_link_capabilities_aml40 - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + * + * Determines the link capabilities by reading the AUTOC register. + **/ +static s32 txgbe_get_link_capabilities_aml40(struct txgbe_hw *hw, + u32 *speed, + bool *autoneg) +{ + s32 status = 0; + + if (hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core0 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core1) { + *autoneg = true; + *speed = TXGBE_LINK_SPEED_40GB_FULL; + } else if (txgbe_is_backplane(hw)) { + *speed = TXGBE_LINK_SPEED_40GB_FULL; + *autoneg = true; + } else { + *speed = TXGBE_LINK_SPEED_40GB_FULL; + *autoneg = true; + } + + return status; +} + +/** + * txgbe_check_mac_link_aml40 - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +static s32 txgbe_check_mac_link_aml40(struct txgbe_hw *hw, u32 *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + struct txgbe_adapter *adapter = hw->back; + u32 links_reg = 0; + u32 i; + + if (link_up_wait_to_complete) { + for (i = 0; i < TXGBE_LINK_UP_TIME; i++) { + links_reg = rd32(hw, + TXGBE_CFG_PORT_ST); + + if (!adapter->link_valid) { + *link_up = false; + + msleep(100); + continue; + } + + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + } + } else { + links_reg = rd32(hw, TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) + *link_up = true; + else + *link_up = false; + } + + if (!adapter->link_valid) + *link_up = false; + + if (*link_up) { + if ((links_reg & TXGBE_CFG_PORT_ST_AML_LINK_40G) == + TXGBE_CFG_PORT_ST_AML_LINK_40G) + *speed = TXGBE_LINK_SPEED_40GB_FULL; + } else { + *speed = TXGBE_LINK_SPEED_UNKNOWN; + } + + if (txgbe_is_backplane(hw)) { + if (!adapter->an_done) { + *link_up = false; + *speed = TXGBE_LINK_SPEED_UNKNOWN; + } + } + + return 0; +} + +static void txgbe_init_mac_link_ops_aml40(struct 
txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + + mac->ops.disable_tx_laser = + txgbe_disable_tx_laser_multispeed_fiber; + mac->ops.enable_tx_laser = + txgbe_enable_tx_laser_multispeed_fiber; + mac->ops.flap_tx_laser = txgbe_flap_tx_laser_multispeed_fiber; + + mac->ops.setup_link = txgbe_setup_mac_link_aml40; + mac->ops.set_rate_select_speed = txgbe_set_hard_rate_select_speed; +} + +static s32 txgbe_setup_sfp_modules_aml40(struct txgbe_hw *hw) +{ + s32 ret_val = 0; + + DEBUGFUNC("txgbe_setup_sfp_modules_aml40"); + + if (hw->phy.sfp_type != txgbe_sfp_type_unknown) { + txgbe_init_mac_link_ops_aml40(hw); + + hw->phy.ops.reset = NULL; + } + + return ret_val; +} + +/** + * txgbe_init_phy_ops - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. + * + **/ +s32 txgbe_init_phy_ops_aml40(struct txgbe_hw *hw) +{ + s32 ret_val = 0; + + txgbe_init_i2c(hw); + wr32(hw, TXGBE_MAC_MDIO_CLAUSE_22_PORT, + TXGBE_MAC_MDIO_CLAUSE_ALL_PRTCL22); + + /* Identify the PHY or SFP module */ + ret_val = TCALL(hw, phy.ops.identify); + if (ret_val == TXGBE_ERR_SFP_NOT_SUPPORTED) + goto init_phy_ops_out; + + /* Setup function pointers based on detected SFP module and speeds */ + txgbe_init_mac_link_ops_aml40(hw); + if (hw->phy.sfp_type != txgbe_sfp_type_unknown) + hw->phy.ops.reset = NULL; + +init_phy_ops_out: + return ret_val; +} + +s32 txgbe_init_ops_aml40(struct txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + struct txgbe_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + ret_val = txgbe_init_ops_generic(hw); + + /* PHY */ + phy->ops.init = txgbe_init_phy_ops_aml40; + + /* MAC */ + mac->ops.get_media_type = txgbe_get_media_type_aml40; + mac->ops.setup_sfp = txgbe_setup_sfp_modules_aml40; + + /* LINK */ + mac->ops.check_link = txgbe_check_mac_link_aml40; + mac->ops.setup_link = txgbe_setup_mac_link_aml40; + mac->ops.get_link_capabilities = txgbe_get_link_capabilities_aml40; + + return ret_val; +} + diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml40.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml40.h new file mode 100644 index 0000000000000000000000000000000000000000..b264ee0db1cbe85bfe6c6a41638fead386bdd72b --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml40.h @@ -0,0 +1,8 @@ +#ifndef _TXGBE_AML40_H_ +#define _TXGBE_AML40_H_ + +enum txgbe_media_type txgbe_get_media_type_aml40(struct txgbe_hw *hw); +s32 txgbe_init_ops_aml40(struct txgbe_hw *hw); +s32 txgbe_init_phy_ops_aml40(struct txgbe_hw *hw); +#endif /* _TXGBE_AML40_H_ */ + diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_bp.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_bp.c new file mode 100644 index 0000000000000000000000000000000000000000..9618c47e553434ad1d7a469a6083e1094e9c68e4 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_bp.c @@ -0,0 +1,615 @@ +#include "txgbe_bp.h" + +void txgbe_bp_close_protect(struct txgbe_adapter *adapter) +{ + adapter->flags2 |= TXGBE_FLAG2_KR_PRO_DOWN; + while (adapter->flags2 & TXGBE_FLAG2_KR_PRO_REINIT){ + msleep(100); + printk("wait to reinited ok..%x\n",adapter->flags2); + } +} + +int txgbe_bp_mode_setting(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + /*default to open an73*/ + if ((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_KR_KX_KX4) + adapter->backplane_an = AUTO ? 
1 : 0; + + switch (hw->mac.type) { + case txgbe_mac_sp: + if (AUTO > 1) + adapter->backplane_an = AUTO ? 1 : 0; + break; + case txgbe_mac_aml40: + case txgbe_mac_aml: + default: + adapter->backplane_an = AUTO ? 1 : 0; + break; + } + + adapter->autoneg = AUTO ? 1 : 0; + switch (adapter->backplane_mode) { + case TXGBE_BP_M_KR: + hw->subsystem_device_id = TXGBE_ID_WX1820_KR_KX_KX4; + break; + case TXGBE_BP_M_KX4: + hw->subsystem_device_id = TXGBE_ID_WX1820_MAC_XAUI; + break; + case TXGBE_BP_M_KX: + hw->subsystem_device_id = TXGBE_ID_WX1820_MAC_SGMII; + break; + case TXGBE_BP_M_SFI: + hw->subsystem_device_id = TXGBE_ID_WX1820_SFP; + break; + default: + break; + } + + if (adapter->backplane_auto == TXGBE_BP_M_AUTO) { + adapter->backplane_an = 1; + adapter->autoneg = 1; + } else if (adapter->backplane_auto == TXGBE_BP_M_NAUTO) { + adapter->backplane_an = 0; + adapter->autoneg = 0; + } + + if ((adapter->ffe_set == 0) && (KR_SET == 0)) + return 0; + + if (KR_SET == 1) { + adapter->ffe_main = KR_MAIN; + adapter->ffe_pre = KR_PRE; + adapter->ffe_post = KR_POST; + } else if (!KR_SET && KX4_SET == 1) { + adapter->ffe_main = KX4_MAIN; + adapter->ffe_pre = KX4_PRE; + adapter->ffe_post = KX4_POST; + } else if (!KR_SET && !KX4_SET && KX_SET == 1) { + adapter->ffe_main = KX_MAIN; + adapter->ffe_pre = KX_PRE; + adapter->ffe_post = KX_POST; + } else if (!KR_SET && !KX4_SET && !KX_SET && SFI_SET == 1) { + adapter->ffe_main = SFI_MAIN; + adapter->ffe_pre = SFI_PRE; + adapter->ffe_post = SFI_POST; + } + return 0; +} + +void txgbe_bp_watchdog_event(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 value = 0; + int ret = 0; + + /* only continue if link is down */ + if (netif_carrier_ok(adapter->netdev)) + return; + + if (adapter->flags2 & TXGBE_FLAG2_KR_TRAINING) { + value = txgbe_rd32_epcs(hw, 0x78002); + if ((value & BIT(2)) == BIT(2)) { + e_info(hw, "Enter training\n"); + ret = handle_bkp_an73_flow(0, adapter); + if (ret) + txgbe_set_link_to_kr(hw, 1); + } + adapter->flags2 &= ~TXGBE_FLAG2_KR_TRAINING; + } +} + +void txgbe_bp_down_event(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 val = 0, val1 = 0; + + if (adapter->backplane_an == 0) + return; + + val = txgbe_rd32_epcs(hw, 0x78002); + val1 = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_CTL); + kr_dbg(KR_MODE, "AN INT : %x - AN CTL : %x - PL : %x\n", + val, val1, txgbe_rd32_epcs(hw, 0x70012)); + switch (AN73_TRAINNING_MODE) { + case 0: + msleep(1000); + if ((val & BIT(2)) == BIT(2)) { + if (!(adapter->flags2 & TXGBE_FLAG2_KR_TRAINING)) + adapter->flags2 |= TXGBE_FLAG2_KR_TRAINING; + } else { + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0); + txgbe_wr32_epcs(hw, 0x78002, 0x0000); + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x3000); + } + break; + case 1: + msleep(100); + if ((val & BIT(2)) == BIT(2)) { + if (!(adapter->flags2 & TXGBE_FLAG2_KR_TRAINING)) + adapter->flags2 |= TXGBE_FLAG2_KR_TRAINING; + } else { + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0); + txgbe_wr32_epcs(hw, 0x78002, 0x0000); + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x3000); + } + break; + default: + break; + } +} + +/*Check Ethernet Backplane AN73 Base Page Ability +**return value: +** -1 : none link mode matched, exit +** 0 : current link mode matched, wait AN73 to be completed +** 1 : current link mode not matched, set to matched link mode, re-start AN73 external +*/ +int chk_bkp_an73_ability(bkpan73ability tBkpAn73Ability, bkpan73ability tLpBkpAn73Ability, + struct txgbe_adapter *adapter) +{ + unsigned int comLinkAbility; + + 
kr_dbg(KR_MODE, "CheckBkpAn73Ability():\n"); + kr_dbg(KR_MODE, "------------------------\n"); + + /*-- Check the common link ability and take action based on the result*/ + comLinkAbility = tBkpAn73Ability.linkAbility & tLpBkpAn73Ability.linkAbility; + kr_dbg(KR_MODE, "comLinkAbility= 0x%x, linkAbility= 0x%x, lpLinkAbility= 0x%x\n", + comLinkAbility, tBkpAn73Ability.linkAbility, tLpBkpAn73Ability.linkAbility); + + /*only support kr*/ + if (comLinkAbility == 0){ + kr_dbg(KR_MODE, "WARNING: The Link Partner does not support any compatible speed mode!!!\n\n"); + return -1; + } else if (comLinkAbility & 0x80) { + if (tBkpAn73Ability.currentLinkMode == 0){ + kr_dbg(KR_MODE, "Link mode is matched with Link Partner: [LINK_KR].\n"); + return 0; + } else { + kr_dbg(KR_MODE, "Link mode is not matched with Link Partner: [LINK_KR].\n"); + kr_dbg(KR_MODE, "Set the local link mode to [LINK_KR] ...\n"); + return 1; + } + } + +#if 0 + if (comLinkAbility == 0){ + kr_dbg(KR_MODE, "WARNING: The Link Partner does not support any compatible speed mode!!!\n\n"); + return -1; + } else if (comLinkAbility & 0x80) { + if (tBkpAn73Ability.currentLinkMode == 0){ + kr_dbg(KR_MODE, "Link mode is matched with Link Partner: [LINK_KR].\n"); + return 0; + }else{ + kr_dbg(KR_MODE, "Link mode is not matched with Link Partner: [LINK_KR].\n"); + kr_dbg(KR_MODE, "Set the local link mode to [LINK_KR] ...\n"); + txgbe_set_link_to_kr(hw, 1); + return 1; + } + } else if (comLinkAbility & 0x40) { + if (tBkpAn73Ability.currentLinkMode == 0x10){ + kr_dbg(KR_MODE, "Link mode is matched with Link Partner: [LINK_KX4].\n"); + return 0; + } else { + kr_dbg(KR_MODE, "Link mode is not matched with Link Partner: [LINK_KX4].\n"); + kr_dbg(KR_MODE, "Set the local link mode to [LINK_KX4] ...\n"); + txgbe_set_link_to_kx4(hw, 1); + return 1; + } + } else if (comLinkAbility & 0x20) { + if (tBkpAn73Ability.currentLinkMode == 0x1){ + kr_dbg(KR_MODE, "Link mode is matched with Link Partner: [LINK_KX].\n"); + return 0; + } else { + kr_dbg(KR_MODE, "Link mode is not matched with Link Partner: [LINK_KX].\n"); + kr_dbg(KR_MODE, "Set the local link mode to [LINK_KX] ...\n"); + txgbe_set_link_to_kx(hw, 1, 1); + return 1; + } + } +#endif + return 0; +} + +static void txgbe_bp_print_page_status(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 rdata = 0; + + rdata = txgbe_rd32_epcs(hw, 0x70010); + kr_dbg(KR_MODE, "read 70010 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70011); + kr_dbg(KR_MODE, "read 70011 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70012); + kr_dbg(KR_MODE, "read 70012 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70013); + kr_dbg(KR_MODE, "read 70013 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70014); + kr_dbg(KR_MODE, "read 70014 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70015); + kr_dbg(KR_MODE, "read 70015 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70016); + kr_dbg(KR_MODE, "read 70016 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70017); + kr_dbg(KR_MODE, "read 70017 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70018); + kr_dbg(KR_MODE, "read 70018 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70019); + kr_dbg(KR_MODE, "read 70019 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70020); + kr_dbg(KR_MODE, "read 70020 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70021); + kr_dbg(KR_MODE, "read 70021 data %0x\n", rdata); +} + +static void txgbe_bp_exchange_page(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = 
&adapter->hw; + u32 an_int, base_page = 0; + int count = 0; + + an_int = txgbe_rd32_epcs(hw, 0x78002); + if (!(an_int & BIT(2))) + return; + /* 500ms timeout */ + for (count = 0; count < 5000; count++) { + kr_dbg(KR_MODE, "-----count----- %d\n", count); + if (an_int & BIT(2)) { + u8 next_page = 0; + u32 rdata, addr; + + txgbe_bp_print_page_status(adapter); + addr = base_page == 0 ? 0x70013 : 0x70019; + rdata = txgbe_rd32_epcs(hw, addr); + if (rdata & BIT(14)) { + if (rdata & BIT(15)) { + /* always set null message */ + txgbe_wr32_epcs(hw, 0x70016, 0x2001); + kr_dbg(KR_MODE, "write 70016 0x%0x\n", + 0x2001); + rdata = txgbe_rd32_epcs(hw, 0x70010); + txgbe_wr32_epcs(hw, 0x70010, + rdata | BIT(15)); + kr_dbg(KR_MODE, "write 70010 0x%0x\n", + rdata); + next_page = 1; + } else { + next_page = 0; + } + base_page = 1; + } + /* clear an pacv int */ + txgbe_wr32_epcs(hw, 0x78002, 0x0000); + kr_dbg(KR_MODE, "write 78002 0x%0x\n", 0x0000); + usec_delay(100); + if (next_page == 0) + return; + } + usec_delay(100); + } +} + +/*Get Ethernet Backplane AN73 Base Page Ability +**byLinkPartner: +**- 1: Get Link Partner Base Page +**- 2: Get Link Partner Next Page (only get NXP Ability Register 1 at the moment) +**- 0: Get Local Device Base Page +*/ +int get_bkp_an73_ability(bkpan73ability *pt_bkp_an73_ability, unsigned char byLinkPartner, + struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + unsigned int rdata; + int status = 0; + + kr_dbg(KR_MODE, "byLinkPartner = %d\n", byLinkPartner); + kr_dbg(KR_MODE, "----------------------------------------\n"); + + if (byLinkPartner == 1) /*Link Partner Base Page*/ + { + /*Read the link partner AN73 Base Page Ability Registers*/ + kr_dbg(KR_MODE, "Read the link partner AN73 Base Page Ability Registers...\n"); + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_LP_ABL1); + kr_dbg(KR_MODE, "SR AN MMD LP Base Page Ability Register 1: 0x%x\n", rdata); + pt_bkp_an73_ability->nextPage = (rdata >> 15) & 0x01; + kr_dbg(KR_MODE, " Next Page (bit15): %d\n", pt_bkp_an73_ability->nextPage); + + /* if have next pages, exchange next pages. 
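+	 * txgbe_bp_exchange_page() acknowledges each received page and replies with null message pages until the link partner clears its Next Page bit.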
*/ + if (pt_bkp_an73_ability->nextPage) + txgbe_bp_exchange_page(adapter); + + rdata = txgbe_rd32_epcs(hw, 0x70014); + kr_dbg(KR_MODE, "SR AN MMD LP Base Page Ability Register 2: 0x%x\n", rdata); + pt_bkp_an73_ability->linkAbility = rdata & 0xE0; + kr_dbg(KR_MODE, " Link Ability (bit[15:0]): 0x%x\n", + pt_bkp_an73_ability->linkAbility); + kr_dbg(KR_MODE, " (0x20- KX_ONLY, 0x40- KX4_ONLY, 0x60- KX4_KX\n"); + kr_dbg(KR_MODE, " 0x80- KR_ONLY, 0xA0- KR_KX, 0xC0- KR_KX4, 0xE0- KR_KX4_KX)\n"); + + rdata = txgbe_rd32_epcs(hw, 0x70015); + kr_dbg(KR_MODE, "SR AN MMD LP Base Page Ability Register 3: 0x%x\n", rdata); + kr_dbg(KR_MODE, " FEC Request (bit15): %d\n", ((rdata >> 15) & 0x01)); + kr_dbg(KR_MODE, " FEC Enable (bit14): %d\n", ((rdata >> 14) & 0x01)); + pt_bkp_an73_ability->fecAbility = (rdata >> 14) & 0x03; + } else if (byLinkPartner == 2) {/*Link Partner Next Page*/ + /*Read the link partner AN73 Next Page Ability Registers*/ + kr_dbg(KR_MODE, "\nRead the link partner AN73 Next Page Ability Registers...\n"); + rdata = txgbe_rd32_epcs(hw, 0x70019); + kr_dbg(KR_MODE, " SR AN MMD LP XNP Ability Register 1: 0x%x\n", rdata); + pt_bkp_an73_ability->nextPage = (rdata >> 15) & 0x01; + if (KR_MODE)e_dev_info(" Next Page (bit15): %d\n", pt_bkp_an73_ability->nextPage); + } else { + /*Read the local AN73 Base Page Ability Registers*/ + kr_dbg(KR_MODE, "\nRead the local AN73 Base Page Ability Registers...\n"); + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG1); + kr_dbg(KR_MODE, "SR AN MMD Advertisement Register 1: 0x%x\n", rdata); + pt_bkp_an73_ability->nextPage = (rdata >> 15) & 0x01; + kr_dbg(KR_MODE, " Next Page (bit15): %d\n", pt_bkp_an73_ability->nextPage); + + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG2); + kr_dbg(KR_MODE, "SR AN MMD Advertisement Register 2: 0x%x\n", rdata); + pt_bkp_an73_ability->linkAbility = rdata & 0xE0; + kr_dbg(KR_MODE, " Link Ability (bit[15:0]): 0x%x\n", + pt_bkp_an73_ability->linkAbility); + kr_dbg(KR_MODE, " (0x20- KX_ONLY, 0x40- KX4_ONLY, 0x60- KX4_KX\n"); + kr_dbg(KR_MODE, " 0x80- KR_ONLY, 0xA0- KR_KX, 0xC0- KR_KX4, 0xE0- KR_KX4_KX)\n"); + + rdata = txgbe_rd32_epcs(hw, 0x70012); + kr_dbg(KR_MODE, "SR AN MMD Advertisement Register 3: 0x%x\n", rdata); + kr_dbg(KR_MODE, " FEC Request (bit15): %d\n", ((rdata >> 15) & 0x01)); + kr_dbg(KR_MODE, " FEC Enable (bit14): %d\n", ((rdata >> 14) & 0x01)); + pt_bkp_an73_ability->fecAbility = (rdata >> 14) & 0x03; + } /*if (byLinkPartner == 1) Link Partner Base Page*/ + + return status; +} + +/* DESCRIPTION: Set the source data fields[bitHigh:bitLow] with setValue +** INPUTS: *src_data: Source data pointer +** bitHigh: High bit position of the fields +** bitLow : Low bit position of the fields +** setValue: Set value of the fields +** OUTPUTS: return the updated source data +*/ +static void set_fields( + unsigned int *src_data, + unsigned int bitHigh, + unsigned int bitLow, + unsigned int setValue) +{ + int i; + + if (bitHigh == bitLow) { + if (setValue == 0) + *src_data &= ~(1 << bitLow); + else + *src_data |= (1 << bitLow); + } else { + for (i = bitLow; i <= bitHigh; i++) + *src_data &= ~(1 << i); + *src_data |= (setValue << bitLow); + } +} + +static void read_phy_lane_txeq(unsigned short lane, struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + unsigned int addr, rdata; + + /*LANEN_DIG_ASIC_TX_ASIC_IN_1[11:6]: TX_MAIN_CURSOR*/ + addr = 0x100E | (lane << 8); + rdata = rd32_ephy(hw, addr); + kr_dbg(KR_MODE, "PHY LANE%0d TX EQ Read Value:\n", lane); + kr_dbg(KR_MODE, " TX_MAIN_CURSOR: %d\n", 
((rdata >> 6) & 0x3F));
+
+	/*LANEN_DIG_ASIC_TX_ASIC_IN_2[5 :0]: TX_PRE_CURSOR*/
+	/*LANEN_DIG_ASIC_TX_ASIC_IN_2[11:6]: TX_POST_CURSOR*/
+	addr = 0x100F | (lane << 8);
+	rdata = rd32_ephy(hw, addr);
+	kr_dbg(KR_MODE, " TX_PRE_CURSOR : %d\n", (rdata & 0x3F));
+	kr_dbg(KR_MODE, " TX_POST_CURSOR: %d\n", ((rdata >> 6) & 0x3F));
+	kr_dbg(KR_MODE, "**********************************************\n");
+}
+
+
+/*Enable Clause 72 KR training
+**
+**Note:
+**<1>. The Clause 72 start-up protocol should be initiated when all pages are
+**exchanged during Clause 73 auto-negotiation and when the auto-negotiation
+**process is waiting for link status to be UP for 500 ms after exchanging all
+**the pages.
+**
+**<2>. The local device and the link partner must both enable CL72 KR training
+**within 500 ms.
+**
+**enable:
+**- bits[1:0] =2'b11: Enable the CL72 KR training
+**- bits[1:0] =2'b01: Disable the CL72 KR training
+*/
+static int en_cl72_krtr(unsigned int enable, struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	unsigned int wdata = 0;
+	u32 val;
+
+	if (enable == 1) {
+		kr_dbg(KR_MODE, "\nDisable Clause 72 KR Training ...\n");
+		read_phy_lane_txeq(0, adapter);
+	} else if (enable == 3) {
+		kr_dbg(KR_MODE, "\nEnable Clause 72 KR Training ...\n");
+		if (CL72_KRTR_PRBS_MODE_EN != 0xffff) {
+			/*Set PRBS Timer Duration Control to maximum 6.7ms in VR_PMA_KRTR_PRBS_CTRL1 Register*/
+			wdata = CL72_KRTR_PRBS_MODE_EN;
+			txgbe_wr32_epcs(hw, 0x18005, wdata);
+			/*Set PRBS Timer Duration Control to maximum 6.7ms in VR_PMA_KRTR_PRBS_CTRL1 Register*/
+			wdata = 0xFFFF;
+			txgbe_wr32_epcs(hw, 0x18004, wdata);
+
+			/*Enable PRBS Mode to determine KR Training Status by setting Bit 0 of VR_PMA_KRTR_PRBS_CTRL0 Register*/
+			wdata = 0;
+			set_fields(&wdata, 0, 0, 1);
+		}
+
+		/*Enable PRBS31 as the KR Training Pattern by setting Bit 1 of VR_PMA_KRTR_PRBS_CTRL0 Register*/
+		if (CL72_KRTR_PRBS31_EN == 1)
+			set_fields(&wdata, 1, 1, 1);
+		val = txgbe_rd32_epcs(hw, 0x18003);
+		wdata |= val;
+		txgbe_wr32_epcs(hw, 0x18003, wdata);
+		read_phy_lane_txeq(0, adapter);
+	}
+
+	/*Enable the Clause 72 start-up protocol by setting Bit 1 of SR_PMA_KR_PMD_CTRL Register.
+ **Restart the Clause 72 start-up protocol by setting Bit 0 of SR_PMA_KR_PMD_CTRL Register*/ + wdata = enable; + txgbe_wr32_epcs(hw, 0x10096, wdata); + return 0; +} + +static int chk_cl72_krtr_status(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + unsigned int rdata = 0, rdata1; + int status = 0; + + status = read_poll_timeout(txgbe_rd32_epcs, rdata1, (rdata1 & 0x9), 1000, + 400000, false, hw, 0x10097); + if (!status) { + //Get the latest received coefficient update or status + rdata = txgbe_rd32_epcs(hw, 0x010098); + kr_dbg(KR_MODE, "SR PMA MMD 10GBASE-KR LP Coefficient Update Register: 0x%x\n", + rdata); + rdata = txgbe_rd32_epcs(hw, 0x010099); + kr_dbg(KR_MODE, "SR PMA MMD 10GBASE-KR LP Coefficient Status Register: 0x%x\n", + rdata); + rdata = txgbe_rd32_epcs(hw, 0x01009a); + kr_dbg(KR_MODE, "SR PMA MMD 10GBASE-KR LD Coefficient Update: 0x%x\n", rdata); + + rdata = txgbe_rd32_epcs(hw, 0x01009b); + kr_dbg(KR_MODE, " SR PMA MMD 10GBASE-KR LD Coefficient Status: 0x%x\n", rdata); + + rdata = txgbe_rd32_epcs(hw, 0x010097); + kr_dbg(KR_MODE, "SR PMA MMD 10GBASE-KR Status Register: 0x%x\n", rdata); + kr_dbg(KR_MODE, " Training Failure (bit3): %d\n", ((rdata >> 3) & 0x01)); + kr_dbg(KR_MODE, " Start-Up Protocol Status (bit2): %d\n", ((rdata >> 2) & 0x01)); + kr_dbg(KR_MODE, " Frame Lock (bit1): %d\n", ((rdata >> 1) & 0x01)); + kr_dbg(KR_MODE, " Receiver Status (bit0): %d\n", ((rdata >> 0) & 0x01)); + /*If bit3 is set, Training is completed with failure*/ + if ((rdata1 >> 3) & 0x01) { + kr_dbg(KR_MODE, "Training is completed with failure!!!\n"); + read_phy_lane_txeq(0, adapter); + return status; + } + + /*If bit0 is set, Receiver trained and ready to receive data*/ + if ((rdata1 >> 0) & 0x01) { + kr_dbg(KR_MODE, "Receiver trained and ready to receive data ^_^\n"); + e_info(hw, "Receiver ready.\n"); + read_phy_lane_txeq(0, adapter); + return status; + } + } + + kr_dbg(KR_MODE, "ERROR: Check Clause 72 KR Training Complete Timeout!!!\n"); + + return status; +} + +static int txgbe_cl72_trainning(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 rdata = 0, rdata1 = 0; + bool lpld_all_rd = false; + int ret = 0; + + if (AN73_TRAINNING_MODE == 1) + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0); + + ret |= en_cl72_krtr(3, adapter); + kr_dbg(KR_MODE, "\nCheck the Clause 72 KR Training status ...\n"); + ret |= chk_cl72_krtr_status(adapter); + + ret = read_poll_timeout(txgbe_rd32_epcs, rdata, (rdata & 0x8000), 1000, + 200000, false, hw, 0x10099); + if (!ret) { + rdata1 = txgbe_rd32_epcs(hw, 0x1009b) & 0x8000; + if (rdata1 == 0x8000) + lpld_all_rd = true; + } + + if (lpld_all_rd) { + rdata = rd32_ephy(hw, 0x100E); + rdata1 = rd32_ephy(hw, 0x100F); + e_dev_info("Lp and Ld all Ready, FFE : %d-%d-%d.\n", + (rdata >> 6) & 0x3F, rdata1 & 0x3F, (rdata1 >> 6) & 0x3F); + if (AN73_TRAINNING_MODE == 1 && hw->dac_sfp == false) + if ((((rdata >> 6) & 0x3F) == 27) && + ((rdata1 & 0x3F) == 8) && + (((rdata1 >> 6) & 0x3F)) == 44) + return -1; + /* clear an pacv int */ + txgbe_wr32_epcs(hw, 0x78002, 0x0000); + ret = read_poll_timeout(txgbe_rd32_epcs, rdata, (rdata & 0x1000), 1000, + 100000, false, hw, 0x30020); + if (!ret) + e_dev_info("INT_AN_INT_CMPLT =1, AN73 Done Success.\n"); + return 0; + } + /* clear an pacv int */ + txgbe_wr32_epcs(hw, 0x78002, 0x0000); + if (AN73_TRAINNING_MODE == 0) + en_cl72_krtr(1, adapter); + + return -1; +} + +int handle_bkp_an73_flow(unsigned char bp_link_mode, struct txgbe_adapter *adapter) +{ + bkpan73ability tBkpAn73Ability , 
tLpBkpAn73Ability;
+	struct txgbe_hw *hw = &adapter->hw;
+	bool fec_en = false;
+	u32 fecAbility = 0;
+	int ret = 0;
+
+	tBkpAn73Ability.currentLinkMode = bp_link_mode;
+
+	kr_dbg(KR_MODE, "HandleBkpAn73Flow().\n");
+	kr_dbg(KR_MODE, "---------------------------------\n");
+
+	/*1. Get the local AN73 Base Page Ability*/
+	kr_dbg(KR_MODE, "<1>. Get the local AN73 Base Page Ability ...\n");
+	get_bkp_an73_ability(&tBkpAn73Ability, 0, adapter);
+	/*2. Check the AN73 Interrupt Status*/
+	kr_dbg(KR_MODE, "<2>. Check the AN73 Interrupt Status ...\n");
+
+	/*3.1. Get the link partner AN73 Base Page Ability*/
+	kr_dbg(KR_MODE, "<3.1>. Get the link partner AN73 Base Page Ability ...\n");
+	get_bkp_an73_ability(&tLpBkpAn73Ability, 1, adapter);
+
+	/*3.2. Check the AN73 Link Ability with Link Partner*/
+	kr_dbg(KR_MODE, "<3.2>. Check the AN73 Link Ability with Link Partner ...\n");
+	kr_dbg(KR_MODE, " Local Link Ability: 0x%x\n", tBkpAn73Ability.linkAbility);
+	kr_dbg(KR_MODE, " Link Partner Link Ability: 0x%x\n", tLpBkpAn73Ability.linkAbility);
+
+	chk_bkp_an73_ability(tBkpAn73Ability, tLpBkpAn73Ability, adapter);
+
+	/*Check the FEC and KR Training for KR mode*/
+	kr_dbg(KR_MODE, "<3.3>. Check the FEC for KR mode ...\n");
+	fecAbility = tBkpAn73Ability.fecAbility & tLpBkpAn73Ability.fecAbility;
+	fec_en = fecAbility >= 0x1 ? TRUE : FALSE;
+	adapter->cur_fec_link = fec_en ?
+				TXGBE_PHY_FEC_BASER : TXGBE_PHY_FEC_OFF;
+	/* SR_PMA_KR_FEC_CTRL bit0 */
+	txgbe_wr32_epcs(hw, 0x100ab, fec_en);
+	e_dev_info("KR FEC is %s.\n", fec_en ? "enabled" : "disabled");
+
+	kr_dbg(KR_MODE, "\n<3.4>. Check the CL72 KR Training for KR mode ...\n");
+
+	ret = txgbe_cl72_trainning(adapter);
+	if (ret)
+		kr_dbg(KR_MODE, "Training failure\n");
+
+	return ret;
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_bp.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_bp.h
new file mode 100644
index 0000000000000000000000000000000000000000..d73cb2ba02963a3c74ecf86ff269e29622710768
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_bp.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+#ifndef _TXGBE_BP_H_
+#define _TXGBE_BP_H_
+
+#include "txgbe.h"
+#include "txgbe_type.h"
+#include "txgbe_hw.h"
+
+typedef enum {
+	ABILITY_1000BASE_KX,
+	ABILITY_10GBASE_KX4,
+	ABILITY_10GBASE_KR,
+	ABILITY_40GBASE_KR4,
+	ABILITY_40GBASE_CR4,
+	ABILITY_100GBASE_CR10,
+	ABILITY_100GBASE_KP4,
+	ABILITY_100GBASE_KR4,
+	ABILITY_100GBASE_CR4,
+	ABILITY_25GBASE_KRCR_S,
+	ABILITY_25GBASE_KRCR,
+	ABILITY_MAX,
+} ability_filed_encding;
+
+/* Backplane AN73 Base Page Ability struct*/
+typedef struct TBKPAN73ABILITY {
+	unsigned int nextPage; //Next Page (bit0)
+	unsigned int linkAbility; //Link Ability (bit[7:0])
+	unsigned int fecAbility; //FEC Request (bit1), FEC Enable (bit0)
+	unsigned int currentLinkMode; //current link mode for local device
+} bkpan73ability;
+
+#define kr_dbg(KR_MODE, fmt, arg...)
\ + do { \ + if (KR_MODE) \ + e_dev_info(fmt, ##arg); \ + } while (0) + +void txgbe_bp_down_event(struct txgbe_adapter *adapter); +void txgbe_bp_watchdog_event(struct txgbe_adapter *adapter); +int txgbe_bp_mode_setting(struct txgbe_adapter *adapter); +void txgbe_bp_close_protect(struct txgbe_adapter *adapter); +int handle_bkp_an73_flow(unsigned char bp_link_mode, struct txgbe_adapter *adapter); +int get_bkp_an73_ability(bkpan73ability *pt_bkp_an73_ability, unsigned char byLinkPartner, + struct txgbe_adapter *adapter); +int chk_bkp_an73_ability(bkpan73ability tBkpAn73Ability, bkpan73ability tLpBkpAn73Ability, + struct txgbe_adapter *adapter); +#endif + diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.c new file mode 100644 index 0000000000000000000000000000000000000000..0c5ac375427b0b4eab2ebd774d6eb31a5e61f0af --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.c @@ -0,0 +1,653 @@ +/* + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on txgbe_dcb.c, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "txgbe_type.h" +#include "txgbe_dcb.h" +#include "txgbe.h" + +/* + * txgbe_dcb_calculate_tc_credits - This calculates the ieee traffic class + * credits from the configured bandwidth percentages. Credits + * are the smallest unit programmable into the underlying + * hardware. The IEEE 802.1Qaz specification do not use bandwidth + * groups so this is much simplified from the CEE case. + */ +s32 txgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max, + int max_frame_size) +{ + int min_percent = 100; + int min_credit, multiplier; + int i; + + min_credit = ((max_frame_size / 2) + TXGBE_DCB_CREDIT_QUANTUM - 1) / + TXGBE_DCB_CREDIT_QUANTUM; + + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if (bw[i] < min_percent && bw[i]) + min_percent = bw[i]; + } + + multiplier = (min_credit / min_percent) + 1; + + /* Find out the hw credits for each TC */ + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + int val = min(bw[i] * multiplier, TXGBE_DCB_MAX_CREDIT_REFILL); + + if (val < min_credit) + val = min_credit; + refill[i] = (u16)val; + + max[i] = (u16)(bw[i] ? (bw[i]*TXGBE_DCB_MAX_CREDIT)/100 : min_credit); + } + + return 0; +} + +/** + * txgbe_dcb_calculate_tc_credits_cee - Calculates traffic class credits + * @txgbe_dcb_config: Struct containing DCB settings. + * @direction: Configuring either Tx or Rx. + * + * This function calculates the credits allocated to each traffic class. + * It should be called only after the rules are checked by + * txgbe_dcb_check_config_cee(). 
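+ * Refill and max credits are clamped so that every enabled TC can cover at least half of a maximum-sized frame.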
+ */ +s32 txgbe_dcb_calculate_tc_credits_cee(struct txgbe_hw *hw, + struct txgbe_dcb_config *dcb_config, + u32 max_frame_size, u8 direction) +{ + struct txgbe_dcb_tc_path *p; + u32 min_multiplier = 0; + u16 min_percent = 100; + s32 ret_val = 0; + /* Initialization values default for Tx settings */ + u32 min_credit = 0; + u32 credit_refill = 0; + u32 credit_max = 0; + u16 link_percentage = 0; + u8 bw_percent = 0; + u8 i; + + UNREFERENCED_PARAMETER(hw); + + if (dcb_config == NULL) { + ret_val = TXGBE_ERR_CONFIG; + goto out; + } + + min_credit = ((max_frame_size / 2) + TXGBE_DCB_CREDIT_QUANTUM - 1) / + TXGBE_DCB_CREDIT_QUANTUM; + + /* Find smallest link percentage */ + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + p = &dcb_config->tc_config[i].path[direction]; + bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; + link_percentage = p->bwg_percent; + + link_percentage = (link_percentage * bw_percent) / 100; + + if (link_percentage && link_percentage < min_percent) + min_percent = link_percentage; + } + + /* + * The ratio between traffic classes will control the bandwidth + * percentages seen on the wire. To calculate this ratio we use + * a multiplier. It is required that the refill credits must be + * larger than the max frame size so here we find the smallest + * multiplier that will allow all bandwidth percentages to be + * greater than the max frame size. + */ + min_multiplier = (min_credit / min_percent) + 1; + + /* Find out the link percentage for each TC first */ + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + p = &dcb_config->tc_config[i].path[direction]; + bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; + + link_percentage = p->bwg_percent; + /* Must be careful of integer division for very small nums */ + link_percentage = (link_percentage * bw_percent) / 100; + if (p->bwg_percent > 0 && link_percentage == 0) + link_percentage = 1; + + /* Save link_percentage for reference */ + p->link_percent = (u8)link_percentage; + + /* Calculate credit refill ratio using multiplier */ + credit_refill = min(link_percentage * min_multiplier, + (u32)TXGBE_DCB_MAX_CREDIT_REFILL); + + /* Refill at least minimum credit */ + if (credit_refill < min_credit) + credit_refill = min_credit; + + p->data_credits_refill = (u16)credit_refill; + + /* Calculate maximum credit for the TC */ + credit_max = (link_percentage * TXGBE_DCB_MAX_CREDIT) / 100; + + /* + * Adjustment based on rule checking, if the percentage + * of a TC is too small, the maximum credit may not be + * enough to send out a jumbo frame in data plane arbitration. + */ + if (credit_max < min_credit) + credit_max = min_credit; + + if (direction == TXGBE_DCB_TX_CONFIG) { + /* + * Adjustment based on rule checking, if the + * percentage of a TC is too small, the maximum + * credit may not be enough to send out a TSO + * packet in descriptor plane arbitration. + */ + + dcb_config->tc_config[i].desc_credits_max = + (u16)credit_max; + } + + p->data_credits_max = (u16)credit_max; + } + +out: + return ret_val; +} + +/** + * txgbe_dcb_unpack_pfc_cee - Unpack dcb_config PFC info + * @cfg: dcb configuration to unpack into hardware consumable fields + * @map: user priority to traffic class map + * @pfc_up: u8 to store user priority PFC bitmask + * + * This unpacks the dcb configuration PFC info which is stored per + * traffic class into a 8bit user priority bitmask that can be + * consumed by hardware routines. The priority to tc map must be + * updated before calling this routine to use current up-to maps. 
+ */ +void txgbe_dcb_unpack_pfc_cee(struct txgbe_dcb_config *cfg, u8 *map, u8 *pfc_up) +{ + struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int up; + + /* + * If the TC for this user priority has PFC enabled then set the + * matching bit in 'pfc_up' to reflect that PFC is enabled. + */ + for (*pfc_up = 0, up = 0; up < TXGBE_DCB_MAX_USER_PRIORITY; up++) { + if (tc_config[map[up]].pfc != txgbe_dcb_pfc_disabled) + *pfc_up |= 1 << up; + } +} + +void txgbe_dcb_unpack_refill_cee(struct txgbe_dcb_config *cfg, int direction, + u16 *refill) +{ + struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < TXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + refill[tc] = tc_config[tc].path[direction].data_credits_refill; +} + +void txgbe_dcb_unpack_max_cee(struct txgbe_dcb_config *cfg, u16 *max) +{ + struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < TXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + max[tc] = tc_config[tc].desc_credits_max; +} + +void txgbe_dcb_unpack_bwgid_cee(struct txgbe_dcb_config *cfg, int direction, + u8 *bwgid) +{ + struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < TXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + bwgid[tc] = tc_config[tc].path[direction].bwg_id; +} + +void txgbe_dcb_unpack_tsa_cee(struct txgbe_dcb_config *cfg, int direction, + u8 *tsa) +{ + struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < TXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + tsa[tc] = tc_config[tc].path[direction].tsa; +} + +u8 txgbe_dcb_get_tc_from_up(struct txgbe_dcb_config *cfg, int direction, u8 up) +{ + struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + u8 prio_mask = 1 << up; + u8 tc = cfg->num_tcs.pg_tcs; + + /* If tc is 0 then DCB is likely not enabled or supported */ + if (!tc) + goto out; + + /* + * Test from maximum TC to 1 and report the first match we find. If + * we find no match we can assume that the TC is 0 since the TC must + * be set for all user priorities + */ + for (tc--; tc; tc--) { + if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap) + break; + } +out: + return tc; +} + +void txgbe_dcb_unpack_map_cee(struct txgbe_dcb_config *cfg, int direction, + u8 *map) +{ + u8 up; + + for (up = 0; up < TXGBE_DCB_MAX_USER_PRIORITY; up++) + map[up] = txgbe_dcb_get_tc_from_up(cfg, direction, up); +} + + + + + + + + + + +/** + * txgbe_dcb_config_tc_stats - Config traffic class statistics + * @hw: pointer to hardware structure + * + * Configure queue statistics registers, all queues belonging to same traffic + * class uses a single set of queue statistics counters. + */ +s32 txgbe_dcb_config_tc_stats(struct txgbe_hw *hw, + struct txgbe_dcb_config *dcb_config) +{ + UNREFERENCED_PARAMETER(hw); + UNREFERENCED_PARAMETER(dcb_config); + return 0; +} + +/** + * txgbe_dcb_hw_config_cee - Config and enable DCB + * @hw: pointer to hardware structure + * @dcb_config: pointer to txgbe_dcb_config structure + * + * Configure dcb settings and enable dcb mode. 
+ */ +s32 txgbe_dcb_hw_config_cee(struct txgbe_hw *hw, + struct txgbe_dcb_config *dcb_config) +{ + s32 ret = TXGBE_NOT_IMPLEMENTED; + u8 pfc_en; + u8 tsa[TXGBE_DCB_MAX_TRAFFIC_CLASS]; + u8 bwgid[TXGBE_DCB_MAX_TRAFFIC_CLASS]; + u8 map[TXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; + u16 refill[TXGBE_DCB_MAX_TRAFFIC_CLASS]; + u16 max[TXGBE_DCB_MAX_TRAFFIC_CLASS]; + + /* Unpack CEE standard containers */ + txgbe_dcb_unpack_refill_cee(dcb_config, TXGBE_DCB_TX_CONFIG, refill); + txgbe_dcb_unpack_max_cee(dcb_config, max); + txgbe_dcb_unpack_bwgid_cee(dcb_config, TXGBE_DCB_TX_CONFIG, bwgid); + txgbe_dcb_unpack_tsa_cee(dcb_config, TXGBE_DCB_TX_CONFIG, tsa); + txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_TX_CONFIG, map); + + txgbe_dcb_config(hw, dcb_config); + ret = txgbe_dcb_hw_config(hw, + refill, max, bwgid, + tsa, map); + + txgbe_dcb_config_tc_stats(hw, dcb_config); + + if (!ret && dcb_config->pfc_mode_enable) { + txgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en); + ret = txgbe_dcb_config_pfc(hw, pfc_en, map); + } + + return ret; +} + +/* Helper routines to abstract HW specifics from DCB netlink ops */ +s32 txgbe_dcb_config_pfc(struct txgbe_hw *hw, u8 pfc_en, u8 *map) +{ + int ret = 0; + + u32 i, j, fcrtl, reg; + u8 max_tc = 0; + + /* Enable Transmit Priority Flow Control */ + wr32(hw, TXGBE_RDB_RFCC, TXGBE_RDB_RFCC_RFCE_PRIORITY); + + /* Enable Receive Priority Flow Control */ + reg = 0; + + if (pfc_en) + reg |= (TXGBE_MAC_RX_FLOW_CTRL_PFCE | 0x1); + + wr32(hw, TXGBE_MAC_RX_FLOW_CTRL, reg); + + for (i = 0; i < TXGBE_DCB_MAX_USER_PRIORITY; i++) { + if (map[i] > max_tc) + max_tc = map[i]; + } + + /* Configure PFC Tx thresholds per TC */ + for (i = 0; i <= max_tc; i++) { + int enabled = 0; + + for (j = 0; j < TXGBE_DCB_MAX_USER_PRIORITY; j++) { + if ((map[j] == i) && (pfc_en & (1 << j))) { + enabled = 1; + break; + } + } + + if (enabled) { + reg = (hw->fc.high_water[i] << 10) | + TXGBE_RDB_RFCH_XOFFE; + fcrtl = (hw->fc.low_water[i] << 10) | + TXGBE_RDB_RFCL_XONE; + wr32(hw, TXGBE_RDB_RFCL(i), fcrtl); + } else { + /* + * In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the Rx packet buffer size - 24KB. This allows + * the Tx switch to function even under heavy Rx + * workloads. + */ + reg = rd32(hw, TXGBE_RDB_PB_SZ(i)); + wr32(hw, TXGBE_RDB_RFCL(i), 0); + } + + wr32(hw, TXGBE_RDB_RFCH(i), reg); + } + + for (; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + wr32(hw, TXGBE_RDB_RFCL(i), 0); + wr32(hw, TXGBE_RDB_RFCH(i), 0); + } + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time | (hw->fc.pause_time << 16); + for (i = 0; i < (TXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) + wr32(hw, TXGBE_RDB_RFCV(i), reg); + + /* Configure flow control refresh threshold value */ + wr32(hw, TXGBE_RDB_RFCRT, hw->fc.pause_time / 2); + + return ret; +} + +s32 txgbe_dcb_hw_config(struct txgbe_hw *hw, u16 *refill, u16 *max, + u8 *bwg_id, u8 *tsa, u8 *map) +{ + txgbe_dcb_config_rx_arbiter(hw, refill, max, bwg_id, + tsa, map); + txgbe_dcb_config_tx_desc_arbiter(hw, refill, max, + bwg_id, tsa); + txgbe_dcb_config_tx_data_arbiter(hw, refill, max, + bwg_id, tsa, map); + + return 0; +} + +/** + * txgbe_dcb_config_rx_arbiter - Config Rx Data arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to txgbe_dcb_config structure + * + * Configure Rx Packet Arbiter and credits for each traffic class. 
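+ *
+ * Illustrative per-TC register value with assumed credits (not from the
+ * submission): refill = 0x80, max = 0x100, bwg_id = 0 and a non-strict
+ * TSA give reg = 0x80 | (0x100 << TXGBE_RDM_ARB_CFG_MCL_SHIFT) = 0x00100080.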
+ */ +s32 txgbe_dcb_config_rx_arbiter(struct txgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *tsa, + u8 *map) +{ + u32 reg = 0; + u32 credit_refill = 0; + u32 credit_max = 0; + u8 i = 0; + + /* + * Disable the arbiter before changing parameters + * (always enable recycle mode; WSP) + */ + reg = TXGBE_RDM_ARB_CTL_RRM | TXGBE_RDM_ARB_CTL_RAC | + TXGBE_RDM_ARB_CTL_ARBDIS; + wr32(hw, TXGBE_RDM_ARB_CTL, reg); + + /* + * map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding + * bits sets for the UPs that needs to be mappped to that TC. + * e.g if priorities 6 and 7 are to be mapped to a TC then the + * up_to_tc_bitmap value for that TC will be 11000000 in binary. + */ + reg = 0; + for (i = 0; i < TXGBE_DCB_MAX_USER_PRIORITY; i++) + reg |= (map[i] << (i * TXGBE_RDB_UP2TC_UP_SHIFT)); + + wr32(hw, TXGBE_RDB_UP2TC, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + credit_refill = refill[i]; + credit_max = max[i]; + reg = credit_refill | + (credit_max << TXGBE_RDM_ARB_CFG_MCL_SHIFT); + + reg |= (u32)(bwg_id[i]) << TXGBE_RDM_ARB_CFG_BWG_SHIFT; + + if (tsa[i] == txgbe_dcb_tsa_strict) + reg |= TXGBE_RDM_ARB_CFG_LSP; + + wr32(hw, TXGBE_RDM_ARB_CFG(i), reg); + } + + /* + * Configure Rx packet plane (recycle mode; WSP) and + * enable arbiter + */ + reg = TXGBE_RDM_ARB_CTL_RRM | TXGBE_RDM_ARB_CTL_RAC; + wr32(hw, TXGBE_RDM_ARB_CTL, reg); + + return 0; +} + +/** + * txgbe_dcb_config_tx_desc_arbiter - Config Tx Desc. arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to txgbe_dcb_config structure + * + * Configure Tx Descriptor Arbiter and credits for each traffic class. + */ +s32 txgbe_dcb_config_tx_desc_arbiter(struct txgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *tsa) +{ + u32 reg, max_credits; + u8 i; + + /* Clear the per-Tx queue credits; we use per-TC instead */ + for (i = 0; i < 128; i++) { + wr32(hw, TXGBE_TDM_VM_CREDIT(i), 0); + } + + /* Configure traffic class credits and priority */ + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + max_credits = max[i]; + reg = max_credits << TXGBE_TDM_PBWARB_CFG_MCL_SHIFT; + reg |= refill[i]; + reg |= (u32)(bwg_id[i]) << TXGBE_TDM_PBWARB_CFG_BWG_SHIFT; + + if (tsa[i] == txgbe_dcb_tsa_group_strict_cee) + reg |= TXGBE_TDM_PBWARB_CFG_GSP; + + if (tsa[i] == txgbe_dcb_tsa_strict) + reg |= TXGBE_TDM_PBWARB_CFG_LSP; + + wr32(hw, TXGBE_TDM_PBWARB_CFG(i), reg); + } + + /* + * Configure Tx descriptor plane (recycle mode; WSP) and + * enable arbiter + */ + reg = TXGBE_TDM_PBWARB_CTL_TDPAC | TXGBE_TDM_PBWARB_CTL_TDRM; + wr32(hw, TXGBE_TDM_PBWARB_CTL, reg); + + return 0; +} + +/** + * txgbe_dcb_config_tx_data_arbiter - Config Tx Data arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to txgbe_dcb_config structure + * + * Configure Tx Packet Arbiter and credits for each traffic class. + */ +s32 txgbe_dcb_config_tx_data_arbiter(struct txgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *tsa, + u8 *map) +{ + u32 reg; + u8 i; + + /* + * Disable the arbiter before changing parameters + * (always enable recycle mode; SP; arb delay) + */ + reg = TXGBE_TDB_PBRARB_CTL_TPPAC | TXGBE_TDB_PBRARB_CTL_TPRM | + TXGBE_RTTPCS_ARBDIS; + wr32(hw, TXGBE_TDB_PBRARB_CTL, reg); + + /* + * map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding + * bits sets for the UPs that needs to be mappped to that TC. + * e.g if priorities 6 and 7 are to be mapped to a TC then the + * up_to_tc_bitmap value for that TC will be 11000000 in binary. 
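+ *
+ * Illustrative example with an assumed map: map = {0, 0, 1, 1, 2, 2, 3, 3}
+ * and TXGBE_TDB_UP2TC_UP_SHIFT = 4 make the loop below build
+ * reg = 0x33221100 before it is written to TXGBE_TDB_UP2TC.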
+ */ + reg = 0; + for (i = 0; i < TXGBE_DCB_MAX_USER_PRIORITY; i++) + reg |= (map[i] << (i * TXGBE_TDB_UP2TC_UP_SHIFT)); + + wr32(hw, TXGBE_TDB_UP2TC, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + reg = refill[i]; + reg |= (u32)(max[i]) << TXGBE_TDB_PBRARB_CFG_MCL_SHIFT; + reg |= (u32)(bwg_id[i]) << TXGBE_TDB_PBRARB_CFG_BWG_SHIFT; + + if (tsa[i] == txgbe_dcb_tsa_group_strict_cee) + reg |= TXGBE_TDB_PBRARB_CFG_GSP; + + if (tsa[i] == txgbe_dcb_tsa_strict) + reg |= TXGBE_TDB_PBRARB_CFG_LSP; + + wr32(hw, TXGBE_TDB_PBRARB_CFG(i), reg); + } + + /* + * Configure Tx packet plane (recycle mode; SP; arb delay) and + * enable arbiter + */ + reg = TXGBE_TDB_PBRARB_CTL_TPPAC | TXGBE_TDB_PBRARB_CTL_TPRM; + wr32(hw, TXGBE_TDB_PBRARB_CTL, reg); + + return 0; +} + +/** + * txgbe_dcb_config - Configure general DCB parameters + * @hw: pointer to hardware structure + * @dcb_config: pointer to txgbe_dcb_config structure + * + * Configure general DCB parameters. + */ +s32 txgbe_dcb_config(struct txgbe_hw *hw, + struct txgbe_dcb_config *dcb_config) +{ + u32 n, value; + + struct txgbe_adapter *adapter = hw->back; + + if (dcb_config->vt_mode) + adapter->flags |= TXGBE_FLAG_VMDQ_ENABLED; + else + adapter->flags &= ~TXGBE_FLAG_VMDQ_ENABLED; + + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) { + if (dcb_config->num_tcs.pg_tcs == 8) + /* 8 TCs */ + value = TXGBE_CFG_PORT_CTL_NUM_TC_8 | + TXGBE_CFG_PORT_CTL_NUM_VT_16 | + TXGBE_CFG_PORT_CTL_DCB_EN; + else if (dcb_config->num_tcs.pg_tcs == 4) + /* 4 TCs */ + value = TXGBE_CFG_PORT_CTL_NUM_TC_4 | + TXGBE_CFG_PORT_CTL_NUM_VT_32 | + TXGBE_CFG_PORT_CTL_DCB_EN; + else if (adapter->ring_feature[RING_F_RSS].indices == 4) + value = TXGBE_CFG_PORT_CTL_NUM_VT_32; + else /* adapter->ring_feature[RING_F_RSS].indices <= 2 */ + value = TXGBE_CFG_PORT_CTL_NUM_VT_64; + } else { + if (dcb_config->num_tcs.pg_tcs == 8) + value = TXGBE_CFG_PORT_CTL_NUM_TC_8 | + TXGBE_CFG_PORT_CTL_DCB_EN; + else if (dcb_config->num_tcs.pg_tcs == 4) + value = TXGBE_CFG_PORT_CTL_NUM_TC_4 | + TXGBE_CFG_PORT_CTL_DCB_EN; + else + value = 0; + } + + value |= TXGBE_CFG_PORT_CTL_D_VLAN | TXGBE_CFG_PORT_CTL_QINQ; + wr32m(hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_NUM_TC_MASK | + TXGBE_CFG_PORT_CTL_NUM_VT_MASK | + TXGBE_CFG_PORT_CTL_DCB_EN | + TXGBE_CFG_PORT_CTL_D_VLAN | + TXGBE_CFG_PORT_CTL_QINQ, + value); + + /* Disable drop for all queues */ + for (n = 0; n < 4; n++) { + wr32(hw, TXGBE_RDM_PF_QDE(n), 0x0); + } + + return 0; +} + + + diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.h new file mode 100644 index 0000000000000000000000000000000000000000..7bb35bdfa1c30c53dde5e6a3b9c6976ebb6b21cd --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.h @@ -0,0 +1,208 @@ +/* + * WangXun 10 Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + * based on txgbe_dcb.h, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + + +#ifndef _TXGBE_DCB_H_ +#define _TXGBE_DCB_H_ + +#include "txgbe_type.h" + +/* DCB defines */ +/* DCB credit calculation defines */ +#define TXGBE_DCB_CREDIT_QUANTUM 64 +#define TXGBE_DCB_MAX_CREDIT_REFILL 200 /* 200 * 64B = 12800B */ +#define TXGBE_DCB_MAX_TSO_SIZE (32 * 1024) /* Max TSO pkt size in DCB*/ +#define TXGBE_DCB_MAX_CREDIT (2 * TXGBE_DCB_MAX_CREDIT_REFILL) + +/* 513 for 32KB TSO packet */ +#define TXGBE_DCB_MIN_TSO_CREDIT \ + ((TXGBE_DCB_MAX_TSO_SIZE / TXGBE_DCB_CREDIT_QUANTUM) + 1) + +/* DCB configuration defines */ +#define TXGBE_DCB_MAX_USER_PRIORITY 8 +#define TXGBE_DCB_MAX_BW_GROUP 8 +#define TXGBE_DCB_BW_PERCENT 100 + +#define TXGBE_DCB_TX_CONFIG 0 +#define TXGBE_DCB_RX_CONFIG 1 + +/* DCB capability defines */ +#define TXGBE_DCB_PG_SUPPORT 0x00000001 +#define TXGBE_DCB_PFC_SUPPORT 0x00000002 +#define TXGBE_DCB_BCN_SUPPORT 0x00000004 +#define TXGBE_DCB_UP2TC_SUPPORT 0x00000008 +#define TXGBE_DCB_GSP_SUPPORT 0x00000010 + +/* DCB register definitions */ +#define TXGBE_TDM_PBWARB_CTL_TDPAC 0x00000001 /* 0 Round Robin, + * 1 WSP - Weighted Strict Priority + */ +#define TXGBE_TDM_PBWARB_CTL_TDRM 0x00000010 /* Transmit Recycle Mode */ +#define TXGBE_TDM_PBWARB_CTL_ARBDIS 0x00000040 /* DCB arbiter disable */ + + +/* Receive UP2TC mapping */ +#define TXGBE_RDB_UP2TC_UP_SHIFT 4 +#define TXGBE_RDB_UP2TC_UP_MASK 7 +/* Transmit UP2TC mapping */ +#define TXGBE_TDB_UP2TC_UP_SHIFT 4 + +#define TXGBE_RDM_ARB_CFG_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ +#define TXGBE_RDM_ARB_CFG_BWG_SHIFT 9 /* Offset to BWG index */ +#define TXGBE_RDM_ARB_CFG_GSP 0x40000000 /* GSP enable bit */ +#define TXGBE_RDM_ARB_CFG_LSP 0x80000000 /* LSP enable bit */ + +/* RTRPCS Bit Masks */ +#define TXGBE_RDM_ARB_CTL_RRM 0x00000002 /* Receive Recycle Mode enable */ +/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ +#define TXGBE_RDM_ARB_CTL_RAC 0x00000004 +#define TXGBE_RDM_ARB_CTL_ARBDIS 0x00000040 /* Arbitration disable bit */ + +/* RTTDT2C Bit Masks */ +#define TXGBE_TDM_PBWARB_CFG_MCL_SHIFT 12 +#define TXGBE_TDM_PBWARB_CFG_BWG_SHIFT 9 +#define TXGBE_TDM_PBWARB_CFG_GSP 0x40000000 +#define TXGBE_TDM_PBWARB_CFG_LSP 0x80000000 + +#define TXGBE_TDB_PBRARB_CFG_MCL_SHIFT 12 +#define TXGBE_TDB_PBRARB_CFG_BWG_SHIFT 9 +#define TXGBE_TDB_PBRARB_CFG_GSP 0x40000000 +#define TXGBE_TDB_PBRARB_CFG_LSP 0x80000000 + +/* RTTPCS Bit Masks */ +#define TXGBE_TDB_PBRARB_CTL_TPPAC 0x00000020 /* 0 Round Robin, + * 1 SP - Strict Priority + */ +#define TXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */ +#define TXGBE_TDB_PBRARB_CTL_TPRM 0x00000100 /* Transmit Recycle Mode enable*/ + +#define TXGBE_TDM_PB_THRE_DCB 0xA /* THRESH value for DCB mode */ + + +struct txgbe_dcb_support { + u32 capabilities; /* DCB capabilities */ + + /* Each bit represents a number of TCs configurable in the hw. + * If 8 traffic classes can be configured, the value is 0x80. 
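+ *
+ * For example (illustrative), hardware configurable for either 4 or 8
+ * traffic classes would report 0x88 (bit 3 and bit 7 set).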
*/ + u8 traffic_classes; + u8 pfc_traffic_classes; +}; + +enum txgbe_dcb_tsa { + txgbe_dcb_tsa_ets = 0, + txgbe_dcb_tsa_group_strict_cee, + txgbe_dcb_tsa_strict +}; + +/* Traffic class bandwidth allocation per direction */ +struct txgbe_dcb_tc_path { + u8 bwg_id; /* Bandwidth Group (BWG) ID */ + u8 bwg_percent; /* % of BWG's bandwidth */ + u8 link_percent; /* % of link bandwidth */ + u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */ + u16 data_credits_refill; /* Credit refill amount in 64B granularity */ + u16 data_credits_max; /* Max credits for a configured packet buffer + * in 64B granularity.*/ + enum txgbe_dcb_tsa tsa; /* Link or Group Strict Priority */ +}; + +enum txgbe_dcb_pfc { + txgbe_dcb_pfc_disabled = 0, + txgbe_dcb_pfc_enabled, + txgbe_dcb_pfc_enabled_txonly, + txgbe_dcb_pfc_enabled_rxonly +}; + +/* Traffic class configuration */ +struct txgbe_dcb_tc_config { + struct txgbe_dcb_tc_path path[2]; /* One each for Tx/Rx */ + enum txgbe_dcb_pfc pfc; /* Class based flow control setting */ + + u16 desc_credits_max; /* For Tx Descriptor arbitration */ + u8 tc; /* Traffic class (TC) */ +}; + +enum txgbe_dcb_pba { + /* PBA[0-7] each use 64KB FIFO */ + txgbe_dcb_pba_equal = PBA_STRATEGY_EQUAL, + /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */ + txgbe_dcb_pba_80_48 = PBA_STRATEGY_WEIGHTED +}; + +struct txgbe_dcb_num_tcs { + u8 pg_tcs; + u8 pfc_tcs; +}; + +struct txgbe_dcb_config { + struct txgbe_dcb_tc_config tc_config[TXGBE_DCB_MAX_TRAFFIC_CLASS]; + struct txgbe_dcb_support support; + struct txgbe_dcb_num_tcs num_tcs; + u8 bw_percentage[2][TXGBE_DCB_MAX_BW_GROUP]; /* One each for Tx/Rx */ + bool pfc_mode_enable; + bool round_robin_enable; + + enum txgbe_dcb_pba rx_pba_cfg; + + u32 dcb_cfg_version; /* Not used...OS-specific? */ + u32 link_speed; /* For bandwidth allocation validation purpose */ + bool vt_mode; +}; + +/* DCB driver APIs */ + +/* DCB credits calculation */ +s32 txgbe_dcb_calculate_tc_credits(u8 *, u16 *, u16 *, int); +s32 txgbe_dcb_calculate_tc_credits_cee(struct txgbe_hw *, + struct txgbe_dcb_config *, u32, u8); + +/* DCB PFC */ +s32 txgbe_dcb_config_pfc(struct txgbe_hw *, u8, u8 *); + +/* DCB stats */ +s32 txgbe_dcb_config_tc_stats(struct txgbe_hw *, + struct txgbe_dcb_config *); + +/* DCB config arbiters */ +s32 txgbe_dcb_config_tx_desc_arbiter(struct txgbe_hw *, u16 *, u16 *, + u8 *, u8 *); +s32 txgbe_dcb_config_tx_data_arbiter(struct txgbe_hw *, u16 *, u16 *, + u8 *, u8 *, u8 *); +s32 txgbe_dcb_config_rx_arbiter(struct txgbe_hw *, u16 *, u16 *, u8 *, + u8 *, u8 *); + +/* DCB unpack routines */ +void txgbe_dcb_unpack_pfc_cee(struct txgbe_dcb_config *, u8 *, u8 *); +void txgbe_dcb_unpack_refill_cee(struct txgbe_dcb_config *, int, u16 *); +void txgbe_dcb_unpack_max_cee(struct txgbe_dcb_config *, u16 *); +void txgbe_dcb_unpack_bwgid_cee(struct txgbe_dcb_config *, int, u8 *); +void txgbe_dcb_unpack_tsa_cee(struct txgbe_dcb_config *, int, u8 *); +void txgbe_dcb_unpack_map_cee(struct txgbe_dcb_config *, int, u8 *); +u8 txgbe_dcb_get_tc_from_up(struct txgbe_dcb_config *, int, u8); + +/* DCB initialization */ +s32 txgbe_dcb_config(struct txgbe_hw *, + struct txgbe_dcb_config *); +s32 txgbe_dcb_hw_config(struct txgbe_hw *, u16 *, u16 *, u8 *, u8 *, u8 *); +s32 txgbe_dcb_hw_config_cee(struct txgbe_hw *, struct txgbe_dcb_config *); +#endif /* _TXGBE_DCB_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb_nl.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb_nl.c new file mode 100644 index 
0000000000000000000000000000000000000000..0ecca0a348cbbaac4613d517b22a113e80598848 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb_nl.c @@ -0,0 +1,869 @@ +/* + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on txgbe_dcb_nl.c, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + + +#include "txgbe.h" + +#if IS_ENABLED(CONFIG_DCB) +#include +#include "txgbe_dcb.h" + +/* Callbacks for DCB netlink in the kernel */ +#define BIT_DCB_MODE 0x01 +#define BIT_PFC 0x02 +#define BIT_PG_RX 0x04 +#define BIT_PG_TX 0x08 +#define BIT_APP_UPCHG 0x10 +#define BIT_RESETLINK 0x40 +#define BIT_LINKSPEED 0x80 + +/* Responses for the DCB_C_SET_ALL command */ +#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */ +#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ +#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ + +int txgbe_copy_dcb_cfg(struct txgbe_adapter *adapter, int tc_max) +{ + struct txgbe_dcb_config *scfg = &adapter->temp_dcb_cfg; + struct txgbe_dcb_config *dcfg = &adapter->dcb_cfg; + struct txgbe_dcb_tc_config *src = NULL; + struct txgbe_dcb_tc_config *dst = NULL; + int i, j; + int tx = TXGBE_DCB_TX_CONFIG; + int rx = TXGBE_DCB_RX_CONFIG; + int changes = 0; + +#if IS_ENABLED(CONFIG_FCOE) + if (adapter->fcoe.up_set != adapter->fcoe.up) + changes |= BIT_APP_UPCHG; +#endif /* CONFIG_FCOE */ + + for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { + src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0]; + dst = &dcfg->tc_config[i - DCB_PG_ATTR_TC_0]; + + if (dst->path[tx].tsa != src->path[tx].tsa) { + dst->path[tx].tsa = src->path[tx].tsa; + changes |= BIT_PG_TX; + } + + if (dst->path[tx].bwg_id != src->path[tx].bwg_id) { + dst->path[tx].bwg_id = src->path[tx].bwg_id; + changes |= BIT_PG_TX; + } + + if (dst->path[tx].bwg_percent != src->path[tx].bwg_percent) { + dst->path[tx].bwg_percent = src->path[tx].bwg_percent; + changes |= BIT_PG_TX; + } + + if (dst->path[tx].up_to_tc_bitmap != + src->path[tx].up_to_tc_bitmap) { + dst->path[tx].up_to_tc_bitmap = + src->path[tx].up_to_tc_bitmap; + changes |= (BIT_PG_TX | BIT_PFC | BIT_APP_UPCHG); + } + + if (dst->path[rx].tsa != src->path[rx].tsa) { + dst->path[rx].tsa = src->path[rx].tsa; + changes |= BIT_PG_RX; + } + + if (dst->path[rx].bwg_id != src->path[rx].bwg_id) { + dst->path[rx].bwg_id = src->path[rx].bwg_id; + changes |= BIT_PG_RX; + } + + if (dst->path[rx].bwg_percent != src->path[rx].bwg_percent) { + dst->path[rx].bwg_percent = src->path[rx].bwg_percent; + changes |= BIT_PG_RX; + } + + if (dst->path[rx].up_to_tc_bitmap != + src->path[rx].up_to_tc_bitmap) { + dst->path[rx].up_to_tc_bitmap = + src->path[rx].up_to_tc_bitmap; + changes |= (BIT_PG_RX | BIT_PFC | BIT_APP_UPCHG); + } + } + + for (i = 
DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) { + j = i - DCB_PG_ATTR_BW_ID_0; + + if (dcfg->bw_percentage[tx][j] != scfg->bw_percentage[tx][j]) { + dcfg->bw_percentage[tx][j] = scfg->bw_percentage[tx][j]; + changes |= BIT_PG_TX; + } + if (dcfg->bw_percentage[rx][j] != scfg->bw_percentage[rx][j]) { + dcfg->bw_percentage[rx][j] = scfg->bw_percentage[rx][j]; + changes |= BIT_PG_RX; + } + } + + for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) { + j = i - DCB_PFC_UP_ATTR_0; + if (dcfg->tc_config[j].pfc != scfg->tc_config[j].pfc) { + dcfg->tc_config[j].pfc = scfg->tc_config[j].pfc; + changes |= BIT_PFC; + } + } + + if (dcfg->pfc_mode_enable != scfg->pfc_mode_enable) { + dcfg->pfc_mode_enable = scfg->pfc_mode_enable; + changes |= BIT_PFC; + } + + return changes; +} + +static u8 txgbe_dcbnl_get_state(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + return !!(adapter->flags & TXGBE_FLAG_DCB_ENABLED); +} + +static u8 txgbe_dcbnl_set_state(struct net_device *netdev, u8 state) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + int err = 0; + + /* Fail command if not in CEE mode */ + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return 1; + + /* verify there is something to do, if not then exit */ + if (!state == !(adapter->flags & TXGBE_FLAG_DCB_ENABLED)) + goto out; + + err = txgbe_setup_tc(netdev, + state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0); +out: + return !!err; +} + +static void txgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, + u8 *perm_addr) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + int i, j; + + memset(perm_addr, 0xff, MAX_ADDR_LEN); + + for (i = 0; i < netdev->addr_len; i++) + perm_addr[i] = adapter->hw.mac.perm_addr[i]; + + for (j = 0; j < netdev->addr_len; j++, i++) + perm_addr[i] = adapter->hw.mac.san_addr[j]; + +} + +static void txgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, + u8 prio, u8 bwg_id, u8 bw_pct, + u8 up_map) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + if (prio != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[0].tsa = prio; + if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id; + if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent = + bw_pct; + if (up_map != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap = + up_map; +} + +static void txgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, + u8 bw_pct) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; +} + +static void txgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, + u8 prio, u8 bwg_id, u8 bw_pct, + u8 up_map) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + if (prio != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[1].tsa = prio; + if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id; + if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent = + bw_pct; + if (up_map != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap = + up_map; +} + +static void txgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, + u8 bw_pct) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; +} + +static void 
txgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, + u8 *prio, u8 *bwg_id, u8 *bw_pct, + u8 *up_map) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + *prio = adapter->dcb_cfg.tc_config[tc].path[0].tsa; + *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id; + *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent; + *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap; +} + +static void txgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, + u8 *bw_pct) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id]; +} + +static void txgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, + u8 *prio, u8 *bwg_id, u8 *bw_pct, + u8 *up_map) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + *prio = adapter->dcb_cfg.tc_config[tc].path[1].tsa; + *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id; + *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent; + *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap; +} + +static void txgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, + u8 *bw_pct) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id]; +} + +static void txgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int up, u8 pfc) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + u8 tc = txgbe_dcb_get_tc_from_up(&adapter->temp_dcb_cfg, 0, up); + + adapter->temp_dcb_cfg.tc_config[tc].pfc = pfc; + if (adapter->temp_dcb_cfg.tc_config[tc].pfc != + adapter->dcb_cfg.tc_config[tc].pfc) + adapter->temp_dcb_cfg.pfc_mode_enable = true; +} + +static void txgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int up, u8 *pfc) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + u8 tc = txgbe_dcb_get_tc_from_up(&adapter->dcb_cfg, 0, up); + *pfc = adapter->dcb_cfg.tc_config[tc].pfc; +} + +static void txgbe_dcbnl_devreset(struct net_device *dev) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + + while (test_and_set_bit(__TXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (netif_running(dev)) +#ifdef HAVE_NET_DEVICE_OPS + dev->netdev_ops->ndo_stop(dev); +#else + dev->stop(dev); +#endif + + txgbe_clear_interrupt_scheme(adapter); + txgbe_init_interrupt_scheme(adapter); + + if (netif_running(dev)) +#ifdef HAVE_NET_DEVICE_OPS + dev->netdev_ops->ndo_open(dev); +#else + dev->open(dev); +#endif + + clear_bit(__TXGBE_RESETTING, &adapter->state); +} + +static u8 txgbe_dcbnl_set_all(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; + struct txgbe_hw *hw = &adapter->hw; + int ret = DCB_NO_HW_CHG; + u8 prio_tc[TXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; + + /* Fail command if not in CEE mode */ + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return ret; + + adapter->dcb_set_bitmap |= txgbe_copy_dcb_cfg(adapter, + TXGBE_DCB_MAX_TRAFFIC_CLASS); + if (!adapter->dcb_set_bitmap) + return ret; + + txgbe_dcb_unpack_map_cee(dcb_cfg, TXGBE_DCB_TX_CONFIG, prio_tc); + + if (adapter->dcb_set_bitmap & (BIT_PG_TX | BIT_PG_RX)) { + /* Priority to TC mapping in CEE case default to 1:1 */ + int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; +#ifdef HAVE_MQPRIO + int i; +#endif + +#if IS_ENABLED(CONFIG_FCOE) + if (adapter->netdev->features & NETIF_F_FCOE_MTU) + max_frame = max(max_frame, TXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif + + txgbe_dcb_calculate_tc_credits_cee(hw, dcb_cfg, 
max_frame, + TXGBE_DCB_TX_CONFIG); + + txgbe_dcb_calculate_tc_credits_cee(hw, dcb_cfg, max_frame, + TXGBE_DCB_RX_CONFIG); + + txgbe_dcb_hw_config_cee(hw, dcb_cfg); + +#ifdef HAVE_MQPRIO + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + netdev_set_prio_tc_map(netdev, i, prio_tc[i]); +#endif /* HAVE_MQPRIO */ + ret = DCB_HW_CHG_RST; + } + + if (adapter->dcb_set_bitmap & BIT_PFC) { + if (dcb_cfg->pfc_mode_enable) { + u8 pfc_en; + txgbe_dcb_unpack_pfc_cee(dcb_cfg, prio_tc, &pfc_en); + txgbe_dcb_config_pfc(hw, pfc_en, prio_tc); + } else { + TCALL(hw, mac.ops.fc_enable); + } + txgbe_set_rx_drop_en(adapter); + if (ret != DCB_HW_CHG_RST) + ret = DCB_HW_CHG; + } + +#if IS_ENABLED(CONFIG_FCOE) + /* Reprogam FCoE hardware offloads when the traffic class + * FCoE is using changes. This happens if the APP info + * changes or the up2tc mapping is updated. + */ + if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { + adapter->fcoe.up_set = adapter->fcoe.up; + txgbe_dcbnl_devreset(netdev); + ret = DCB_HW_CHG_RST; + } +#endif /* CONFIG_FCOE */ + + adapter->dcb_set_bitmap = 0x00; + return ret; +} + +static u8 txgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) +{ +#ifdef HAVE_DCBNL_IEEE + struct txgbe_adapter *adapter = netdev_priv(netdev); +#endif + + switch (capid) { + case DCB_CAP_ATTR_PG: + *cap = true; + break; + case DCB_CAP_ATTR_PFC: + *cap = true; + break; + case DCB_CAP_ATTR_UP2TC: + *cap = false; + break; + case DCB_CAP_ATTR_PG_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_GSP: + *cap = true; + break; + case DCB_CAP_ATTR_BCN: + *cap = false; + break; +#ifdef HAVE_DCBNL_IEEE + case DCB_CAP_ATTR_DCBX: + *cap = adapter->dcbx_cap; + break; +#endif + default: + *cap = false; + break; + } + + return 0; +} + +#ifdef NUMTCS_RETURNS_U8 +static u8 txgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +#else +static int txgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +#endif +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + u8 rval = 0; + + if (adapter->flags & TXGBE_FLAG_DCB_ENABLED) { + switch (tcid) { + case DCB_NUMTCS_ATTR_PG: + *num = adapter->dcb_cfg.num_tcs.pg_tcs; + break; + case DCB_NUMTCS_ATTR_PFC: + *num = adapter->dcb_cfg.num_tcs.pfc_tcs; + break; + default: + rval = -EINVAL; + break; + } + } else { + rval = -EINVAL; + } + + return rval; +} + +#ifdef NUMTCS_RETURNS_U8 +static u8 txgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) +#else +static int txgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) +#endif +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + u8 rval = 0; + + if (adapter->flags & TXGBE_FLAG_DCB_ENABLED) { + switch (tcid) { + case DCB_NUMTCS_ATTR_PG: + adapter->dcb_cfg.num_tcs.pg_tcs = num; + break; + case DCB_NUMTCS_ATTR_PFC: + adapter->dcb_cfg.num_tcs.pfc_tcs = num; + break; + default: + rval = -EINVAL; + break; + } + } else { + rval = -EINVAL; + } + + return rval; +} + +static u8 txgbe_dcbnl_getpfcstate(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + return adapter->dcb_cfg.pfc_mode_enable; +} + +static void txgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + adapter->temp_dcb_cfg.pfc_mode_enable = state; + return; +} + +#ifdef HAVE_DCBNL_OPS_GETAPP +/** + * txgbe_dcbnl_getapp - retrieve the DCBX application user priority + * @netdev : the corresponding netdev + * @idtype : identifies the id as ether type or TCP/UDP port number 
+ * @id: id is either ether type or TCP/UDP port number + * + * Returns : on success, returns a non-zero 802.1p user priority bitmap + * otherwise returns 0 as the invalid user priority bitmap to indicate an + * error. + */ +#ifdef HAVE_DCBNL_OPS_SETAPP_RETURN_INT +static int txgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) +#else +static u8 txgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) +#endif +{ + u8 rval = 0; +#ifdef HAVE_DCBNL_IEEE + struct dcb_app app = { + .selector = idtype, + .protocol = id, + }; + + rval = dcb_getapp(netdev, &app); +#endif + + switch (idtype) { + case DCB_APP_IDTYPE_ETHTYPE: +#if IS_ENABLED(CONFIG_FCOE) + if (id == ETH_P_FCOE) + rval = txgbe_fcoe_getapp(netdev); +#endif + break; + case DCB_APP_IDTYPE_PORTNUM: + break; + default: + break; + } + + return rval; +} + +/** + * txgbe_dcbnl_setapp - set the DCBX application user priority + * @netdev : the corresponding netdev + * @idtype : identifies the id as ether type or TCP/UDP port number + * @id: id is either ether type or TCP/UDP port number + * @up: the 802.1p user priority bitmap + * + * Returns : 0 on success or 1 on error + */ +#ifdef HAVE_DCBNL_OPS_SETAPP_RETURN_INT +static int txgbe_dcbnl_setapp(struct net_device *netdev, +#else +static u8 txgbe_dcbnl_setapp(struct net_device *netdev, +#endif + u8 idtype, u16 id, u8 up) +{ + int err = 0; +#ifdef HAVE_DCBNL_IEEE + struct dcb_app app; + + app.selector = idtype; + app.protocol = id; + app.priority = up; + err = dcb_setapp(netdev, &app); +#endif + + switch (idtype) { + case DCB_APP_IDTYPE_ETHTYPE: +#if IS_ENABLED(CONFIG_FCOE) + if (id == ETH_P_FCOE) { + struct txgbe_adapter *adapter = netdev_priv(netdev); + + adapter->fcoe.up = up ? ffs(up) - 1 : TXGBE_FCOE_DEFUP; + } +#endif + break; + case DCB_APP_IDTYPE_PORTNUM: + break; + default: + break; + } + + return err; +} +#endif /* HAVE_DCBNL_OPS_GETAPP */ + +#ifdef HAVE_DCBNL_IEEE +static int txgbe_dcbnl_ieee_getets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + struct ieee_ets *my_ets = adapter->txgbe_ieee_ets; + + /* No IEEE PFC settings available */ + if (!my_ets) + return -EINVAL; + + ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs; + ets->cbs = my_ets->cbs; + memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)); + memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)); + memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)); + return 0; +} + +static int txgbe_dcbnl_ieee_setets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; + int i, err = 0; + __u8 max_tc = 0; + __u8 map_chg = 0; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + if (!adapter->txgbe_ieee_ets) { + adapter->txgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets), + GFP_KERNEL); + if (!adapter->txgbe_ieee_ets) + return -ENOMEM; + /* initialize UP2TC mappings to invalid value */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + adapter->txgbe_ieee_ets->prio_tc[i] = + IEEE_8021QAZ_MAX_TCS; + /* if possible update UP2TC mappings from HW */ + TCALL(&adapter->hw, mac.ops.get_rtrup2tc, + adapter->txgbe_ieee_ets->prio_tc); + } + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->prio_tc[i] > max_tc) + max_tc = ets->prio_tc[i]; + if (ets->prio_tc[i] != adapter->txgbe_ieee_ets->prio_tc[i]) + map_chg = 1; + } + + memcpy(adapter->txgbe_ieee_ets, ets, 
sizeof(*adapter->txgbe_ieee_ets)); + + if (max_tc) + max_tc++; + + if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs) + return -EINVAL; + + if (max_tc != netdev_get_num_tc(dev)) + err = txgbe_setup_tc(dev, max_tc); + else if (map_chg) + txgbe_dcbnl_devreset(dev); + + if (err) + goto err_out; + + err = txgbe_dcb_hw_ets(&adapter->hw, ets, max_frame); +err_out: + return err; +} + +static int txgbe_dcbnl_ieee_getpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + struct ieee_pfc *my_pfc = adapter->txgbe_ieee_pfc; + int i; + + /* No IEEE PFC settings available */ + if (!my_pfc) + return -EINVAL; + + pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs; + pfc->pfc_en = my_pfc->pfc_en; + pfc->mbc = my_pfc->mbc; + pfc->delay = my_pfc->delay; + + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + pfc->requests[i] = adapter->stats.pxoffrxc[i]; + pfc->indications[i] = adapter->stats.pxofftxc[i]; + } + + return 0; +} + +static int txgbe_dcbnl_ieee_setpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + struct txgbe_hw *hw = &adapter->hw; + u8 *prio_tc; + int err; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + if (!adapter->txgbe_ieee_pfc) { + adapter->txgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc), + GFP_KERNEL); + if (!adapter->txgbe_ieee_pfc) + return -ENOMEM; + } + + prio_tc = adapter->txgbe_ieee_ets->prio_tc; + memcpy(adapter->txgbe_ieee_pfc, pfc, sizeof(*adapter->txgbe_ieee_pfc)); + + + /* Enable link flow control parameters if PFC is disabled */ + if (pfc->pfc_en) + err = txgbe_dcb_config_pfc(hw, pfc->pfc_en, prio_tc); + else + err = TCALL(hw, mac.ops.fc_enable); + + txgbe_set_rx_drop_en(adapter); + + return err; +} + +static int txgbe_dcbnl_ieee_setapp(struct net_device *dev, + struct dcb_app *app) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + int err = -EINVAL; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return err; + + err = dcb_ieee_setapp(dev, app); + +#if IS_ENABLED(CONFIG_FCOE) + if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == ETH_P_FCOE) { + u8 app_mask = dcb_ieee_getapp_mask(dev, app); + + if (app_mask & (1 << adapter->fcoe.up)) + return err; + + adapter->fcoe.up = app->priority; + adapter->fcoe.up_set = adapter->fcoe.up; + txgbe_dcbnl_devreset(dev); + } +#endif + return 0; +} + +#ifdef HAVE_DCBNL_IEEE_DELAPP +static int txgbe_dcbnl_ieee_delapp(struct net_device *dev, + struct dcb_app *app) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + int err; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + err = dcb_ieee_delapp(dev, app); + +#if IS_ENABLED(CONFIG_FCOE) + if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == ETH_P_FCOE) { + u8 app_mask = dcb_ieee_getapp_mask(dev, app); + + if (app_mask & (1 << adapter->fcoe.up)) + return err; + + adapter->fcoe.up = app_mask ? 
+ ffs(app_mask) - 1 : TXGBE_FCOE_DEFUP; + txgbe_dcbnl_devreset(dev); + } +#endif + return err; +} +#endif /* HAVE_DCBNL_IEEE_DELAPP */ + +static u8 txgbe_dcbnl_getdcbx(struct net_device *dev) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + return adapter->dcbx_cap; +} + +static u8 txgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + struct ieee_ets ets = { .ets_cap = 0 }; + struct ieee_pfc pfc = { .pfc_en = 0 }; + + /* no support for LLD_MANAGED modes or CEE+IEEE */ + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || + ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || + !(mode & DCB_CAP_DCBX_HOST)) + return 1; + + if (mode == adapter->dcbx_cap) + return 0; + + adapter->dcbx_cap = mode; + + /* ETS and PFC defaults */ + ets.ets_cap = 8; + pfc.pfc_cap = 8; + + if (mode & DCB_CAP_DCBX_VER_IEEE) { + txgbe_dcbnl_ieee_setets(dev, &ets); + txgbe_dcbnl_ieee_setpfc(dev, &pfc); + } else if (mode & DCB_CAP_DCBX_VER_CEE) { + u8 mask = (BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG); + + adapter->dcb_set_bitmap |= mask; + txgbe_dcbnl_set_all(dev); + } else { + /* Drop into single TC mode strict priority as this + * indicates CEE and IEEE versions are disabled + */ + txgbe_dcbnl_ieee_setets(dev, &ets); + txgbe_dcbnl_ieee_setpfc(dev, &pfc); + txgbe_setup_tc(dev, 0); + } + + return 0; +} +#endif + +struct dcbnl_rtnl_ops dcbnl_ops = { +#ifdef HAVE_DCBNL_IEEE + .ieee_getets = txgbe_dcbnl_ieee_getets, + .ieee_setets = txgbe_dcbnl_ieee_setets, + .ieee_getpfc = txgbe_dcbnl_ieee_getpfc, + .ieee_setpfc = txgbe_dcbnl_ieee_setpfc, + .ieee_setapp = txgbe_dcbnl_ieee_setapp, +#ifdef HAVE_DCBNL_IEEE_DELAPP + .ieee_delapp = txgbe_dcbnl_ieee_delapp, +#endif +#endif + .getstate = txgbe_dcbnl_get_state, + .setstate = txgbe_dcbnl_set_state, + .getpermhwaddr = txgbe_dcbnl_get_perm_hw_addr, + .setpgtccfgtx = txgbe_dcbnl_set_pg_tc_cfg_tx, + .setpgbwgcfgtx = txgbe_dcbnl_set_pg_bwg_cfg_tx, + .setpgtccfgrx = txgbe_dcbnl_set_pg_tc_cfg_rx, + .setpgbwgcfgrx = txgbe_dcbnl_set_pg_bwg_cfg_rx, + .getpgtccfgtx = txgbe_dcbnl_get_pg_tc_cfg_tx, + .getpgbwgcfgtx = txgbe_dcbnl_get_pg_bwg_cfg_tx, + .getpgtccfgrx = txgbe_dcbnl_get_pg_tc_cfg_rx, + .getpgbwgcfgrx = txgbe_dcbnl_get_pg_bwg_cfg_rx, + .setpfccfg = txgbe_dcbnl_set_pfc_cfg, + .getpfccfg = txgbe_dcbnl_get_pfc_cfg, + .setall = txgbe_dcbnl_set_all, + .getcap = txgbe_dcbnl_getcap, + .getnumtcs = txgbe_dcbnl_getnumtcs, + .setnumtcs = txgbe_dcbnl_setnumtcs, + .getpfcstate = txgbe_dcbnl_getpfcstate, + .setpfcstate = txgbe_dcbnl_setpfcstate, +#ifdef HAVE_DCBNL_OPS_GETAPP + .getapp = txgbe_dcbnl_getapp, + .setapp = txgbe_dcbnl_setapp, +#endif +#ifdef HAVE_DCBNL_IEEE + .getdcbx = txgbe_dcbnl_getdcbx, + .setdcbx = txgbe_dcbnl_setdcbx, +#endif +}; + +#endif /* CONFIG_DCB */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_debugfs.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..8e78215d4033de799468d00244432b928ef38ac8 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_debugfs.c @@ -0,0 +1,801 @@ +/* + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on txgbe_debugfs.c, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + + +#include "txgbe.h" + +#ifdef HAVE_TXGBE_DEBUG_FS +#include +#include + +static struct dentry *txgbe_dbg_root; +static int txgbe_data_mode; + +#define TXGBE_DATA_FUNC(dm) ((dm) & ~0xFFFF) +#define TXGBE_DATA_ARGS(dm) ((dm) & 0xFFFF) +enum txgbe_data_func { + TXGBE_FUNC_NONE = (0 << 16), + TXGBE_FUNC_DUMP_BAR = (1 << 16), + TXGBE_FUNC_DUMP_RDESC = (2 << 16), + TXGBE_FUNC_DUMP_TDESC = (3 << 16), + TXGBE_FUNC_FLASH_READ = (4 << 16), + TXGBE_FUNC_FLASH_WRITE = (5 << 16), +}; + +/** + * data operation + **/ +static ssize_t +txgbe_simple_read_from_pcibar(struct txgbe_adapter *adapter, int res, + void __user *buf, size_t size, loff_t *ppos) +{ + loff_t pos = *ppos; + u32 miss, len, limit = pci_resource_len(adapter->pdev, res); + + if (pos < 0) + return 0; + + limit = (pos + size <= limit ? pos + size : limit); + for (miss = 0; pos < limit && !miss; buf += len, pos += len) { + u32 val = 0, reg = round_down(pos, 4); + u32 off = pos - reg; + + len = (reg + 4 <= limit ? 4 - off : 4 - off - (limit - reg - 4)); + val = txgbe_rd32(adapter->io_addr + reg); + miss = copy_to_user(buf, &val + off, len); + } + + size = pos - *ppos - miss; + *ppos += size; + + return size; +} + +static ssize_t +txgbe_simple_read_from_flash(struct txgbe_adapter *adapter, + void __user *buf, size_t size, loff_t *ppos) +{ + struct txgbe_hw *hw = &adapter->hw; + loff_t pos = *ppos; + size_t ret = 0; + loff_t rpos, rtail; + void __user *to = buf; + size_t available = adapter->hw.flash.dword_size << 2; + + if (pos < 0) + return -EINVAL; + if (pos >= available || !size) + return 0; + if (size > available - pos) + size = available - pos; + + rpos = round_up(pos, 4); + rtail = round_down(pos + size, 4); + if (rtail < rpos) + return 0; + + to += rpos - pos; + while (rpos <= rtail) { + u32 value = txgbe_rd32(adapter->io_addr + rpos); + if (TCALL(hw, flash.ops.write_buffer, rpos>>2, 1, &value)) { + ret = size; + break; + } + if (4 == copy_to_user(to, &value, 4)) { + ret = size; + break; + } + to += 4; + rpos += 4; + } + + if (ret == size) + return -EFAULT; + size -= ret; + *ppos = pos + size; + return size; +} + +static ssize_t +txgbe_simple_write_to_flash(struct txgbe_adapter *adapter, + const void __user *from, size_t size, loff_t *ppos, size_t available) +{ + return size; +} + +static ssize_t +txgbe_dbg_data_ops_read(struct file *filp, char __user *buffer, + size_t size, loff_t *ppos) +{ + struct txgbe_adapter *adapter = filp->private_data; + u32 func = TXGBE_DATA_FUNC(txgbe_data_mode); + + rmb(); + + switch (func) { + case TXGBE_FUNC_DUMP_BAR: { + u32 bar = TXGBE_DATA_ARGS(txgbe_data_mode); + + return txgbe_simple_read_from_pcibar(adapter, bar, buffer, size, + ppos); + } + case TXGBE_FUNC_FLASH_READ: { + return txgbe_simple_read_from_flash(adapter, buffer, size, ppos); + } + case TXGBE_FUNC_DUMP_RDESC: { + struct txgbe_ring *ring; + u32 queue = TXGBE_DATA_ARGS(txgbe_data_mode); + + if (queue >= adapter->num_rx_queues) + return 0; + queue += VMDQ_P(0) * 
adapter->queues_per_pool; + ring = adapter->rx_ring[queue]; + + return simple_read_from_buffer(buffer, size, ppos, + ring->desc, ring->size); + } + case TXGBE_FUNC_DUMP_TDESC: { + struct txgbe_ring *ring; + u32 queue = TXGBE_DATA_ARGS(txgbe_data_mode); + + if (queue >= adapter->num_tx_queues) + return 0; + queue += VMDQ_P(0) * adapter->queues_per_pool; + ring = adapter->tx_ring[queue]; + + return simple_read_from_buffer(buffer, size, ppos, + ring->desc, ring->size); + } + default: + break; + } + + return 0; +} + +static ssize_t +txgbe_dbg_data_ops_write(struct file *filp, + const char __user *buffer, + size_t size, loff_t *ppos) +{ + struct txgbe_adapter *adapter = filp->private_data; + u32 func = TXGBE_DATA_FUNC(txgbe_data_mode); + + rmb(); + + switch (func) { + case TXGBE_FUNC_FLASH_WRITE: { + u32 size = TXGBE_DATA_ARGS(txgbe_data_mode); + + if (size > adapter->hw.flash.dword_size << 2) + size = adapter->hw.flash.dword_size << 2; + + return txgbe_simple_write_to_flash(adapter, buffer, size, ppos, size); + } + default: + break; + } + + return size; +} +static struct file_operations txgbe_dbg_data_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = txgbe_dbg_data_ops_read, + .write = txgbe_dbg_data_ops_write, +}; + +/** + * reg_ops operation + **/ +static char txgbe_dbg_reg_ops_buf[256] = ""; +static ssize_t +txgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct txgbe_adapter *adapter = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: mode=0x%08x\n%s\n", + adapter->netdev->name, txgbe_data_mode, + txgbe_dbg_reg_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +static ssize_t +txgbe_dbg_reg_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct txgbe_adapter *adapter = filp->private_data; + char *pc = txgbe_dbg_reg_ops_buf; + int len; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(txgbe_dbg_reg_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(txgbe_dbg_reg_ops_buf, + sizeof(txgbe_dbg_reg_ops_buf)-1, + ppos, + buffer, + count); + if (len < 0) + return len; + + pc[len] = '\0'; + + if (strncmp(pc, "dump", 4) == 0) { + u32 mode = 0; + u16 args; + + pc += 4; + pc += strspn(pc, " \t"); + + if (!strncmp(pc, "bar", 3)) { + pc += 3; + mode = TXGBE_FUNC_DUMP_BAR; + } else if (!strncmp(pc, "rdesc", 5)) { + pc += 5; + mode = TXGBE_FUNC_DUMP_RDESC; + } else if (!strncmp(pc, "tdesc", 5)) { + pc += 5; + mode = TXGBE_FUNC_DUMP_TDESC; + } else { + txgbe_dump(adapter); + } + + if (mode && 1 == sscanf(pc, "%hu", &args)) { + mode |= args; + } + + txgbe_data_mode = mode; + } else if (strncmp(pc, "flash", 4) == 0) { + u32 mode = 0; + u16 args; + + pc += 5; + pc += strspn(pc, " \t"); + if (!strncmp(pc, "read", 3)) { + pc += 4; + mode = TXGBE_FUNC_FLASH_READ; + } else if (!strncmp(pc, "write", 5)) { + pc += 5; + mode = TXGBE_FUNC_FLASH_WRITE; + } + + if (mode && 1 == sscanf(pc, "%hu", &args)) { + mode |= args; + } + + txgbe_data_mode = mode; + } else if (strncmp(txgbe_dbg_reg_ops_buf, "write", 5) == 0) { + u32 reg, value; + int cnt; + cnt = sscanf(&txgbe_dbg_reg_ops_buf[5], "%x %x", ®, &value); + if (cnt == 2) { + wr32(&adapter->hw, reg, value); + e_dev_info("write: 0x%08x = 0x%08x\n", 
reg, value); + } else { + e_dev_info("write \n"); + } + } else if (strncmp(txgbe_dbg_reg_ops_buf, "read", 4) == 0) { + u32 reg, value; + int cnt; + cnt = sscanf(&txgbe_dbg_reg_ops_buf[4], "%x", ®); + if (cnt == 1) { + value = rd32(&adapter->hw, reg); + e_dev_info("read 0x%08x = 0x%08x\n", reg, value); + } else { + e_dev_info("read \n"); + } + } else { + e_dev_info("Unknown command %s\n", txgbe_dbg_reg_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" read \n"); + e_dev_info(" write \n"); + } + return count; +} + +static const struct file_operations txgbe_dbg_reg_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = txgbe_dbg_reg_ops_read, + .write = txgbe_dbg_reg_ops_write, +}; + +/** + * netdev_ops operation + **/ +static char txgbe_dbg_netdev_ops_buf[256] = ""; +static ssize_t +txgbe_dbg_netdev_ops_read(struct file *filp, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct txgbe_adapter *adapter = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: mode=0x%08x\n%s\n", + adapter->netdev->name, txgbe_data_mode, + txgbe_dbg_netdev_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +static ssize_t +txgbe_dbg_netdev_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct txgbe_adapter *adapter = filp->private_data; + int len; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(txgbe_dbg_netdev_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(txgbe_dbg_netdev_ops_buf, + sizeof(txgbe_dbg_netdev_ops_buf)-1, + ppos, + buffer, + count); + if (len < 0) + return len; + + txgbe_dbg_netdev_ops_buf[len] = '\0'; + + if (strncmp(txgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { +#if defined(HAVE_TX_TIMEOUT_TXQUEUE) + adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev, 0); +#elif defined(HAVE_NET_DEVICE_OPS) + adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev); +#else + adapter->netdev->tx_timeout(adapter->netdev); +#endif /* HAVE_NET_DEVICE_OPS */ + e_dev_info("tx_timeout called\n"); + } else { + e_dev_info("Unknown command: %s\n", txgbe_dbg_netdev_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" tx_timeout\n"); + } + return count; +} + +static struct file_operations txgbe_dbg_netdev_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = txgbe_dbg_netdev_ops_read, + .write = txgbe_dbg_netdev_ops_write, +}; + +/** + * txgbe_dbg_adapter_init - setup the debugfs directory for the adapter + * @adapter: the adapter that is starting up + **/ +void txgbe_dbg_adapter_init(struct txgbe_adapter *adapter) +{ + const char *name = pci_name(adapter->pdev); + struct dentry *pfile; + + adapter->txgbe_dbg_adapter = debugfs_create_dir(name, txgbe_dbg_root); + if (!adapter->txgbe_dbg_adapter) { + e_dev_err("debugfs entry for %s failed\n", name); + return; + } + + pfile = debugfs_create_file("data", 0600, + adapter->txgbe_dbg_adapter, adapter, + &txgbe_dbg_data_ops_fops); + if (!pfile) + e_dev_err("debugfs netdev_ops for %s failed\n", name); + + pfile = debugfs_create_file("reg_ops", 0600, + adapter->txgbe_dbg_adapter, adapter, + &txgbe_dbg_reg_ops_fops); + if (!pfile) + e_dev_err("debugfs reg_ops for %s failed\n", name); + + pfile = debugfs_create_file("netdev_ops", 0600, + 
adapter->txgbe_dbg_adapter, adapter, + &txgbe_dbg_netdev_ops_fops); + if (!pfile) + e_dev_err("debugfs netdev_ops for %s failed\n", name); +} + +/** + * txgbe_dbg_adapter_exit - clear out the adapter's debugfs entries + * @pf: the pf that is stopping + **/ +void txgbe_dbg_adapter_exit(struct txgbe_adapter *adapter) +{ + if (adapter->txgbe_dbg_adapter) + debugfs_remove_recursive(adapter->txgbe_dbg_adapter); + adapter->txgbe_dbg_adapter = NULL; +} + +/** + * txgbe_dbg_init - start up debugfs for the driver + **/ +void txgbe_dbg_init(void) +{ + txgbe_dbg_root = debugfs_create_dir(txgbe_driver_name, NULL); + if (txgbe_dbg_root == NULL) + pr_err("init of debugfs failed\n"); +} + +/** + * txgbe_dbg_exit - clean out the driver's debugfs entries + **/ +void txgbe_dbg_exit(void) +{ + debugfs_remove_recursive(txgbe_dbg_root); +} + +#endif /* HAVE_TXGBE_DEBUG_FS */ + +struct txgbe_reg_info { + u32 offset; + u32 length; + char *name; +}; + +static struct txgbe_reg_info txgbe_reg_info_tbl[] = { + + /* General Registers */ + {TXGBE_CFG_PORT_CTL, 1, "CTRL"}, + {TXGBE_CFG_PORT_ST, 1, "STATUS"}, + + /* RX Registers */ + {TXGBE_PX_RR_CFG(0), 1, "SRRCTL"}, + {TXGBE_PX_RR_RP(0), 1, "RDH"}, + {TXGBE_PX_RR_WP(0), 1, "RDT"}, + {TXGBE_PX_RR_CFG(0), 1, "RXDCTL"}, + {TXGBE_PX_RR_BAL(0), 1, "RDBAL"}, + {TXGBE_PX_RR_BAH(0), 1, "RDBAH"}, + + /* TX Registers */ + {TXGBE_PX_TR_BAL(0), 1, "TDBAL"}, + {TXGBE_PX_TR_BAH(0), 1, "TDBAH"}, + {TXGBE_PX_TR_RP(0), 1, "TDH"}, + {TXGBE_PX_TR_WP(0), 1, "TDT"}, + {TXGBE_PX_TR_CFG(0), 1, "TXDCTL"}, + + /* MACVLAN */ + {TXGBE_PSR_MAC_SWC_VM_H, 128, "PSR_MAC_SWC_VM"}, + {TXGBE_PSR_MAC_SWC_AD_L, 128, "PSR_MAC_SWC_AD"}, + {TXGBE_PSR_VLAN_TBL(0), 128, "PSR_VLAN_TBL"}, + + /* QoS */ + {TXGBE_TDM_RP_RATE, 128, "TDM_RP_RATE"}, + + /* List Terminator */ + { .name = NULL } +}; + +/** + * txgbe_regdump - register printout routine + **/ +static void +txgbe_regdump(struct txgbe_hw *hw, struct txgbe_reg_info *reg_info) +{ + #if 0 + int i, n = 0; + u32 buffer[32*8]; + + switch (reg_info->offset) { + case TXGBE_PSR_MAC_SWC_VM_H: + for (i = 0; i < reg_info->length; i++) { + wr32(hw, TXGBE_PSR_MAC_SWC_IDX, i); + buffer[n++] = + rd32(hw, TXGBE_PSR_MAC_SWC_VM_H); + buffer[n++] = + rd32(hw, TXGBE_PSR_MAC_SWC_VM_L); + } + break; + case TXGBE_PSR_MAC_SWC_AD_L: + for (i = 0; i < reg_info->length; i++) { + wr32(hw, TXGBE_PSR_MAC_SWC_IDX, i); + buffer[n++] = + rd32(hw, TXGBE_PSR_MAC_SWC_AD_H); + buffer[n++] = + rd32(hw, TXGBE_PSR_MAC_SWC_AD_L); + } + break; + case TXGBE_TDM_RP_RATE: + for (i = 0; i < reg_info->length; i++) { + wr32(hw, TXGBE_TDM_RP_IDX, i); + buffer[n++] = rd32(hw, TXGBE_TDM_RP_RATE); + } + break; + default: + for (i = 0; i < reg_info->length; i++) { + buffer[n++] = rd32(hw, + reg_info->offset + 4*i); + } + break; + } +#if 0 + for (i = 0; n && i < 32; i++) { + pr_info("%-20s[%02x-%02x]", reg_info->name, i*8, i*8 + 7); + for (j = 0; n && j < 8; j++, n--) + pr_cont(" %08x", buffer[i*8 + j]); + pr_cont("\n"); + } +#endif + BUG_ON(n); +#endif +} + +/** + * txgbe_dump - Print registers, tx-rings and rx-rings + **/ +void txgbe_dump(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_reg_info *reg_info; + int n = 0; + struct txgbe_ring *tx_ring; + struct txgbe_tx_buffer *tx_buffer; + union txgbe_tx_desc *tx_desc; + struct my_u0 { u64 a; u64 b; } *u0; + struct txgbe_ring *rx_ring; + union txgbe_rx_desc *rx_desc; + struct txgbe_rx_buffer *rx_buffer_info; + u32 staterr; + int i = 0; + + if (!netif_msg_hw(adapter)) + 
return; + + /* Print Registers */ + dev_info(&adapter->pdev->dev, "Register Dump\n"); + pr_info(" Register Name Value\n"); + for (reg_info = txgbe_reg_info_tbl; reg_info->name; reg_info++) { + txgbe_regdump(hw, reg_info); + } + + /* Print TX Ring Summary */ + if (!netdev || !netif_running(netdev)) + return; + + dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); + pr_info(" %s %s %s %s\n", + "Queue [NTU] [NTC] [bi(ntc)->dma ]", + "leng", "ntw", "timestamp"); + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + tx_buffer->next_to_watch, + (u64)tx_buffer->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * Transmit Descriptor (Read) + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 |PAYLEN |POPTS|CC|IDX |STA |DCMD |DTYP |MAC |RSV |DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 23 20 19 18 17 16 15 0 + * + * Transmit Descriptor (Write-Back) + * +--------------------------------------------------------------+ + * 0 | RSV [63:0] | + * +--------------------------------------------------------------+ + * 8 | RSV | STA | RSV | + * +--------------------------------------------------------------+ + * 63 36 35 32 31 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("%s%s %s %s %s %s\n", + "T [desc] [address 63:0 ] ", + "[PlPOIdStDDt Ln] [bi->dma ] ", + "leng", "ntw", "timestamp", "bi->skb"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + tx_desc = TXGBE_TX_DESC(tx_ring, i); + tx_buffer = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (dma_unmap_len(tx_buffer, len) > 0) { + pr_info("T [0x%03X] %016llX %016llX %016llX " + "%08X %p %016llX %p", + i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + tx_buffer->next_to_watch, + (u64)tx_buffer->time_stamp, + tx_buffer->skb); + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + pr_cont(" NTC/U\n"); + else if (i == tx_ring->next_to_use) + pr_cont(" NTU\n"); + else if (i == tx_ring->next_to_clean) + pr_cont(" NTC\n"); + else + pr_cont("\n"); + + if (netif_msg_pktdata(adapter) && + tx_buffer->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + tx_buffer->skb->data, + dma_unmap_len(tx_buffer, len), + true); + } + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); + pr_info("Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("%5d %5X %5X\n", + n, rx_ring->next_to_use, rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + return; + + dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); + + /* Receive Descriptor Formats + * + * Receive Descriptor (Read) + * 63 1 
0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * Receive Descriptor (Write-Back) + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS | + * |/ RTT / PCoE_PARAM | | | CNT | Type | Type | + * |/ Flow Dir Flt ID | | | | | | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("%s%s%s", + "R [desc] [ PktBuf A0] ", + "[ HeadBuf DD] [bi->dma ] [bi->skb ] ", + "<-- Adv Rx Read format\n"); + pr_info("%s%s%s", + "RWB[desc] [PcsmIpSHl PtRs] ", + "[vl er S cks ln] ---------------- [bi->skb ] ", + "<-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = TXGBE_RX_DESC(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + if (staterr & TXGBE_RXD_STAT_DD) { + /* Descriptor Done */ + pr_info("RWB[0x%03X] %016llX " + "%016llX ---------------- %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + rx_buffer_info->skb); + } else { +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + pr_info("R [0x%03X] %016llX " + "%016llX %016llX %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)rx_buffer_info->page_dma, + rx_buffer_info->skb); + + if (netif_msg_pktdata(adapter) && + rx_buffer_info->page_dma) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + page_address(rx_buffer_info->page) + + rx_buffer_info->page_offset, + txgbe_rx_bufsz(rx_ring), true); + } +#endif + } + + if (i == rx_ring->next_to_use) + pr_cont(" NTU\n"); + else if (i == rx_ring->next_to_clean) + pr_cont(" NTC\n"); + else + pr_cont("\n"); + + } + } +} + diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c new file mode 100644 index 0000000000000000000000000000000000000000..c82417726b989e398e5e6ac8d31fa4134b3f8c77 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c @@ -0,0 +1,3762 @@ +#include "txgbe_e56.h" +#include "txgbe_hw.h" + +#include + +void field_set(u32 *psrcdata, u32 bithigh, u32 bitlow, u32 setvalue) +{ + *psrcdata &= ~GENMASK(bithigh, bitlow); + *psrcdata |= FIELD_PREP_M(GENMASK(bithigh, bitlow), setvalue); +} + +s32 txgbe_e56_check_phy_link(struct txgbe_hw *hw, u32 *speed, + bool *link_up) +{ + struct txgbe_adapter *adapter = hw->back; + u32 rdata = 0; + u32 links_reg = 0; + + /* must read it twice because the state may + * not be correct the first time you read it + */ + rdata = txgbe_rd32_epcs(hw, 0x30001); + rdata = txgbe_rd32_epcs(hw, 0x30001); + + if (rdata & TXGBE_E56_PHY_LINK_UP) + *link_up = true; + else + *link_up = false; + + if (!adapter->link_valid) + *link_up = false; + + links_reg = rd32(hw, TXGBE_CFG_PORT_ST); + if (*link_up) { + if ((links_reg & TXGBE_CFG_PORT_ST_AML_LINK_40G) == + TXGBE_CFG_PORT_ST_AML_LINK_40G) + *speed = TXGBE_LINK_SPEED_40GB_FULL; + 
else if ((links_reg & TXGBE_CFG_PORT_ST_AML_LINK_25G) == + TXGBE_CFG_PORT_ST_AML_LINK_25G) + *speed = TXGBE_LINK_SPEED_25GB_FULL; + else if ((links_reg & TXGBE_CFG_PORT_ST_AML_LINK_10G) == + TXGBE_CFG_PORT_ST_AML_LINK_10G) + *speed = TXGBE_LINK_SPEED_10GB_FULL; + } else { + *speed = TXGBE_LINK_SPEED_UNKNOWN; + } + + return 0; +} + + +static u32 E56phyTxFfeCfg(struct txgbe_hw *hw, u32 speed) +{ + struct txgbe_adapter *adapter = hw->back; + u32 addr; + + if (speed == TXGBE_LINK_SPEED_10GB_FULL) { + adapter->aml_txeq.main = S10G_TX_FFE_CFG_MAIN; + adapter->aml_txeq.pre1 = S10G_TX_FFE_CFG_PRE1; + adapter->aml_txeq.pre2 = S10G_TX_FFE_CFG_PRE2; + adapter->aml_txeq.post = S10G_TX_FFE_CFG_POST; + } else if (speed == TXGBE_LINK_SPEED_25GB_FULL) { + adapter->aml_txeq.main = S25G_TX_FFE_CFG_MAIN; + adapter->aml_txeq.pre1 = S25G_TX_FFE_CFG_PRE1; + adapter->aml_txeq.pre2 = S25G_TX_FFE_CFG_PRE2; + adapter->aml_txeq.post = S25G_TX_FFE_CFG_POST; + + if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1 || + txgbe_is_backplane(hw)) { + adapter->aml_txeq.main = S25G_TX_FFE_CFG_DAC_MAIN; + adapter->aml_txeq.pre1 = S25G_TX_FFE_CFG_DAC_PRE1; + adapter->aml_txeq.pre2 = S25G_TX_FFE_CFG_DAC_PRE2; + adapter->aml_txeq.post = S25G_TX_FFE_CFG_DAC_POST; + } + } else if (speed == TXGBE_LINK_SPEED_40GB_FULL) { + adapter->aml_txeq.main = S10G_TX_FFE_CFG_MAIN; + adapter->aml_txeq.pre1 = S10G_TX_FFE_CFG_PRE1; + adapter->aml_txeq.pre2 = S10G_TX_FFE_CFG_PRE2; + adapter->aml_txeq.post = S10G_TX_FFE_CFG_POST; + + if (hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core0 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core1 || + txgbe_is_backplane(hw)) { + adapter->aml_txeq.main = 0x2b2b2b2b; + adapter->aml_txeq.pre1 = 0x03030303; + adapter->aml_txeq.pre2 = 0; + adapter->aml_txeq.post = 0x11111111; + } + } else { + return 0; + } + + addr = 0x141c; + txgbe_wr32_ephy(hw, addr, adapter->aml_txeq.main); + + addr = 0x1420; + txgbe_wr32_ephy(hw, addr, adapter->aml_txeq.pre1); + + addr = 0x1424; + txgbe_wr32_ephy(hw, addr, adapter->aml_txeq.pre2); + + addr = 0x1428; + txgbe_wr32_ephy(hw, addr, adapter->aml_txeq.post); + + return 0; +} + +int txgbe_e56_get_temp(struct txgbe_hw *hw, int *pTempData) +{ + int data_code, temp_data, temp_fraction; + u32 rdata; + u32 timer = 0; + + while(1) { + rdata = rd32(hw, 0x1033c); + if (((rdata >> 12) & 0x1 )!= 0) + break; + + if (timer++ > PHYINIT_TIMEOUT) { + return -ETIMEDOUT; + } + } + + data_code = rdata & 0xFFF; + temp_data = 419400 + 2205 * (data_code * 1000 / 4094 - 500); + + //Change double Temperature to int + *pTempData = temp_data/10000; + temp_fraction = temp_data - (*pTempData * 10000); + if (temp_fraction >= 5000) + *pTempData += 1; + + return 0; +} + +u32 txgbe_e56_cfg_40g(struct txgbe_hw *hw) +{ + u32 addr; + u32 rdata = 0; + int i; + + //CMS Config Master + addr = E56G_CMS_ANA_OVRDVAL_7_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_CMS_ANA_OVRDVAL_7 *)&rdata)->ana_lcpll_lf_vco_swing_ctrl_i = 0xf; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_CMS_ANA_OVRDEN_1 *)&rdata)->ovrd_en_ana_lcpll_lf_vco_swing_ctrl_i = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G_CMS_ANA_OVRDVAL_9_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 23, 0, 0x260000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_CMS_ANA_OVRDEN_1 *)&rdata)->ovrd_en_ana_lcpll_lf_test_in_i = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + 
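+ /*
+  * The CMS/LCPLL overrides above are written once (they are not per-lane),
+  * while the 40G mode drives four serdes lanes: the TXS and RXS blocks
+  * below are therefore programmed in loops over lanes 0-3 (stride
+  * E56PHY_TXS_OFFSET / E56PHY_RXS_OFFSET), unlike the single-lane
+  * 10G/25G configuration paths.
+  */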
//TXS Config Master + for (i = 0; i < 4; i++) { + addr = E56PHY_TXS_TXS_CFG_1_ADDR + (E56PHY_TXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_TXS_CFG_1_ADAPTATION_WAIT_CNT_X256, 0xf); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_WKUP_CNT_ADDR + (E56PHY_TXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_WKUP_CNTLDO_WKUP_CNT_X32, 0xff); + field_set(&rdata, E56PHY_TXS_WKUP_CNTDCC_WKUP_CNT_X32, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_PIN_OVRDVAL_6_ADDR + (E56PHY_TXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 19, 16, 0x6); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_PIN_OVRDEN_0_ADDR + (E56PHY_TXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_PIN_OVRDEN_0_OVRD_EN_TX0_EFUSE_BITS_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_ANA_OVRDVAL_1_ADDR + (E56PHY_TXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_ANA_OVRDVAL_1_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_ANA_OVRDEN_0_ADDR + (E56PHY_TXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + } + //Setting TX FFE + E56phyTxFfeCfg(hw, TXGBE_LINK_SPEED_40GB_FULL); + + //RXS Config master + for (i = 0; i < 4; i++) { + addr = E56PHY_RXS_RXS_CFG_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_RXS_CFG_0_DSER_DATA_SEL, 0x0); + field_set(&rdata, E56PHY_RXS_RXS_CFG_0_TRAIN_CLK_GATE_BYPASS_EN, 0x1fff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_OSC_CAL_N_CDR_0 *)&rdata)->prediv0 = 0xfa0; + ((E56G_RXS0_OSC_CAL_N_CDR_0 *)&rdata)->target_cnt0= 0x203a; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_4_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->osc_range_sel0= 0x2; + ((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->vco_code_init= 0x7ff; + ((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->osc_current_boost_en0= 0x1; + ((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->bbcdr_current_boost0 = 0x0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_SDM_WIDTH, 0x3); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_PRELOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_POSTLOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_POSTLOCK, 0xc); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_PRELOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BBCDR_RDY_CNT, 0x3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_6_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_PRELOCK, 0x7); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_POSTLOCK, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_INTL_CONFIG_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_INTL_CONFIG_0 *)&rdata)->adc_intl2slice_delay0 = 0x5555; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_INTL_CONFIG_2_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_INTL_CONFIG_2 
*)&rdata)->interleaver_hbw_disable0 = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_LTH, 0x56); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_UTH, 0x6a); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_LTH, 0x1e8); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_UTH, 0x78); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_2_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_LTH, 0x100); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_UTH, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_3_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_LTH, 0x4); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_UTH, 0x37); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_TXFFE_TRAIN_MOD_TYPE, 0x38); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_VGA_TRAINING_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_0_VGA_TARGET, 0x34); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_VGA_TRAINING_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT0, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT0, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT123, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT123, 0xa); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT0, 0x9); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT123, 0x9); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_1_LFEQ_LUT, 0x1ffffea); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_2_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P1, + S10G_PHY_RX_CTLE_TAP_FRACP1); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P2, + S10G_PHY_RX_CTLE_TAP_FRACP2); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P3, + S10G_PHY_RX_CTLE_TAP_FRACP3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_3_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P1, + S10G_PHY_RX_CTLE_TAPWT_WEIGHT1); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P2, + S10G_PHY_RX_CTLE_TAPWT_WEIGHT2); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P3, + S10G_PHY_RX_CTLE_TAPWT_WEIGHT3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_SLICE_DATA_AVG_CNT, + 0x3); + field_set(&rdata, + 
E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_DATA_AVG_CNT, 0x3); + field_set(&rdata, + E56PHY_RXS_OFFSET_N_GAIN_CAL_0_FE_OFFSET_DAC_CLK_CNT_X8, + 0xc); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_1_SAMP_ADAPT_CFG, + 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_FFE_TRAINING_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_FFE_TRAINING_0_FFE_TAP_EN, 0xf9ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_IDLE_DETECT_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX, 0xa); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__RXS3_ANA_OVRDVAL_11_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw ,addr); + ((E56G__RXS3_ANA_OVRDVAL_11 *)&rdata)->ana_test_adc_clkgen_i = 0x0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__RXS0_ANA_OVRDEN_2_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw ,addr); + ((E56G__RXS0_ANA_OVRDEN_2 *)&rdata)->ovrd_en_ana_test_adc_clkgen_i = 0x0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_0_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_6_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 4, 0, 0x6); + field_set(&rdata, 14, 13, 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_BBCDR_VCOFILT_BYP_I, 0x1); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_TEST_BBCDR_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_15_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 2, 0, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_17_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_17_ANA_VGA2_BOOST_CSTM_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_3_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_ANABS_CONFIG_I, 0x1); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_VGA2_BOOST_CSTM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_14_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_4_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_EYE_SCAN_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_EYE_SCAN_1_EYE_SCAN_REF_TIMER, 0x400); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_RINGO_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 9, 4, 0x366); + 
txgbe_wr32_ephy(hw, addr, rdata); + } + + // PDIG Config master + addr = E56PHY_PMD_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_3_CTRL_FSM_TIMEOUT_X64K, 0x80); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_ON_PERIOD_X64K, 0x18); + field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_PERIOD_X512K, 0x3e); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_5_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_5_USE_RECENT_MARKER_OFFSET, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_CONT_ON_ADC_GAIN_CAL_ERR, 0x1); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_DO_RX_ADC_OFST_CAL, 0x3); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_RX_ERR_ACTION_EN, 0x40); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST0_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST1_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST2_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST3_WAIT_CNT_X4096, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST4_WAIT_CNT_X4096, 0x1); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST5_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST6_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST7_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST8_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST9_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST10_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST11_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST12_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST13_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST14_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST15_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_7_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST4_EN, 0x4bf); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST5_EN, 0xc4bf); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_8_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_8_TRAIN_ST7_EN, 0x47ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_12_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_12_TRAIN_ST15_EN, 0x67ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_13_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST0_DONE_EN, 0x8001); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST1_DONE_EN, 0x8002); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_14_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_14_TRAIN_ST3_DONE_EN, 0x8008); + 
txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_15_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_15_TRAIN_ST4_DONE_EN, 0x8004); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_17_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_17_TRAIN_ST8_DONE_EN, 0x20c0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_18_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_18_TRAIN_ST10_DONE_EN, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_29_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_29_TRAIN_ST15_DC_EN, 0x3f6d); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_33_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN0_RATE_SEL, 0x8000); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN1_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_34_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN2_RATE_SEL, 0x8000); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN3_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_KRT_TFSM_CFG_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X1000K, 0x49); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X8000K, 0x37); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_HOLDOFF_TIMER_X256K, 0x2f); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_FETX_FFE_TRAIN_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_FETX_FFE_TRAIN_CFG_0_KRT_FETX_INIT_FFE_CFG_2, 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + return 0; +} + +u32 txgbe_e56_cfg_25g(struct txgbe_hw *hw) +{ + u32 addr; + u32 rdata = 0; + + addr = E56PHY_CMS_PIN_OVRDVAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_PIN_OVRDVAL_0_INT_PLL0_TX_SIGNAL_TYPE_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CMS_PIN_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_PIN_OVRDEN_0_OVRD_EN_PLL0_TX_SIGNAL_TYPE_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CMS_ANA_OVRDVAL_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDVAL_2_ANA_LCPLL_HF_VCO_SWING_CTRL_I, 0xf); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CMS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_VCO_SWING_CTRL_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CMS_ANA_OVRDVAL_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 23, 0, 0x260000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_HF_TEST_IN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_TXS_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_TXS_CFG_1_ADAPTATION_WAIT_CNT_X256, 0xf); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_WKUP_CNT_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_WKUP_CNTLDO_WKUP_CNT_X32, 0xff); + field_set(&rdata, E56PHY_TXS_WKUP_CNTDCC_WKUP_CNT_X32, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_PIN_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 27, 24, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_PIN_OVRDEN_0_ADDR; + 
rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_PIN_OVRDEN_0_OVRD_EN_TX0_EFUSE_BITS_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_ANA_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_ANA_OVRDVAL_1_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + E56phyTxFfeCfg(hw, TXGBE_LINK_SPEED_25GB_FULL); + + addr = E56PHY_RXS_RXS_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_RXS_CFG_0_DSER_DATA_SEL, 0x0); + field_set(&rdata, E56PHY_RXS_RXS_CFG_0_TRAIN_CLK_GATE_BYPASS_EN, 0x1fff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_1_PREDIV1, 0x700); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_1_TARGET_CNT1, 0x2418); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_OSC_RANGE_SEL1, 0x1); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_VCO_CODE_INIT, 0x7fb); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_OSC_CURRENT_BOOST_EN1, 0x0); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_BBCDR_CURRENT_BOOST1, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_5_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_SDM_WIDTH, 0x3); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_PRELOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_POSTLOCK, 0x3); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_POSTLOCK, 0xa); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_PRELOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BBCDR_RDY_CNT, 0x3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_PRELOCK, 0x7); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_POSTLOCK, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_INTL_CONFIG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_INTL_CONFIG_0_ADC_INTL2SLICE_DELAY1, 0x3333); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_INTL_CONFIG_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_INTL_CONFIG_2_INTERLEAVER_HBW_DISABLE1, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_TXFFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_LTH, 0x56); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_UTH, 0x6a); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_TXFFE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_LTH, 0x1f8); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_UTH, 0xf0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_TXFFE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_LTH, 0x100); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_UTH, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_TXFFE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_LTH, 0x4); + field_set(&rdata, 
E56PHY_RXS_TXFFE_TRAINING_3_CM2_UTH, 0x37); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_TXFFE_TRAIN_MOD_TYPE, 0x38); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__RXS0_FOM_18__ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56G__RXS0_FOM_18__DFE_COEFFL_HINT__MSB, + E56G__RXS0_FOM_18__DFE_COEFFL_HINT__LSB, 0x0); + //change 0x90 to 0x0 to fix 25G link up keep when cable unplugged + field_set(&rdata, E56G__RXS0_FOM_18__DFE_COEFFH_HINT__MSB, + E56G__RXS0_FOM_18__DFE_COEFFH_HINT__LSB, 0x0); + field_set(&rdata, E56G__RXS0_FOM_18__DFE_COEFF_HINT_LOAD__MSB, + E56G__RXS0_FOM_18__DFE_COEFF_HINT_LOAD__LSB, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_VGA_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_0_VGA_TARGET, 0x34); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_VGA_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT0, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT0, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT123, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT123, 0xa); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT0, 0x9); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT123, 0x9); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_1_LFEQ_LUT, 0x1ffffea); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P1, S25G_PHY_RX_CTLE_TAP_FRACP1); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P2, S25G_PHY_RX_CTLE_TAP_FRACP2); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P3, S25G_PHY_RX_CTLE_TAP_FRACP3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P1, S25G_PHY_RX_CTLE_TAPWT_WEIGHT1); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P2, S25G_PHY_RX_CTLE_TAPWT_WEIGHT2); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P3, S25G_PHY_RX_CTLE_TAPWT_WEIGHT3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_SLICE_DATA_AVG_CNT, 0x3); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_DATA_AVG_CNT, 0x3); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_FE_OFFSET_DAC_CLK_CNT_X8, 0xc); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_1_SAMP_ADAPT_CFG, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_FFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_FFE_TRAINING_0_FFE_TAP_EN, 0xf9ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_IDLE_DETECT_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX, 0xa); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDVAL_11, ana_test_adc_clkgen_i, 0x0); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_2, 
ovrd_en_ana_test_adc_clkgen_i, 0x0); + + addr = E56PHY_RXS_ANA_OVRDVAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_0_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 4, 0, 0x0); + field_set(&rdata, 14, 13, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_BBCDR_VCOFILT_BYP_I, 0x1); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_TEST_BBCDR_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_15_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 2, 0, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_17_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_17_ANA_VGA2_BOOST_CSTM_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_ANABS_CONFIG_I, 0x1); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_VGA2_BOOST_CSTM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_14_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 13, 13, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_EYE_SCAN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_EYE_SCAN_1_EYE_SCAN_REF_TIMER, 0x400); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_RINGO_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 21, 12, 0x366); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_3_CTRL_FSM_TIMEOUT_X64K, 0x80); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_ON_PERIOD_X64K, 0x18); + field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_PERIOD_X512K, 0x3e); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_5_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_5_USE_RECENT_MARKER_OFFSET, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_CONT_ON_ADC_GAIN_CAL_ERR, 0x1); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_DO_RX_ADC_OFST_CAL, 0x3); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_RX_ERR_ACTION_EN, 0x40); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST0_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST1_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST2_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST3_WAIT_CNT_X4096, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST4_WAIT_CNT_X4096, 0x1); + field_set(&rdata, 
E56PHY_CTRL_FSM_CFG_2_TRAIN_ST5_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST6_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST7_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST8_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST9_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST10_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST11_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST12_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST13_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST14_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST15_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_7_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST4_EN, 0x4bf); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST5_EN, 0xc4bf); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_8_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_8_TRAIN_ST7_EN, 0x47ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_12_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_12_TRAIN_ST15_EN, 0x67ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_13_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST0_DONE_EN, 0x8001); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST1_DONE_EN, 0x8002); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_14_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_14_TRAIN_ST3_DONE_EN, 0x8008); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_15_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_15_TRAIN_ST4_DONE_EN, 0x8004); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_17_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_17_TRAIN_ST8_DONE_EN, 0x20c0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_18_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_18_TRAIN_ST10_DONE_EN, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_29_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_29_TRAIN_ST15_DC_EN, 0x3f6d); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_33_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN0_RATE_SEL, 0x8000); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN1_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_34_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN2_RATE_SEL, 0x8000); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN3_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_KRT_TFSM_CFG_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X1000K, 0x49); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X8000K, 0x37); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_HOLDOFF_TIMER_X256K, 0x2f); 
+ txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_FETX_FFE_TRAIN_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_FETX_FFE_TRAIN_CFG_0_KRT_FETX_INIT_FFE_CFG_2, 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + return 0; +} + +u32 txgbe_e56_cfg_10g(struct txgbe_hw *hw) +{ + u32 addr; + u32 rdata = 0; + + addr = E56G_CMS_ANA_OVRDVAL_7_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_CMS_ANA_OVRDVAL_7 *)&rdata)->ana_lcpll_lf_vco_swing_ctrl_i = 0xf; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_CMS_ANA_OVRDEN_1 *)&rdata)->ovrd_en_ana_lcpll_lf_vco_swing_ctrl_i = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G_CMS_ANA_OVRDVAL_9_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 23, 0, 0x260000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_CMS_ANA_OVRDEN_1 *)&rdata)->ovrd_en_ana_lcpll_lf_test_in_i = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_TXS_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_TXS_CFG_1_ADAPTATION_WAIT_CNT_X256, 0xf); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_WKUP_CNT_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_WKUP_CNTLDO_WKUP_CNT_X32, 0xff); + field_set(&rdata, E56PHY_TXS_WKUP_CNTDCC_WKUP_CNT_X32, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_PIN_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 19, 16, 0x6); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_PIN_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_PIN_OVRDEN_0_OVRD_EN_TX0_EFUSE_BITS_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_ANA_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_ANA_OVRDVAL_1_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //Setting TX FFE + E56phyTxFfeCfg(hw, TXGBE_LINK_SPEED_10GB_FULL); + + addr = E56PHY_RXS_RXS_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_RXS_CFG_0_DSER_DATA_SEL, 0x0); + field_set(&rdata, E56PHY_RXS_RXS_CFG_0_TRAIN_CLK_GATE_BYPASS_EN, 0x1fff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_OSC_CAL_N_CDR_0 *)&rdata)->prediv0 = 0xfa0; + ((E56G_RXS0_OSC_CAL_N_CDR_0 *)&rdata)->target_cnt0= 0x203a; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_4_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->osc_range_sel0= 0x2; + ((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->vco_code_init= 0x7ff; + ((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->osc_current_boost_en0= 0x1; + ((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->bbcdr_current_boost0 = 0x0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_5_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_SDM_WIDTH, 0x3); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_PRELOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_POSTLOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_POSTLOCK, 0xc); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_PRELOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BBCDR_RDY_CNT, 
0x3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_PRELOCK, 0x7); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_POSTLOCK, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_INTL_CONFIG_0_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_INTL_CONFIG_0 *)&rdata)->adc_intl2slice_delay0 = 0x5555; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_INTL_CONFIG_2_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_INTL_CONFIG_2 *)&rdata)->interleaver_hbw_disable0 = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_LTH, 0x56); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_UTH, 0x6a); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_LTH, 0x1e8); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_UTH, 0x78); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_LTH, 0x100); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_UTH, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_LTH, 0x4); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_UTH, 0x37); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_TXFFE_TRAIN_MOD_TYPE, 0x38); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_VGA_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_0_VGA_TARGET, 0x34); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_VGA_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT0, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT0, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT123, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT123, 0xa); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT0, 0x9); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT123, 0x9); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_1_LFEQ_LUT, 0x1ffffea); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P1, S10G_PHY_RX_CTLE_TAP_FRACP1); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P2, S10G_PHY_RX_CTLE_TAP_FRACP2); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P3, S10G_PHY_RX_CTLE_TAP_FRACP3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P1, S10G_PHY_RX_CTLE_TAPWT_WEIGHT1); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P2, S10G_PHY_RX_CTLE_TAPWT_WEIGHT2); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P3, 
S10G_PHY_RX_CTLE_TAPWT_WEIGHT3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_SLICE_DATA_AVG_CNT, 0x3); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_DATA_AVG_CNT, 0x3); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_FE_OFFSET_DAC_CLK_CNT_X8, 0xc); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_1_SAMP_ADAPT_CFG, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_FFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_FFE_TRAINING_0_FFE_TAP_EN, 0xf9ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_IDLE_DETECT_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX, 0xa); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDVAL_11, ana_test_adc_clkgen_i, 0x0); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_2, ovrd_en_ana_test_adc_clkgen_i, 0x0); + + addr = E56PHY_RXS_ANA_OVRDVAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_0_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 4, 0, 0x6); + field_set(&rdata, 14, 13, 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_BBCDR_VCOFILT_BYP_I, 0x1); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_TEST_BBCDR_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_15_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 2, 0, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_17_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_17_ANA_VGA2_BOOST_CSTM_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_ANABS_CONFIG_I, 0x1); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_VGA2_BOOST_CSTM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_14_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_EYE_SCAN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_EYE_SCAN_1_EYE_SCAN_REF_TIMER, 0x400); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_RINGO_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 9, 4, 0x366); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_3_CTRL_FSM_TIMEOUT_X64K, 0x80); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_ON_PERIOD_X64K, 0x18); + field_set(&rdata, 
E56PHY_PMD_CFG_4_TRAIN_DC_PERIOD_X512K, 0x3e); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_5_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_5_USE_RECENT_MARKER_OFFSET, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_CONT_ON_ADC_GAIN_CAL_ERR, 0x1); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_DO_RX_ADC_OFST_CAL, 0x3); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_RX_ERR_ACTION_EN, 0x40); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST0_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST1_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST2_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST3_WAIT_CNT_X4096, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST4_WAIT_CNT_X4096, 0x1); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST5_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST6_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST7_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST8_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST9_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST10_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST11_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST12_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST13_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST14_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST15_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_7_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST4_EN, 0x4bf); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST5_EN, 0xc4bf); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_8_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_8_TRAIN_ST7_EN, 0x47ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_12_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_12_TRAIN_ST15_EN, 0x67ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_13_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST0_DONE_EN, 0x8001); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST1_DONE_EN, 0x8002); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_14_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_14_TRAIN_ST3_DONE_EN, 0x8008); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_15_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_15_TRAIN_ST4_DONE_EN, 0x8004); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_17_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_17_TRAIN_ST8_DONE_EN, 0x20c0); + txgbe_wr32_ephy(hw, addr, rdata); + + 
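+ /*
+  * The CTRL_FSM_CFG_18 and later writes below use the same values as the
+  * 25G and 40G bring-up sequences; the rate-specific parts of the 10G
+  * setup (PLL, TX FFE, CDR/interleaver and analog overrides) have all
+  * been programmed above.
+  */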
addr = E56PHY_CTRL_FSM_CFG_18_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_18_TRAIN_ST10_DONE_EN, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_29_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_29_TRAIN_ST15_DC_EN, 0x3f6d); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_33_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN0_RATE_SEL, 0x8000); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN1_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_34_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN2_RATE_SEL, 0x8000); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN3_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_KRT_TFSM_CFG_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X1000K, 0x49); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X8000K, 0x37); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_HOLDOFF_TIMER_X256K, 0x2f); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_FETX_FFE_TRAIN_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_FETX_FFE_TRAIN_CFG_0_KRT_FETX_INIT_FFE_CFG_2, 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + return 0; +} + +static int E56phyRxsOscInitForTempTrackRange(struct txgbe_hw *hw, u32 speed) +{ + int status = 0; + unsigned int addr, rdata, timer; + int T=40; + int RX_COARSE_MID_TD, CMVAR_RANGE_H = 0, CMVAR_RANGE_L = 0; + int OFFSET_CENTRE_RANGE_H, OFFSET_CENTRE_RANGE_L, RANGE_FINAL; + int osc_freq_err_occur; + int i = 0; + int lane_num = 1; + //1. Read the temperature T just before RXS is enabled. + txgbe_e56_get_temp(hw, &T); + + //2. Define software variable RX_COARSE_MID_TD (RX Coarse Code mid value dependent upon temperature) + if(T < -5) { RX_COARSE_MID_TD = 10; } + else if(T < 30) { RX_COARSE_MID_TD = 9; } + else if(T < 65) { RX_COARSE_MID_TD = 8; } + else if(T < 100) { RX_COARSE_MID_TD = 7; } + else { RX_COARSE_MID_TD = 6; } + + //Set CMVAR_RANGE_H/L based on the link speed mode + if (speed == TXGBE_LINK_SPEED_10GB_FULL || speed == TXGBE_LINK_SPEED_40GB_FULL) { //10G mode + CMVAR_RANGE_H = S10G_CMVAR_RANGE_H; + CMVAR_RANGE_L = S10G_CMVAR_RANGE_L; + } else if (speed == TXGBE_LINK_SPEED_25GB_FULL) { //25G mode + CMVAR_RANGE_H = S25G_CMVAR_RANGE_H; + CMVAR_RANGE_L = S25G_CMVAR_RANGE_L; + } + + if (speed == TXGBE_LINK_SPEED_40GB_FULL) + lane_num = 4; + // TBD select all lane + //3. Program ALIAS::RXS::RANGE_SEL = CMVAR::RANGE_H + // RXS0_ANA_OVRDVAL[5] + // ana_bbcdr_osc_range_sel_i[1:0] + for (i = 0; i < lane_num; i++) { + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS_ANA_OVRDVAL_5_ANA_BBCDR_OSC_RANGE_SEL_I, + CMVAR_RANGE_H); + txgbe_wr32_ephy(hw, addr, rdata); + + // RXS0_ANA_OVRDEN[0] + // [29] ovrd_en_ana_bbcdr_osc_range_sel_i + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_BBCDR_OSC_RANGE_SEL_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //4. Do SEQ::RX_ENABLE to enable RXS, and let it stop after oscillator calibration. + //This needs to be done by blocking the RX power-up fsm at the state following the oscillator calibration state. + //Follow below steps to do the same before SEQ::RX_ENABLE. + //a. 
ALIAS::PDIG::CTRL_FSM_RX_ST can be stopped at RX_SAMP_CAL_ST which is the state + //after RX_OSC_CAL_ST by configuring ALIAS::RXS::SAMP_CAL_DONE=0b0 + + // RXS0_OVRDVAL[0] + // [22] rxs0_rx0_samp_cal_done_o + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_0_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_0_RXS0_RX0_SAMP_CAL_DONE_O, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + // RXS0_OVRDEN[0] + // [27] ovrd_en_rxs0_rx0_samp_cal_done_o + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDEN_0_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDEN_0_OVRD_EN_RXS0_RX0_SAMP_CAL_DONE_O, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //Do SEQ::RX_ENABLE to enable RXS + rdata = 0; + addr = E56PHY_PMD_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, (0x1 << i)); + txgbe_wr32_ephy(hw, addr, rdata); + + //b. Poll ALIAS::PDIG::CTRL_FSM_RX_ST and confirm its value is RX_SAMP_CAL_ST + // poll CTRL_FSM_RX_ST + rdata = 0; + timer = 0; + osc_freq_err_occur = 0; + while((rdata >> (i * 8) & 0x3f) != 0x9) { //Bit[5:0]!= 0x9 + udelay(500); + // INTR[0] + // [11:8] intr_rx_osc_freq_err + rdata = 0; + addr = E56PHY_INTR_0_ADDR; + rdata = rd32_ephy(hw, addr); + // TBD is always osc_freq_err occur? + if(rdata & (0x100 << i)) { + osc_freq_err_occur = 1; + break; + } + rdata = 0; + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + rdata = rd32_ephy(hw, addr); + + if (timer++ > PHYINIT_TIMEOUT) { + printk("ERROR: Wait E56PHY_CTRL_FSM_RX_STAT_0_ADDR Timeout!!!\n"); + break; + return -1; + } + } + + //5/6.Define software variable as OFFSET_CENTRE_RANGE_H = ALIAS::RXS::COARSE + //- RX_COARSE_MID_TD. Clear the INTR. + rdata = 0; + addr = E56PHY_RXS_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + OFFSET_CENTRE_RANGE_H = (rdata >> 4) & 0xf; + if(OFFSET_CENTRE_RANGE_H > RX_COARSE_MID_TD) { + OFFSET_CENTRE_RANGE_H = OFFSET_CENTRE_RANGE_H - RX_COARSE_MID_TD; + } else { + OFFSET_CENTRE_RANGE_H = RX_COARSE_MID_TD - OFFSET_CENTRE_RANGE_H; + } + + //7. Do SEQ::RX_DISABLE to disable RXS. Poll ALIAS::PDIG::CTRL_FSM_RX_ST and confirm + //its value is POWERDN_ST + + rdata = 0; + addr = E56PHY_PMD_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + timer = 0; + while(1) { + udelay(500); + rdata = 0; + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + rdata = rd32_ephy(hw, addr); + if(((rdata >> (i * 8)) & 0x3f) == 0x21) { break; } + if (timer++ > PHYINIT_TIMEOUT) { + printk("ERROR: Wait E56PHY_CTRL_FSM_RX_STAT_0_ADDR Timeout!!!\n"); + break; + return -1; + } + } + + //8. Since RX power-up fsm is stopped in RX_SAMP_CAL_ST, it is possible the timeout interrupt is set. + //Clear the same by clearing ALIAS::PDIG::INTR_CTRL_FSM_RX_ERR. + //Also clear ALIAS::PDIG::INTR_RX_OSC_FREQ_ERR which could also be set. + udelay(500); + rdata = 0; + addr = E56PHY_INTR_0_ADDR; + rdata = rd32_ephy(hw, addr); + + udelay(500); + addr = E56PHY_INTR_0_ADDR; + txgbe_wr32_ephy(hw, addr, rdata); + + udelay(500); + rdata = 0; + addr = E56PHY_INTR_0_ADDR; + rdata = rd32_ephy(hw, addr); + // next round + + //9. 
Program ALIAS::RXS::RANGE_SEL = CMVAR::RANGE_L + // RXS0_ANA_OVRDVAL[5] + // ana_bbcdr_osc_range_sel_i[1:0] + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS_ANA_OVRDVAL_5_ANA_BBCDR_OSC_RANGE_SEL_I, + CMVAR_RANGE_L); + txgbe_wr32_ephy(hw, addr, rdata); + + // RXS0_ANA_OVRDEN[0] + // [29] ovrd_en_ana_bbcdr_osc_range_sel_i + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_BBCDR_OSC_RANGE_SEL_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //10. Do SEQ::RX_ENABLE to enable RXS, and let it stop after oscillator calibration. + // RXS0_OVRDVAL[0] + // [22] rxs0_rx0_samp_cal_done_o + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_0_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_0_RXS0_RX0_SAMP_CAL_DONE_O, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + // RXS0_OVRDEN[0] + // [27] ovrd_en_rxs0_rx0_samp_cal_done_o + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDEN_0_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDEN_0_OVRD_EN_RXS0_RX0_SAMP_CAL_DONE_O, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0; + addr = E56PHY_PMD_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, (0x1 << i)); + txgbe_wr32_ephy(hw, addr, rdata); + + // poll CTRL_FSM_RX_ST + timer = 0; + osc_freq_err_occur = 0; + while(((rdata >> (i * 8)) & 0x3f) != 0x9) { //Bit[5:0]!= 0x9 + udelay(500); + // INTR[0] + // [11:8] intr_rx_osc_freq_err + rdata = 0; + addr = E56PHY_INTR_0_ADDR; + rdata = rd32_ephy(hw, addr); + // TBD is always osc_freq_err occur? + if((rdata & 0x100) == 0x100) { + osc_freq_err_occur = 1; + break; + } + rdata = 0; + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + rdata = rd32_ephy(hw, addr); + if (timer++ > PHYINIT_TIMEOUT) { + printk("ERROR: Wait E56PHY_CTRL_FSM_RX_STAT_0_ADDR Timeout!!!\n"); + break; + return -1; + } //if (timer++ > PHYINIT_TIMEOUT) { + } + + //11/12.Define software variable as OFFSET_CENTRE_RANGE_L = ALIAS::RXS::COARSE - + //RX_COARSE_MID_TD. Clear the INTR. + rdata = 0; + addr = E56PHY_RXS_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + OFFSET_CENTRE_RANGE_L = (rdata >> 4) & 0xf; + if(OFFSET_CENTRE_RANGE_L > RX_COARSE_MID_TD) { + OFFSET_CENTRE_RANGE_L = OFFSET_CENTRE_RANGE_L - RX_COARSE_MID_TD; + } else { + OFFSET_CENTRE_RANGE_L = RX_COARSE_MID_TD - OFFSET_CENTRE_RANGE_L; + } + + //13. Perform below calculation in software. Goal is to pick range value which is closer to RX_COARSE_MID_TD + if (OFFSET_CENTRE_RANGE_L < OFFSET_CENTRE_RANGE_H) { + RANGE_FINAL = CMVAR_RANGE_L; + } + else { + RANGE_FINAL = CMVAR_RANGE_H; + } + + //14. Do SEQ::RX_DISABLE to disable RXS. Poll ALIAS::PDIG::CTRL_FSM_RX_ST + //and confirm its value is POWERDN_ST + rdata = 0; + addr = E56PHY_PMD_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + timer = 0; + while(1) { + udelay(500); + rdata = 0; + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + rdata = rd32_ephy(hw, addr); + if(((rdata >> (i * 8)) & 0x3f) == 0x21) { break; } + if (timer++ > PHYINIT_TIMEOUT) { + printk("ERROR: Wait E56PHY_CTRL_FSM_RX_STAT_0_ADDR Timeout!!!\n"); + break; + return -1; + } //if (timer++ > PHYINIT_TIMEOUT) { + } + + //15. 
Since RX power-up fsm is stopped in RX_SAMP_CAL_ST, + //it is possible the timeout interrupt is set. Clear the same by clearing + //ALIAS::PDIG::INTR_CTRL_FSM_RX_ERR. Also clear ALIAS::PDIG::INTR_RX_OSC_FREQ_ERR + //which could also be set. + udelay(500); + rdata = 0; + addr = E56PHY_INTR_0_ADDR; + rdata = rd32_ephy(hw, addr); + udelay(500); + txgbe_wr32_ephy(hw, addr, rdata); + + udelay(500); + rdata = 0; + addr = E56PHY_INTR_0_ADDR; + rdata = rd32_ephy(hw, addr); + + //16. Program ALIAS::RXS::RANGE_SEL = RANGE_FINAL + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_5_ANA_BBCDR_OSC_RANGE_SEL_I, RANGE_FINAL); + txgbe_wr32_ephy(hw, addr, rdata); + + //17. Program following before enabling RXS. Purpose is to disable power-up FSM control on ADC offset adaptation + //Note: this step will be done in 2.3.3 RXS calibration and adaptation sequence + + //18. After this SEQ::RX_ENABLE can be done at any time. Note to ensure that ALIAS::RXS::RANGE_SEL = RANGE_FINAL configuration is retained. + //Rmove the OVRDEN on rxs0_rx0_samp_cal_done_o + + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDEN_0_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDEN_0_OVRD_EN_RXS0_RX0_SAMP_CAL_DONE_O, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + } + //Do SEQ::RX_ENABLE + rdata = 0; + addr = E56PHY_PMD_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + if (speed == TXGBE_LINK_SPEED_40GB_FULL) + field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, 0xf); + else + field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + return status; +} + +static int E56phySetRxsUfineLeMax40G(struct txgbe_hw *hw, u32 speed) +{ + int status = 0; + unsigned int rdata; + unsigned int ULTRAFINE_CODE; + int i = 0; + unsigned int CMVAR_UFINE_MAX = 0; + u32 addr; + + for (i = 0; i < 4; i++) { + if (speed == TXGBE_LINK_SPEED_10GB_FULL || speed == TXGBE_LINK_SPEED_40GB_FULL ) { + CMVAR_UFINE_MAX = S10G_CMVAR_UFINE_MAX; + } + else if (speed == TXGBE_LINK_SPEED_25GB_FULL) { + CMVAR_UFINE_MAX = S25G_CMVAR_UFINE_MAX; + } + + //a. Assign software defined variables as below �C + //ii. ULTRAFINE_CODE = ALIAS::RXS::ULTRAFINE + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + ULTRAFINE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i); + + //b. Perform the below logic sequence �C + while (ULTRAFINE_CODE > CMVAR_UFINE_MAX) { + ULTRAFINE_CODE = ULTRAFINE_CODE - 1; + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = ULTRAFINE_CODE; + txgbe_wr32_ephy(hw, addr, rdata); + + //Set ovrd_en=1 to overide ASIC value + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + + // Wait until 1milliseconds or greater + msleep(10); + } + } + return status; +} + +static int E56phySetRxsUfineLeMax(struct txgbe_hw *hw, u32 speed) +{ + int status = 0; + unsigned int rdata; + unsigned int ULTRAFINE_CODE; + + unsigned int CMVAR_UFINE_MAX = 0; + + if (speed == TXGBE_LINK_SPEED_10GB_FULL) { + CMVAR_UFINE_MAX = S10G_CMVAR_UFINE_MAX; + } + else if (speed == TXGBE_LINK_SPEED_25GB_FULL) { + CMVAR_UFINE_MAX = S25G_CMVAR_UFINE_MAX; + } + + //a. Assign software defined variables as below �C + //ii. 
ULTRAFINE_CODE = ALIAS::RXS::ULTRAFINE + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + ULTRAFINE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i); + + //b. Perform the below logic sequence �C + while (ULTRAFINE_CODE > CMVAR_UFINE_MAX) { + ULTRAFINE_CODE = ULTRAFINE_CODE - 1; + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i, ULTRAFINE_CODE); + //Set ovrd_en=1 to overide ASIC value + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i, 1); + // Wait until 1milliseconds or greater + msleep(10); + } + + return status; +} + +//-------------------------------------------------------------- +//compare function for qsort() +//-------------------------------------------------------------- +static int compare(const void *a, const void *b) +{ + const int *num1 = (const int *)a; + const int *num2 = (const int *)b; + + if (*num1 < *num2) { + return -1; + } else if (*num1 > *num2) { + return 1; + } else { + return 0; + } +} +static int E56phyRxRdSecondCode40g(struct txgbe_hw *hw, int *SECOND_CODE, int lane) +{ + int status = 0, i, N, median; + unsigned int rdata; + u32 addr; + int arraySize, RXS_BBCDR_SECOND_ORDER_ST[5]; + + + //Set ovrd_en=0 to read ASIC value + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + (lane * E56PHY_RXS_OFFSET); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_int_cstm_i) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + //As status update from RXS hardware is asynchronous to read status of SECOND_ORDER, follow sequence mentioned below. + N =5; + for (i=0; i5degC after the CDR locks for the first time or after the +//ious time this sequence was run. It is recommended to call this sequence periodically (eg: once every 100ms) or trigger +// sequence if the temperature drifts by >=5degC. Temperature must be read from an on-die temperature sensor. 
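+// Note: txgbe_temp_track_seq_40g() below walks all four RX lanes (i = 0..3);
+// txgbe_temp_track_seq() further down is the equivalent single-lane sequence
+// used for the 10G/25G speeds.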
+//-------------------------------------------------------------- +int txgbe_temp_track_seq_40g(struct txgbe_hw *hw, u32 speed) +{ + int status = 0; + unsigned int rdata; + int SECOND_CODE; + int COARSE_CODE; + int FINE_CODE; + int ULTRAFINE_CODE; + + int CMVAR_SEC_LOW_TH ; + int CMVAR_UFINE_MAX = 0; + int CMVAR_FINE_MAX ; + int CMVAR_UFINE_UMAX_WRAP = 0; + int CMVAR_COARSE_MAX ; + int CMVAR_UFINE_FMAX_WRAP = 0; + int CMVAR_FINE_FMAX_WRAP = 0; + int CMVAR_SEC_HIGH_TH ; + int CMVAR_UFINE_MIN ; + int CMVAR_FINE_MIN ; + int CMVAR_UFINE_UMIN_WRAP ; + int CMVAR_COARSE_MIN ; + int CMVAR_UFINE_FMIN_WRAP ; + int CMVAR_FINE_FMIN_WRAP ; + int i; + u32 addr; + int temp; + + struct txgbe_adapter *adapter = hw->back; + + for (i = 0; i < 4; i++) { + if(speed == TXGBE_LINK_SPEED_10GB_FULL || speed == TXGBE_LINK_SPEED_40GB_FULL) { + CMVAR_SEC_LOW_TH = S10G_CMVAR_SEC_LOW_TH ; + CMVAR_UFINE_MAX = S10G_CMVAR_UFINE_MAX ; + CMVAR_FINE_MAX = S10G_CMVAR_FINE_MAX ; + CMVAR_UFINE_UMAX_WRAP = S10G_CMVAR_UFINE_UMAX_WRAP; + CMVAR_COARSE_MAX = S10G_CMVAR_COARSE_MAX ; + CMVAR_UFINE_FMAX_WRAP = S10G_CMVAR_UFINE_FMAX_WRAP; + CMVAR_FINE_FMAX_WRAP = S10G_CMVAR_FINE_FMAX_WRAP ; + CMVAR_SEC_HIGH_TH = S10G_CMVAR_SEC_HIGH_TH ; + CMVAR_UFINE_MIN = S10G_CMVAR_UFINE_MIN ; + CMVAR_FINE_MIN = S10G_CMVAR_FINE_MIN ; + CMVAR_UFINE_UMIN_WRAP = S10G_CMVAR_UFINE_UMIN_WRAP; + CMVAR_COARSE_MIN = S10G_CMVAR_COARSE_MIN ; + CMVAR_UFINE_FMIN_WRAP = S10G_CMVAR_UFINE_FMIN_WRAP; + CMVAR_FINE_FMIN_WRAP = S10G_CMVAR_FINE_FMIN_WRAP ; + } + else if (speed == TXGBE_LINK_SPEED_25GB_FULL) { + CMVAR_SEC_LOW_TH = S25G_CMVAR_SEC_LOW_TH ; + CMVAR_UFINE_MAX = S25G_CMVAR_UFINE_MAX ; + CMVAR_FINE_MAX = S25G_CMVAR_FINE_MAX ; + CMVAR_UFINE_UMAX_WRAP = S25G_CMVAR_UFINE_UMAX_WRAP; + CMVAR_COARSE_MAX = S25G_CMVAR_COARSE_MAX ; + CMVAR_UFINE_FMAX_WRAP = S25G_CMVAR_UFINE_FMAX_WRAP; + CMVAR_FINE_FMAX_WRAP = S25G_CMVAR_FINE_FMAX_WRAP ; + CMVAR_SEC_HIGH_TH = S25G_CMVAR_SEC_HIGH_TH ; + CMVAR_UFINE_MIN = S25G_CMVAR_UFINE_MIN ; + CMVAR_FINE_MIN = S25G_CMVAR_FINE_MIN ; + CMVAR_UFINE_UMIN_WRAP = S25G_CMVAR_UFINE_UMIN_WRAP; + CMVAR_COARSE_MIN = S25G_CMVAR_COARSE_MIN ; + CMVAR_UFINE_FMIN_WRAP = S25G_CMVAR_UFINE_FMIN_WRAP; + CMVAR_FINE_FMIN_WRAP = S25G_CMVAR_FINE_FMIN_WRAP ; + } else { + printk("Error Speed\n"); + return 0; + } + + status = txgbe_e56_get_temp(hw, &temp); + if (status) + return 0; + + adapter->amlite_temp = temp; + + //Assign software defined variables as below �C + //a. SECOND_CODE = ALIAS::RXS::SECOND_ORDER + status |= E56phyRxRdSecondCode40g(hw, &SECOND_CODE, i); + + //b. COARSE_CODE = ALIAS::RXS::COARSE + //c. FINE_CODE = ALIAS::RXS::FINE + //d. 
ULTRAFINE_CODE = ALIAS::RXS::ULTRAFINE + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + COARSE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_coarse_i); + FINE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i); + ULTRAFINE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i); + + if (SECOND_CODE <= CMVAR_SEC_LOW_TH) { + if (ULTRAFINE_CODE < CMVAR_UFINE_MAX) { + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = ULTRAFINE_CODE + 1; + txgbe_wr32_ephy(hw, addr, rdata); + + //Set ovrd_en=1 to overide ASIC value + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + } else if (FINE_CODE < CMVAR_FINE_MAX) { + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_UMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = FINE_CODE + 1; + txgbe_wr32_ephy(hw, addr, rdata); + //Note: All two of above code updates should be written in a single register write + //Set ovrd_en=1 to overide ASIC value + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + } else if (COARSE_CODE < CMVAR_COARSE_MAX) { + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_FMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = CMVAR_FINE_FMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_coarse_i) = COARSE_CODE + 1; + txgbe_wr32_ephy(hw, addr, rdata); + + //Note: All three of above code updates should be written in a single register write + //Set ovrd_en=1 to overide ASIC value + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_coarse_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + } else { + printk("ERROR: (SECOND_CODE <= CMVAR_SEC_LOW_TH) temperature tracking occurs Error condition\n"); + } + } else if (SECOND_CODE >= CMVAR_SEC_HIGH_TH) { + if (ULTRAFINE_CODE > CMVAR_UFINE_MIN) { + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = ULTRAFINE_CODE - 1; + txgbe_wr32_ephy(hw, addr, rdata); + + //Set ovrd_en=1 to overide ASIC value + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + } else if (FINE_CODE > CMVAR_FINE_MIN) { + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_UMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = FINE_CODE - 1; + txgbe_wr32_ephy(hw, addr, rdata); + + //Note: All 
two of above code updates should be written in a single register write + //Set ovrd_en=1 to overide ASIC value + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + } else if (COARSE_CODE > CMVAR_COARSE_MIN) { + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_FMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = CMVAR_FINE_FMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_coarse_i) = COARSE_CODE - 1; + txgbe_wr32_ephy(hw, addr, rdata); + + //Note: All three of above code updates should be written in a single register write + //Set ovrd_en=1 to overide ASIC value + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_coarse_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + } else { + printk("ERROR: (SECOND_CODE >= CMVAR_SEC_HIGH_TH) temperature tracking occurs Error condition\n"); + } + } + } + return status; +} + + +//-------------------------------------------------------------- +//2.3.4 RXS post CDR lock temperature tracking sequence +// +//Below sequence must be run before the temperature drifts by >5degC after the CDR locks for the first time or after the +//ious time this sequence was run. It is recommended to call this sequence periodically (eg: once every 100ms) or trigger +// sequence if the temperature drifts by >=5degC. Temperature must be read from an on-die temperature sensor. 
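+// Note: this variant services a single lane through the EPHY_RREG()/EPHY_XFLD()/
+// EPHY_WREG() accessors, so the per-lane address offsets used by the 40G variant
+// above are not needed here.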
+//-------------------------------------------------------------- +int txgbe_temp_track_seq(struct txgbe_hw *hw, u32 speed) +{ + struct txgbe_adapter *adapter = hw->back; + int status = 0; + unsigned int rdata; + int SECOND_CODE; + int COARSE_CODE; + int FINE_CODE; + int ULTRAFINE_CODE; + + int CMVAR_SEC_LOW_TH ; + int CMVAR_UFINE_MAX = 0; + int CMVAR_FINE_MAX ; + int CMVAR_UFINE_UMAX_WRAP = 0; + int CMVAR_COARSE_MAX ; + int CMVAR_UFINE_FMAX_WRAP = 0; + int CMVAR_FINE_FMAX_WRAP = 0; + int CMVAR_SEC_HIGH_TH ; + int CMVAR_UFINE_MIN ; + int CMVAR_FINE_MIN ; + int CMVAR_UFINE_UMIN_WRAP ; + int CMVAR_COARSE_MIN ; + int CMVAR_UFINE_FMIN_WRAP ; + int CMVAR_FINE_FMIN_WRAP ; + int temp; + + if(speed == TXGBE_LINK_SPEED_10GB_FULL) { + CMVAR_SEC_LOW_TH = S10G_CMVAR_SEC_LOW_TH ; + CMVAR_UFINE_MAX = S10G_CMVAR_UFINE_MAX ; + CMVAR_FINE_MAX = S10G_CMVAR_FINE_MAX ; + CMVAR_UFINE_UMAX_WRAP = S10G_CMVAR_UFINE_UMAX_WRAP; + CMVAR_COARSE_MAX = S10G_CMVAR_COARSE_MAX ; + CMVAR_UFINE_FMAX_WRAP = S10G_CMVAR_UFINE_FMAX_WRAP; + CMVAR_FINE_FMAX_WRAP = S10G_CMVAR_FINE_FMAX_WRAP ; + CMVAR_SEC_HIGH_TH = S10G_CMVAR_SEC_HIGH_TH ; + CMVAR_UFINE_MIN = S10G_CMVAR_UFINE_MIN ; + CMVAR_FINE_MIN = S10G_CMVAR_FINE_MIN ; + CMVAR_UFINE_UMIN_WRAP = S10G_CMVAR_UFINE_UMIN_WRAP; + CMVAR_COARSE_MIN = S10G_CMVAR_COARSE_MIN ; + CMVAR_UFINE_FMIN_WRAP = S10G_CMVAR_UFINE_FMIN_WRAP; + CMVAR_FINE_FMIN_WRAP = S10G_CMVAR_FINE_FMIN_WRAP ; + } + else if (speed == TXGBE_LINK_SPEED_25GB_FULL) { + CMVAR_SEC_LOW_TH = S25G_CMVAR_SEC_LOW_TH ; + CMVAR_UFINE_MAX = S25G_CMVAR_UFINE_MAX ; + CMVAR_FINE_MAX = S25G_CMVAR_FINE_MAX ; + CMVAR_UFINE_UMAX_WRAP = S25G_CMVAR_UFINE_UMAX_WRAP; + CMVAR_COARSE_MAX = S25G_CMVAR_COARSE_MAX ; + CMVAR_UFINE_FMAX_WRAP = S25G_CMVAR_UFINE_FMAX_WRAP; + CMVAR_FINE_FMAX_WRAP = S25G_CMVAR_FINE_FMAX_WRAP ; + CMVAR_SEC_HIGH_TH = S25G_CMVAR_SEC_HIGH_TH ; + CMVAR_UFINE_MIN = S25G_CMVAR_UFINE_MIN ; + CMVAR_FINE_MIN = S25G_CMVAR_FINE_MIN ; + CMVAR_UFINE_UMIN_WRAP = S25G_CMVAR_UFINE_UMIN_WRAP; + CMVAR_COARSE_MIN = S25G_CMVAR_COARSE_MIN ; + CMVAR_UFINE_FMIN_WRAP = S25G_CMVAR_UFINE_FMIN_WRAP; + CMVAR_FINE_FMIN_WRAP = S25G_CMVAR_FINE_FMIN_WRAP ; + } else { + printk("Error Speed\n"); + return 0; + } + + status = txgbe_e56_get_temp(hw, &temp); + if (status) + return 0; + + adapter->amlite_temp = temp; + + //Assign software defined variables as below �C + //a. SECOND_CODE = ALIAS::RXS::SECOND_ORDER + status |= E56phyRxRdSecondCode(hw, &SECOND_CODE); + + //b. COARSE_CODE = ALIAS::RXS::COARSE + //c. FINE_CODE = ALIAS::RXS::FINE + //d. 
ULTRAFINE_CODE = ALIAS::RXS::ULTRAFINE + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + COARSE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_coarse_i); + FINE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i); + ULTRAFINE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i); + + if (SECOND_CODE <= CMVAR_SEC_LOW_TH) { + if (ULTRAFINE_CODE < CMVAR_UFINE_MAX) { + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i, ULTRAFINE_CODE + 1); + //Set ovrd_en=1 to overide ASIC value + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (FINE_CODE < CMVAR_FINE_MAX) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_UMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = FINE_CODE + 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + //Note: All two of above code updates should be written in a single register write + //Set ovrd_en=1 to overide ASIC value + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (COARSE_CODE < CMVAR_COARSE_MAX) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_FMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = CMVAR_FINE_FMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_coarse_i) = COARSE_CODE + 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + //Note: All three of above code updates should be written in a single register write + //Set ovrd_en=1 to overide ASIC value + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_coarse_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else { + printk("ERROR: (SECOND_CODE <= CMVAR_SEC_LOW_TH) temperature tracking occurs Error condition\n"); + } + } else if (SECOND_CODE >= CMVAR_SEC_HIGH_TH) { + if (ULTRAFINE_CODE > CMVAR_UFINE_MIN) { + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i, ULTRAFINE_CODE - 1); + //Set ovrd_en=1 to overide ASIC value + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (FINE_CODE > CMVAR_FINE_MIN) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_UMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = FINE_CODE - 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + //Note: All two of above code updates should be written in a single register write + //Set ovrd_en=1 to overide ASIC value + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (COARSE_CODE > CMVAR_COARSE_MIN) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_FMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = CMVAR_FINE_FMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_coarse_i) = COARSE_CODE - 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + 
//Note: All three of above code updates should be written in a single register write + //Set ovrd_en=1 to overide ASIC value + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_coarse_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else { + printk("ERROR: (SECOND_CODE >= CMVAR_SEC_HIGH_TH) temperature tracking occurs Error condition\n"); + } + } + + return status; +} + +static int E56phyCtleBypassSeq(struct txgbe_hw *hw, u32 speed) +{ + int status = 0; + unsigned int rdata; + + + //1. Program the following RXS registers as mentioned below. + //RXS::ANA_OVRDVAL[0]::ana_ctle_bypass_i = 1��b1 + //RXS::ANA_OVRDEN[0]::ovrd_en_ana_ctle_bypass_i = 1��b1 + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_0, ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_0, ovrd_en_ana_ctle_bypass_i, 1); + + //RXS::ANA_OVRDVAL[3]::ana_ctle_cz_cstm_i[4:0] = 0 + //RXS::ANA_OVRDEN[0]::ovrd_en_ana_ctle_cz_cstm_i = 1��b1 + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_3, ana_ctle_cz_cstm_i, 0); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_0, ovrd_en_ana_ctle_cz_cstm_i, 1); + + //2. Program the following PDIG registers as mentioned below. + //PDIG::RXS_OVRDVAL[1]::rxs_rx0_ctle_train_en_i = 1��b0 + //PDIG::RXS_OVRDEN[1]::ovrd_en_rxs_rx0_ctle_train_en_i = 1��b1 + // + //PDIG::RXS_OVRDVAL[1]::rxs_rx0_ctle_train_done_o = 1��b1 + //PDIG::RXS_OVRDEN[1]::ovrd_en_rxs_rx0_ctle_train_done_o = 1��b1 + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_en_i) = 0; + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS0_OVRDVAL_1); + + EPHY_RREG(E56G__PMD_RXS0_OVRDEN_1); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_en_i) = 1; + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS0_OVRDEN_1); + + if (speed == TXGBE_LINK_SPEED_40GB_FULL) { + //1. Program the following RXS registers as mentioned below. + //RXS::ANA_OVRDVAL[0]::ana_ctle_bypass_i = 1��b1 + //RXS::ANA_OVRDEN[0]::ovrd_en_ana_ctle_bypass_i = 1��b1 + txgbe_e56_ephy_config(E56G__RXS1_ANA_OVRDVAL_0, ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS1_ANA_OVRDEN_0, ovrd_en_ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS2_ANA_OVRDVAL_0, ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS2_ANA_OVRDEN_0, ovrd_en_ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDVAL_0, ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDEN_0, ovrd_en_ana_ctle_bypass_i, 1); + + //RXS::ANA_OVRDVAL[3]::ana_ctle_cz_cstm_i[4:0] = 0 + //RXS::ANA_OVRDEN[0]::ovrd_en_ana_ctle_cz_cstm_i = 1��b1 + txgbe_e56_ephy_config(E56G__RXS1_ANA_OVRDVAL_3, ana_ctle_cz_cstm_i, 0); + txgbe_e56_ephy_config(E56G__RXS1_ANA_OVRDEN_0, ovrd_en_ana_ctle_cz_cstm_i, 1); + txgbe_e56_ephy_config(E56G__RXS2_ANA_OVRDVAL_3, ana_ctle_cz_cstm_i, 0); + txgbe_e56_ephy_config(E56G__RXS2_ANA_OVRDEN_0, ovrd_en_ana_ctle_cz_cstm_i, 1); + txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDVAL_3, ana_ctle_cz_cstm_i, 0); + txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDEN_0, ovrd_en_ana_ctle_cz_cstm_i, 1); + + //2. Program the following PDIG registers as mentioned below. 
+ //PDIG::RXS_OVRDVAL[1]::rxs_rx0_ctle_train_en_i = 1��b0 + //PDIG::RXS_OVRDEN[1]::ovrd_en_rxs_rx0_ctle_train_en_i = 1��b1 + // + //PDIG::RXS_OVRDVAL[1]::rxs_rx0_ctle_train_done_o = 1��b1 + //PDIG::RXS_OVRDEN[1]::ovrd_en_rxs_rx0_ctle_train_done_o = 1��b1 + EPHY_RREG(E56G__PMD_RXS1_OVRDVAL_1); + EPHY_XFLD(E56G__PMD_RXS1_OVRDVAL_1, rxs1_rx0_ctle_train_en_i) = 0; + EPHY_XFLD(E56G__PMD_RXS1_OVRDVAL_1, rxs1_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS1_OVRDVAL_1); + EPHY_RREG(E56G__PMD_RXS2_OVRDVAL_1); + EPHY_XFLD(E56G__PMD_RXS2_OVRDVAL_1, rxs2_rx0_ctle_train_en_i) = 0; + EPHY_XFLD(E56G__PMD_RXS2_OVRDVAL_1, rxs2_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS2_OVRDVAL_1); + EPHY_RREG(E56G__PMD_RXS3_OVRDVAL_1); + EPHY_XFLD(E56G__PMD_RXS3_OVRDVAL_1, rxs3_rx0_ctle_train_en_i) = 0; + EPHY_XFLD(E56G__PMD_RXS3_OVRDVAL_1, rxs3_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS3_OVRDVAL_1); + + EPHY_RREG(E56G__PMD_RXS1_OVRDEN_1); + EPHY_XFLD(E56G__PMD_RXS1_OVRDEN_1, ovrd_en_rxs1_rx0_ctle_train_en_i) = 1; + EPHY_XFLD(E56G__PMD_RXS1_OVRDEN_1, ovrd_en_rxs1_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS1_OVRDEN_1); + EPHY_RREG(E56G__PMD_RXS2_OVRDEN_1); + EPHY_XFLD(E56G__PMD_RXS2_OVRDEN_1, ovrd_en_rxs2_rx0_ctle_train_en_i) = 1; + EPHY_XFLD(E56G__PMD_RXS2_OVRDEN_1, ovrd_en_rxs2_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS2_OVRDEN_1); + EPHY_RREG(E56G__PMD_RXS3_OVRDEN_1); + EPHY_XFLD(E56G__PMD_RXS3_OVRDEN_1, ovrd_en_rxs3_rx0_ctle_train_en_i) = 1; + EPHY_XFLD(E56G__PMD_RXS3_OVRDEN_1, ovrd_en_rxs3_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS3_OVRDEN_1); + + } + return status; +} + +static int E56phyRxsCalibAdaptSeq40G(struct txgbe_hw *hw, u32 speed) +{ + int status = 0, i, j; + u32 addr, timer; + u32 rdata = 0x0; + u32 bypassCtle = true; + + for (i = 0; i < 4; i++) { + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_OFST_ADAPT_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_2_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_OFST_ADAPT_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_2_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_DONE_O, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_DONE_O, + 
0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_ADAPT_EN_I, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_2_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_INTL_ADAPT_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + } + + if (bypassCtle == 1) + E56phyCtleBypassSeq(hw, speed); + + //2. Follow sequence described in 2.3.2 RXS Osc Initialization for temperature tracking range here. RXS would be enabled at the end of this sequence. For the case when PAM4 KR training is not enabled (including PAM4 mode without KR training), wait until ALIAS::PDIG::CTRL_FSM_RX_ST would return RX_TRAIN_15_ST (RX_RDY_ST). + E56phyRxsOscInitForTempTrackRange(hw, speed); + + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + timer = 0; + rdata = 0; + while(EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx0_st) != E56PHY_RX_RDY_ST || + EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx1_st) != E56PHY_RX_RDY_ST || + EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx2_st) != E56PHY_RX_RDY_ST || + EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx3_st) != E56PHY_RX_RDY_ST) { + rdata = rd32_ephy(hw, addr); + udelay(500); + if (timer++ > PHYINIT_TIMEOUT) { + //Do SEQ::RX_DISABLE + rdata = 0; + addr = E56PHY_PMD_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + return TXGBE_ERR_TIMEOUT; + } + } + + //RXS ADC adaptation sequence + //E56phyRxsAdcAdaptSeq + rdata = 0; + timer = 0; + while(EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_cdr_rdy_o) != 1) { + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + udelay(500); + if (timer++ > PHYINIT_TIMEOUT) + return TXGBE_ERR_TIMEOUT; + } + + rdata = 0; + timer = 0; + while(EPHY_XFLD(E56G__PMD_RXS1_OVRDVAL_1, rxs1_rx0_cdr_rdy_o) != 1) { + EPHY_RREG(E56G__PMD_RXS1_OVRDVAL_1); + udelay(500); + if (timer++ > PHYINIT_TIMEOUT) + return TXGBE_ERR_TIMEOUT; + } + rdata = 0; + timer = 0; + while(EPHY_XFLD(E56G__PMD_RXS2_OVRDVAL_1, rxs2_rx0_cdr_rdy_o) != 1) { + EPHY_RREG(E56G__PMD_RXS2_OVRDVAL_1); + udelay(500); + if (timer++ > PHYINIT_TIMEOUT) + return TXGBE_ERR_TIMEOUT; + } + + rdata = 0; + timer = 0; + while(EPHY_XFLD(E56G__PMD_RXS3_OVRDVAL_1, rxs3_rx0_cdr_rdy_o) != 1) { + EPHY_RREG(E56G__PMD_RXS3_OVRDVAL_1); + udelay(500); + if (timer++ > PHYINIT_TIMEOUT) + return TXGBE_ERR_TIMEOUT; + } + + for (i = 0; i < 4; i++) { + //4. Disable VGA and CTLE training so that they don't interfere with ADC calibration + //a. Set ALIAS::RXS::VGA_TRAIN_EN = 0b0 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_VGA_TRAIN_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_VGA_TRAIN_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //b. 
Set ALIAS::RXS::CTLE_TRAIN_EN = 0b0 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_CTLE_TRAIN_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_CTLE_TRAIN_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //5. Perform ADC interleaver calibration + //a. Remove the OVERRIDE on ALIAS::RXS::ADC_INTL_CAL_DONE + addr = E56PHY_RXS0_OVRDEN_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_DONE_O, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + timer = 0; + while(((rdata >> E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_DONE_O_LSB) & 1) != 1) { + rdata = rd32_ephy(hw, addr); + udelay(1000); + + if (timer++ > PHYINIT_TIMEOUT) { + break; + } + } + + //6. Perform ADC offset adaptation and ADC gain adaptation, repeat them a few times and after that keep it disabled. + for(j = 0; j < 16; j++) { + //a. ALIAS::RXS::ADC_OFST_ADAPT_EN = 0b1 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_OFST_ADAPT_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //b. Wait for 1ms or greater + addr = E56G__PMD_RXS0_OVRDEN_2_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_ofst_adapt_done_o) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0; + addr = E56G__PMD_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + timer = 0; + while(EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_adc_ofst_adapt_done_o) != 1) { + rdata = rd32_ephy(hw, addr); + udelay(500); + if (timer++ > PHYINIT_TIMEOUT) { + break; + } + } + + //c. ALIAS::RXS::ADC_OFST_ADAPT_EN = 0b0 + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_OFST_ADAPT_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + //d. ALIAS::RXS::ADC_GAIN_ADAPT_EN = 0b1 + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //e. Wait for 1ms or greater + addr = E56G__PMD_RXS0_OVRDEN_2_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_ofst_adapt_done_o) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0; + timer = 0; + addr = E56G__PMD_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + while(EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_adc_gain_adapt_done_o) != 1) { + rdata = rd32_ephy(hw, addr); + udelay(500); + + if (timer++ > PHYINIT_TIMEOUT) { + break; + } + } + + //f. ALIAS::RXS::ADC_GAIN_ADAPT_EN = 0b0 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + } + //g. 
Repeat #a to #f total 16 times + + + //7. Perform ADC interleaver adaptation for 10ms or greater, and after that disable it + //a. ALIAS::RXS::ADC_INTL_ADAPT_EN = 0b1 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_ADAPT_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + //b. Wait for 10ms or greater + msleep(10); + + //c. ALIAS::RXS::ADC_INTL_ADAPT_EN = 0b0 + addr = E56G__PMD_RXS0_OVRDEN_2_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_intl_adapt_en_i) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + //8. Now re-enable VGA and CTLE trainings, so that it continues to adapt tracking changes in temperature or voltage + //<1>Set ALIAS::RXS::VGA_TRAIN_EN = 0b1 + // Set ALIAS::RXS::CTLE_TRAIN_EN = 0b1 + addr = E56G__PMD_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_vga_train_en_i) = 1; + if(bypassCtle == 0) { + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_en_i) = 1; + } + //printf("Setting RXS0_OVRDVAL[1]::rxs0_rx0_ffe_train_en_i to 1\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ffe_train_en_i) = 1; + //printf("Setting RXS0_OVRDVAL[1]::rxs0_rx0_dfe_train_en_i to 1\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_dfe_train_en_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + + // + //EPHY_RREG(E56G__PMD_RXS0_OVRDEN_1); + //printf("Setting RXS0_OVRDEN[1]::ovrd_en_rxs0_rx0_ffe_train_en_i to 1\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ffe_train_en_i) = 1; + //printf("Setting RXS0_OVRDEN[1]::ovrd_en_rxs0_rx0_dfe_train_en_i to 1\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_dfe_train_en_i) = 1; + //EPHY_WREG(E56G__PMD_RXS0_OVRDEN_1); + + + //<2>wait for ALIAS::RXS::VGA_TRAIN_DONE = 1 + // wait for ALIAS::RXS::CTLE_TRAIN_DONE = 1 + addr = E56G__PMD_RXS0_OVRDEN_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_vga_train_done_o) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0; + timer = 0; + addr = E56G__PMD_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + while(EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_vga_train_done_o) != 1) { + rdata = rd32_ephy(hw, addr); + udelay(500); + + if (timer++ > PHYINIT_TIMEOUT) { + break; + //return -1; + } //if (timer++ > PHYINIT_TIMEOUT) { + } //while + + if(bypassCtle == 0) { + addr = E56G__PMD_RXS0_OVRDEN_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_done_o) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0; + timer = 0; + addr = E56G__PMD_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + while(EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_done_o) != 1) { + rdata = rd32_ephy(hw, addr); + udelay(500); + + if (timer++ > PHYINIT_TIMEOUT) { + break; + //return -1; + } //if (timer++ > PHYINIT_TIMEOUT) { + } //while + } + + //a. Remove the OVERRIDE on ALIAS::RXS::VGA_TRAIN_EN + addr = E56G__PMD_RXS0_OVRDEN_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_vga_train_en_i) = 0; + //b. 
Remove the OVERRIDE on ALIAS::RXS::CTLE_TRAIN_EN + if(bypassCtle == 0) { + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_en_i) = 0; + } + ////Remove the OVERRIDE on ALIAS::RXS::FFE_TRAIN_EN + //printf("Setting RXS0_OVRDEN[1]::ovrd_en_rxs0_rx0_ffe_train_en_i to 0\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ffe_train_en_i) = 0; + ////Remove the OVERRIDE on ALIAS::RXS::DFE_TRAIN_EN + //printf("Setting RXS0_OVRDEN[1]::ovrd_en_rxs0_rx0_dfe_train_en_i to 0\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_dfe_train_en_i) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + } + return status; +} + +static int E56phyRxsCalibAdaptSeq(struct txgbe_hw *hw, u32 speed) +{ + int status = 0, i; + u32 addr, timer; + u32 rdata = 0x0; + u32 bypassCtle = true; + + if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) { + bypassCtle = false; + } else { + bypassCtle = true; + } + + if (hw->mac.type == txgbe_mac_aml) { + msleep(350); + rdata = rd32(hw, TXGBE_GPIO_EXT); + if (rdata & (TXGBE_SFP1_MOD_ABS_LS | TXGBE_SFP1_RX_LOS_LS)) { + return TXGBE_ERR_PHY_INIT_NOT_DONE; + } + } + + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_OFST_ADAPT_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_OFST_ADAPT_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_DONE_O, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_DONE_O, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_ADAPT_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_INTL_ADAPT_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + if (bypassCtle == 1) + E56phyCtleBypassSeq(hw, speed); + + //2. Follow sequence described in 2.3.2 RXS Osc Initialization for temperature tracking range here. RXS would be enabled at the end of this sequence. For the case when PAM4 KR training is not enabled (including PAM4 mode without KR training), wait until ALIAS::PDIG::CTRL_FSM_RX_ST would return RX_TRAIN_15_ST (RX_RDY_ST). 
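+	/*
+	 * After the oscillator-init call below, the loop polls
+	 * CTRL_FSM_RX_STAT_0 until lane 0 reports RX_RDY_ST; if
+	 * PHYINIT_TIMEOUT expires, RX_EN_CFG is cleared again and
+	 * TXGBE_ERR_TIMEOUT is returned.
+	 */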
+ E56phyRxsOscInitForTempTrackRange(hw, speed); + + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + timer = 0; + rdata = 0; + while(EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx0_st) != E56PHY_RX_RDY_ST) { + rdata = rd32_ephy(hw, addr); + udelay(500); + EPHY_RREG(E56G__PMD_CTRL_FSM_RX_STAT_0); + if (timer++ > PHYINIT_TIMEOUT) { + //Do SEQ::RX_DISABLE + rdata = 0; + addr = E56PHY_PMD_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + return TXGBE_ERR_TIMEOUT; + } + } + + //RXS ADC adaptation sequence + //E56phyRxsAdcAdaptSeq + rdata = 0; + timer = 0; + while(EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_cdr_rdy_o) != 1) { + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + udelay(500); + if (timer++ > PHYINIT_TIMEOUT) + return TXGBE_ERR_TIMEOUT; + } + + //4. Disable VGA and CTLE training so that they don't interfere with ADC calibration + //a. Set ALIAS::RXS::VGA_TRAIN_EN = 0b0 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_VGA_TRAIN_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_VGA_TRAIN_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //b. Set ALIAS::RXS::CTLE_TRAIN_EN = 0b0 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_CTLE_TRAIN_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_CTLE_TRAIN_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //5. Perform ADC interleaver calibration + //a. Remove the OVERRIDE on ALIAS::RXS::ADC_INTL_CAL_DONE + addr = E56PHY_RXS0_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_DONE_O, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + timer = 0; + while(((rdata >> E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_DONE_O_LSB) & 1) != 1) { + rdata = rd32_ephy(hw, addr); + udelay(1000); + + if (timer++ > PHYINIT_TIMEOUT) { + break; + } + } + + //6. Perform ADC offset adaptation and ADC gain adaptation, repeat them a few times and after that keep it disabled. + for(i = 0; i < 16; i++) { + //a. ALIAS::RXS::ADC_OFST_ADAPT_EN = 0b1 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_OFST_ADAPT_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //b. Wait for 1ms or greater + txgbe_e56_ephy_config(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_ofst_adapt_done_o, 0); + rdata = 0; + timer = 0; + while(EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_adc_ofst_adapt_done_o) != 1) { + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + udelay(500); + if (timer++ > PHYINIT_TIMEOUT) { + break; + } + } + + //c. ALIAS::RXS::ADC_OFST_ADAPT_EN = 0b0 + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_OFST_ADAPT_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + //d. 
ALIAS::RXS::ADC_GAIN_ADAPT_EN = 0b1 + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //e. Wait for 1ms or greater + txgbe_e56_ephy_config(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_ofst_adapt_done_o, 0); + rdata = 0; + timer = 0; + while(EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_adc_gain_adapt_done_o) != 1) { + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + udelay(500); + + if (timer++ > PHYINIT_TIMEOUT) { + break; + } + } + + //f. ALIAS::RXS::ADC_GAIN_ADAPT_EN = 0b0 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + } + //g. Repeat #a to #f total 16 times + + + //7. Perform ADC interleaver adaptation for 10ms or greater, and after that disable it + //a. ALIAS::RXS::ADC_INTL_ADAPT_EN = 0b1 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_ADAPT_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + //b. Wait for 10ms or greater + msleep(10); + + //c. ALIAS::RXS::ADC_INTL_ADAPT_EN = 0b0 + txgbe_e56_ephy_config(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_intl_adapt_en_i, 0); + + //8. Now re-enable VGA and CTLE trainings, so that it continues to adapt tracking changes in temperature or voltage + //<1>Set ALIAS::RXS::VGA_TRAIN_EN = 0b1 + // Set ALIAS::RXS::CTLE_TRAIN_EN = 0b1 + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_vga_train_en_i) = 1; + if(bypassCtle == 0) { + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_en_i) = 1; + } + //printf("Setting RXS0_OVRDVAL[1]::rxs0_rx0_ffe_train_en_i to 1\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ffe_train_en_i) = 1; + //printf("Setting RXS0_OVRDVAL[1]::rxs0_rx0_dfe_train_en_i to 1\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_dfe_train_en_i) = 1; + EPHY_WREG(E56G__PMD_RXS0_OVRDVAL_1); + // + //EPHY_RREG(E56G__PMD_RXS0_OVRDEN_1); + //printf("Setting RXS0_OVRDEN[1]::ovrd_en_rxs0_rx0_ffe_train_en_i to 1\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ffe_train_en_i) = 1; + //printf("Setting RXS0_OVRDEN[1]::ovrd_en_rxs0_rx0_dfe_train_en_i to 1\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_dfe_train_en_i) = 1; + //EPHY_WREG(E56G__PMD_RXS0_OVRDEN_1); + + + //<2>wait for ALIAS::RXS::VGA_TRAIN_DONE = 1 + // wait for ALIAS::RXS::CTLE_TRAIN_DONE = 1 + txgbe_e56_ephy_config(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_vga_train_done_o, 0); + rdata = 0; + timer = 0; + while(EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_vga_train_done_o) != 1) { + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + udelay(500); + + if (timer++ > PHYINIT_TIMEOUT) { + break; + //return -1; + } //if (timer++ > PHYINIT_TIMEOUT) { + } //while + + if(bypassCtle == 0) { + txgbe_e56_ephy_config(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_done_o, 0); + rdata = 0; + timer = 0; + while(EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_done_o) != 1) { + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + udelay(500); + + if (timer++ > PHYINIT_TIMEOUT) { + break; + //return -1; + } //if (timer++ > PHYINIT_TIMEOUT) { + } //while + } + + //a. Remove the OVERRIDE on ALIAS::RXS::VGA_TRAIN_EN + EPHY_RREG(E56G__PMD_RXS0_OVRDEN_1); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_vga_train_en_i) = 0; + //b. 
Remove the OVERRIDE on ALIAS::RXS::CTLE_TRAIN_EN + if(bypassCtle == 0) { + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_en_i) = 0; + } + ////Remove the OVERRIDE on ALIAS::RXS::FFE_TRAIN_EN + //printf("Setting RXS0_OVRDEN[1]::ovrd_en_rxs0_rx0_ffe_train_en_i to 0\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ffe_train_en_i) = 0; + ////Remove the OVERRIDE on ALIAS::RXS::DFE_TRAIN_EN + //printf("Setting RXS0_OVRDEN[1]::ovrd_en_rxs0_rx0_dfe_train_en_i to 0\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_dfe_train_en_i) = 0; + EPHY_WREG(E56G__PMD_RXS0_OVRDEN_1); + + return status; +} + +u32 txgbe_e56_cfg_temp(struct txgbe_hw *hw) +{ + u32 status; + u32 value; + int temp; + + status = txgbe_e56_get_temp(hw, &temp); + if (status) + temp = DEFAULT_TEMP; + + if (temp < DEFAULT_TEMP) { + value = rd32_ephy(hw, CMS_ANA_OVRDEN0); + field_set(&value, 25, 25, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDEN0, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL2); + field_set(&value, 20, 16, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL2, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDEN1); + field_set(&value, 12, 12, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDEN1, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL7); + field_set(&value, 8, 4, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL7, value); + } else if (temp > HIGH_TEMP) { + value = rd32_ephy(hw, CMS_ANA_OVRDEN0); + field_set(&value, 25, 25, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDEN0, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL2); + field_set(&value, 20, 16, 0x3); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL2, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDEN1); + field_set(&value, 12, 12, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDEN1, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL7); + field_set(&value, 8, 4, 0x3); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL7, value); + } else { + value = rd32_ephy(hw, CMS_ANA_OVRDEN1); + field_set(&value, 4, 4, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDEN1, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL4); + field_set(&value, 24, 24, 0x1); + field_set(&value, 31, 29, 0x4); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL4, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL5); + field_set(&value, 1, 0, 0x0); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL5, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDEN1); + field_set(&value, 23, 23, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDEN1, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL9); + field_set(&value, 24, 24, 0x1); + field_set(&value, 31, 29, 0x4); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL9, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL10); + field_set(&value, 1, 0, 0x0); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL10, value); + } + + return 0; +} + +int txgbe_e56_config_rx_40G(struct txgbe_hw *hw, u32 speed) +{ + struct txgbe_adapter *adapter = hw->back; + s32 status; + + status = E56phyRxsCalibAdaptSeq40G(hw, speed); + if (status) + return status; + + //Step 2 of 2.3.4 + E56phySetRxsUfineLeMax40G(hw, speed); + + //2.3.4 RXS post CDR lock temperature tracking sequence + txgbe_temp_track_seq_40g(hw, speed); + + adapter->link_valid = true; + return 0; +} + +static int txgbe_e56_config_rx(struct txgbe_hw *hw, u32 speed) +{ + s32 status; + + if (speed == TXGBE_LINK_SPEED_40GB_FULL) { + txgbe_e56_config_rx_40G(hw, speed); + } else { + status = E56phyRxsCalibAdaptSeq(hw, speed); + if (status) + return status; + + //Step 2 of 2.3.4 + E56phySetRxsUfineLeMax(hw, speed); + + //2.3.4 RXS post CDR lock temperature tracking sequence + txgbe_temp_track_seq(hw, speed); + } + return 
0; +} + +//-------------------------------------------------------------- +//2.2.10 SEQ::RX_DISABLE +//Use PDIG::PMD_CFG[0]::rx_en_cfg[] = 0b0 to powerdown specific RXS lanes. +//Completion of RXS powerdown can be confirmed by observing ALIAS::PDIG::CTRL_FSM_RX_ST = POWERDN_ST +//-------------------------------------------------------------- +static int txgbe_e56_disable_rx40G(struct txgbe_hw *hw) +{ + int status = 0; + unsigned int rdata, timer; + unsigned int addr, temp; + int i; + + for (i = 0; i < 4; i++) { + //1. Disable OVERRIDE on below aliases + //a. ALIAS::RXS::RANGE_SEL + rdata = 0x0000; + addr = E56G__RXS0_ANA_OVRDEN_0_ADDR + (i * E56PHY_RXS_OFFSET); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_0, ovrd_en_ana_bbcdr_osc_range_sel_i) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + (i * E56PHY_RXS_OFFSET); + rdata = rd32_ephy(hw, addr); + //b. ALIAS::RXS::COARSE + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_coarse_i) = 0; + //c. ALIAS::RXS::FINE + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 0; + //d. ALIAS::RXS::ULTRAFINE + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + //e. ALIAS::RXS::SAMP_CAL_DONE + addr = E56G__PMD_RXS0_OVRDEN_0_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_0, ovrd_en_rxs0_rx0_samp_cal_done_o) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__PMD_RXS0_OVRDEN_2_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + //f. ALIAS::RXS::ADC_OFST_ADAPT_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_ofst_adapt_en_i) = 0; + //g. ALIAS::RXS::ADC_GAIN_ADAPT_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_gain_adapt_en_i) = 0; + //j. ALIAS::RXS::ADC_INTL_ADAPT_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_intl_adapt_en_i) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__PMD_RXS0_OVRDEN_1_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + //h. ALIAS::RXS::ADC_INTL_CAL_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_adc_intl_cal_en_i) = 0; + //i. ALIAS::RXS::ADC_INTL_CAL_DONE + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_adc_intl_cal_done_o) = 0; + //k. ALIAS::RXS::CDR_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_cdr_en_i) = 0; + //l. ALIAS::RXS::VGA_TRAIN_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_vga_train_en_i) = 0; + //m. ALIAS::RXS::CTLE_TRAIN_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_en_i) = 0; + //p. ALIAS::RXS::RX_FETX_TRAIN_DONE + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_txffe_train_done_o) = 0; + //r. ALIAS::RXS::RX_TXFFE_COEFF_CHANGE + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_txffe_coeff_change_o) = 0; + //s. ALIAS::RXS::RX_TXFFE_TRAIN_ENACK + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_txffe_train_enack_o) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__PMD_RXS0_OVRDEN_3_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + //n. ALIAS::RXS::RX_FETX_MOD_TYPE + //o. ALIAS::RXS::RX_FETX_MOD_TYPE_UPDATE + temp = EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_3, ovrd_en_rxs0_rx0_spareout_o); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_3, ovrd_en_rxs0_rx0_spareout_o) = temp & 0x8F; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__RXS0_DIG_OVRDEN_1_ADDR + (i * E56PHY_RXS_OFFSET); + rdata = rd32_ephy(hw, addr); + //q. 
ALIAS::RXS::SLICER_THRESHOLD_OVRD_EN + EPHY_XFLD(E56G__RXS0_DIG_OVRDEN_1, top_comp_th_ovrd_en) = 0; + EPHY_XFLD(E56G__RXS0_DIG_OVRDEN_1, mid_comp_th_ovrd_en) = 0; + EPHY_XFLD(E56G__RXS0_DIG_OVRDEN_1, bot_comp_th_ovrd_en) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + //2. Disable pattern checker - + addr = E56G__RXS0_DFT_1_ADDR + (i * E56PHY_RXS_OFFSET); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_DFT_1, ber_en) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + //3. Disable internal serial loopback mode - + addr = E56G__RXS0_ANA_OVRDEN_3_ADDR + (i * E56PHY_RXS_OFFSET); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_3, ovrd_en_ana_sel_lpbk_i) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__RXS0_ANA_OVRDEN_2_ADDR + (i * E56PHY_RXS_OFFSET); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_2, ovrd_en_ana_en_adccal_lpbk_i) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + //4. Enable bypass of clock gates in RXS - + addr = E56G__RXS0_RXS_CFG_0_ADDR + (i * E56PHY_RXS_OFFSET); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_RXS_CFG_0, train_clk_gate_bypass_en) = 0x1FFF; + txgbe_wr32_ephy(hw, addr, rdata); + } + + //5. Disable KR training mode - + //a. ALIAS::PDIG::KR_TRAINING_MODE = 0b0 + addr = E56G__PMD_BASER_PMD_CONTROL_ADDR; + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_BASER_PMD_CONTROL, training_enable_ln0) = 0; + EPHY_XFLD(E56G__PMD_BASER_PMD_CONTROL, training_enable_ln1) = 0; + EPHY_XFLD(E56G__PMD_BASER_PMD_CONTROL, training_enable_ln2) = 0; + EPHY_XFLD(E56G__PMD_BASER_PMD_CONTROL, training_enable_ln3) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + //6. Disable RX to TX parallel loopback - + //a. ALIAS::PDIG::RX_TO_TX_LPBK_EN = 0b0 + addr = E56G__PMD_PMD_CFG_5_ADDR; + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_PMD_CFG_5, rx_to_tx_lpbk_en) = 0x0; + txgbe_wr32_ephy(hw, addr, rdata); + + //The FSM to disable RXS is present in PDIG. The FSM disables the RXS when - + //PDIG::PMD_CFG[0]::rx_en_cfg[] = 0b0 + txgbe_e56_ephy_config(E56G__PMD_PMD_CFG_0, rx_en_cfg, 0); + + //Wait RX FSM to be POWERDN_ST + timer = 0; + + while (EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx0_st) != 0x21 || + EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx1_st) != 0x21 || + EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx2_st) != 0x21 || + EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx3_st) != 0x21) { + rdata = 0; + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + rdata = rd32_ephy(hw, addr); + udelay(100); + if (timer++ > PHYINIT_TIMEOUT) { + printk("ERROR: Wait E56PHY_CTRL_FSM_RX_STAT_0_ADDR Timeout!!!\n"); + break; + } + } + + return status; +} + +//-------------------------------------------------------------- +//2.2.10 SEQ::RX_DISABLE +//Use PDIG::PMD_CFG[0]::rx_en_cfg[] = 0b0 to powerdown specific RXS lanes. +//Completion of RXS powerdown can be confirmed by observing ALIAS::PDIG::CTRL_FSM_RX_ST = POWERDN_ST +//-------------------------------------------------------------- +static int txgbe_e56_disable_rx(struct txgbe_hw *hw) +{ + int status = 0; + unsigned int rdata, timer; + unsigned int addr, temp; + + //1. Disable OVERRIDE on below aliases + //a. ALIAS::RXS::RANGE_SEL + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_0, ovrd_en_ana_bbcdr_osc_range_sel_i, 0); + + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + //b. ALIAS::RXS::COARSE + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_coarse_i) = 0; + //c. ALIAS::RXS::FINE + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 0; + //d. 
ALIAS::RXS::ULTRAFINE + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 0; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + + //e. ALIAS::RXS::SAMP_CAL_DONE + txgbe_e56_ephy_config(E56G__PMD_RXS0_OVRDEN_0, ovrd_en_rxs0_rx0_samp_cal_done_o, 0); + + EPHY_RREG(E56G__PMD_RXS0_OVRDEN_2); + //f. ALIAS::RXS::ADC_OFST_ADAPT_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_ofst_adapt_en_i) = 0; + //g. ALIAS::RXS::ADC_GAIN_ADAPT_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_gain_adapt_en_i) = 0; + //j. ALIAS::RXS::ADC_INTL_ADAPT_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_intl_adapt_en_i) = 0; + EPHY_WREG(E56G__PMD_RXS0_OVRDEN_2); + + EPHY_RREG(E56G__PMD_RXS0_OVRDEN_1); + //h. ALIAS::RXS::ADC_INTL_CAL_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_adc_intl_cal_en_i) = 0; + //i. ALIAS::RXS::ADC_INTL_CAL_DONE + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_adc_intl_cal_done_o) = 0; + //k. ALIAS::RXS::CDR_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_cdr_en_i) = 0; + //l. ALIAS::RXS::VGA_TRAIN_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_vga_train_en_i) = 0; + //m. ALIAS::RXS::CTLE_TRAIN_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_en_i) = 0; + //p. ALIAS::RXS::RX_FETX_TRAIN_DONE + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_txffe_train_done_o) = 0; + //r. ALIAS::RXS::RX_TXFFE_COEFF_CHANGE + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_txffe_coeff_change_o) = 0; + //s. ALIAS::RXS::RX_TXFFE_TRAIN_ENACK + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_txffe_train_enack_o) = 0; + EPHY_WREG(E56G__PMD_RXS0_OVRDEN_1); + + EPHY_RREG(E56G__PMD_RXS0_OVRDEN_3); + //n. ALIAS::RXS::RX_FETX_MOD_TYPE + //o. ALIAS::RXS::RX_FETX_MOD_TYPE_UPDATE + temp = EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_3, ovrd_en_rxs0_rx0_spareout_o); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_3, ovrd_en_rxs0_rx0_spareout_o) = temp & 0x8F; + EPHY_WREG(E56G__PMD_RXS0_OVRDEN_3); + + //q. ALIAS::RXS::SLICER_THRESHOLD_OVRD_EN + EPHY_RREG(E56G__RXS0_DIG_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_DIG_OVRDEN_1, top_comp_th_ovrd_en) = 0; + EPHY_XFLD(E56G__RXS0_DIG_OVRDEN_1, mid_comp_th_ovrd_en) = 0; + EPHY_XFLD(E56G__RXS0_DIG_OVRDEN_1, bot_comp_th_ovrd_en) = 0; + EPHY_WREG(E56G__RXS0_DIG_OVRDEN_1); + + //2. Disable pattern checker - + txgbe_e56_ephy_config(E56G__RXS0_DFT_1, ber_en, 0); + + //3. Disable internal serial loopback mode - + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_3, ovrd_en_ana_sel_lpbk_i, 0); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_2, ovrd_en_ana_en_adccal_lpbk_i, 0); + + //4. Enable bypass of clock gates in RXS - + txgbe_e56_ephy_config(E56G__RXS0_RXS_CFG_0, train_clk_gate_bypass_en, 0x1FFF); + + //5. Disable KR training mode - + //a. ALIAS::PDIG::KR_TRAINING_MODE = 0b0 + txgbe_e56_ephy_config(E56G__PMD_BASER_PMD_CONTROL, training_enable_ln0, 0); + + //6. Disable RX to TX parallel loopback - + //a. ALIAS::PDIG::RX_TO_TX_LPBK_EN = 0b0 + txgbe_e56_ephy_config(E56G__PMD_PMD_CFG_5, rx_to_tx_lpbk_en, 0); + + //The FSM to disable RXS is present in PDIG. 
The FSM disables the RXS when - + //PDIG::PMD_CFG[0]::rx_en_cfg[] = 0b0 + txgbe_e56_ephy_config(E56G__PMD_PMD_CFG_0, rx_en_cfg, 0); + + //Wait RX FSM to be POWERDN_ST + timer = 0; + while(1) { + rdata = 0; + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + rdata = rd32_ephy(hw, addr); + if((rdata & 0x3f) == 0x21) { break; } + udelay(100); + if (timer++ > PHYINIT_TIMEOUT) { + printk("ERROR: Wait E56PHY_CTRL_FSM_RX_STAT_0_ADDR Timeout!!!\n"); + break; + } + } + + return status; +} + +int txgbe_e56_reconfig_rx(struct txgbe_hw *hw, u32 speed) +{ + int status = 0; + u32 rdata; + u32 addr; + + wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, + ~TXGBE_MAC_TX_CFG_TE); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, + ~TXGBE_MAC_RX_CFG_RE); + + TCALL(hw, mac.ops.disable_sec_tx_path); + + if (hw->mac.type == txgbe_mac_aml) { + rdata = rd32(hw, TXGBE_GPIO_EXT); + if (rdata & (TXGBE_SFP1_MOD_ABS_LS | TXGBE_SFP1_RX_LOS_LS)) { + return TXGBE_ERR_TIMEOUT; + } + } + + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ENABLE_ADDR, 0x0); + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ENABLE_ADDR, 0x0); + + + if (hw->mac.type == txgbe_mac_aml40) { + //14. Do SEQ::RX_DISABLE to disable RXS. Poll ALIAS::PDIG::CTRL_FSM_RX_ST + //and confirm its value is POWERDN_ST + txgbe_e56_disable_rx40G(hw); + status = txgbe_e56_config_rx_40G(hw, speed); + } else { + //14. Do SEQ::RX_DISABLE to disable RXS. Poll ALIAS::PDIG::CTRL_FSM_RX_ST + //and confirm its value is POWERDN_ST + txgbe_e56_disable_rx(hw); + status = txgbe_e56_config_rx(hw, speed); + } + + addr = E56PHY_INTR_0_ADDR; + txgbe_wr32_ephy(hw, addr, E56PHY_INTR_0_IDLE_ENTRY1); + + addr = E56PHY_INTR_1_ADDR; + txgbe_wr32_ephy(hw, addr, E56PHY_INTR_1_IDLE_EXIT1); + + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ENABLE_ADDR, E56PHY_INTR_0_IDLE_ENTRY1); + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ENABLE_ADDR, E56PHY_INTR_1_IDLE_EXIT1); + + TCALL(hw, mac.ops.enable_sec_tx_path); + + return status; +} + +//Reference setting code for SFP mode +int txgbe_set_link_to_amlite(struct txgbe_hw *hw, u32 speed) +{ + struct txgbe_adapter *adapter = hw->back; + u32 value = 0; + u32 ppl_lock = false; + int status = 0; + u32 reset = 0; + + if ((rd32(hw, TXGBE_EPHY_STAT) & TXGBE_EPHY_STAT_PPL_LOCK) + == TXGBE_EPHY_STAT_PPL_LOCK) { + ppl_lock = true; + wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, + ~TXGBE_MAC_TX_CFG_TE); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, + ~TXGBE_MAC_RX_CFG_RE); + + TCALL(hw, mac.ops.disable_sec_tx_path); + } + TCALL(hw, mac.ops.disable_tx_laser); + + if (hw->bus.lan_id == 0) { + reset = TXGBE_MIS_RST_LAN0_EPHY_RST; + } else { + reset = TXGBE_MIS_RST_LAN1_EPHY_RST; + } + + wr32(hw, TXGBE_MIS_RST, + reset | rd32(hw, TXGBE_MIS_RST)); + TXGBE_WRITE_FLUSH(hw); + usec_delay(10); + + /////////////////////////// XLGPCS REGS Start + value = txgbe_rd32_epcs(hw, VR_PCS_DIG_CTRL1); + value |= 0x8000; + txgbe_wr32_epcs(hw, VR_PCS_DIG_CTRL1, value); + + udelay(1000); + value = txgbe_rd32_epcs(hw, VR_PCS_DIG_CTRL1); + if ((value & 0x8000)) { + status = TXGBE_ERR_PHY_INIT_NOT_DONE; + TCALL(hw, mac.ops.enable_tx_laser); + goto out; + } + + value = txgbe_rd32_epcs(hw, SR_AN_CTRL); + field_set(&value, 12, 12, 0); + txgbe_wr32_epcs(hw, SR_AN_CTRL, value); + + if (speed == TXGBE_LINK_SPEED_40GB_FULL) { + value = txgbe_rd32_epcs(hw, SR_PCS_CTRL1); + field_set(&value, 5, 2, 0x3); + txgbe_wr32_epcs(hw, SR_PCS_CTRL1, value); + + value = txgbe_rd32_epcs(hw, SR_PCS_CTRL2); + field_set(&value, 3, 0, 0x4); + txgbe_wr32_epcs(hw, SR_PCS_CTRL2, value); + + value = rd32_ephy(hw, ANA_OVRDVAL0); + field_set(&value, 29, 29, 0x1); 
+ field_set(&value, 1, 1, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDVAL0, value); + + value = rd32_ephy(hw, ANA_OVRDVAL5); + field_set(&value, 24, 24, 0x0); + txgbe_wr32_ephy(hw, ANA_OVRDVAL5, value); + + value = rd32_ephy(hw, ANA_OVRDEN0); + field_set(&value, 1, 1, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDEN0, value); + + value = rd32_ephy(hw, ANA_OVRDEN1); + field_set(&value, 30, 30, 0x1); + field_set(&value, 25, 25, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDEN1, value); + + value = rd32_ephy(hw, PLL0_CFG0); + field_set(&value, 25, 24, 0x1); + field_set(&value, 17, 16, 0x3); + txgbe_wr32_ephy(hw, PLL0_CFG0, value); + + value = rd32_ephy(hw, PLL0_CFG2); + field_set(&value, 12, 8, 0x4); + txgbe_wr32_ephy(hw, PLL0_CFG2, value); + + value = rd32_ephy(hw, PLL1_CFG0); + field_set(&value, 25, 24, 0x1); + field_set(&value, 17, 16, 0x3); + txgbe_wr32_ephy(hw, PLL1_CFG0, value); + + value = rd32_ephy(hw, PLL1_CFG2); + field_set(&value, 12, 8, 0x8); + txgbe_wr32_ephy(hw, PLL1_CFG2, value); + + value = rd32_ephy(hw, PLL0_DIV_CFG0); + field_set(&value, 18, 8, 0x294); + field_set(&value, 4, 0, 0x8); + txgbe_wr32_ephy(hw, PLL0_DIV_CFG0, value); + + value = rd32_ephy(hw, DATAPATH_CFG0); + field_set(&value, 30, 28, 0x7); + field_set(&value, 26, 24, 0x5); + field_set(&value, 18, 16, 0x5); + field_set(&value, 14, 12, 0x5); + field_set(&value, 10, 8, 0x5); + txgbe_wr32_ephy(hw, DATAPATH_CFG0, value); + + value = rd32_ephy(hw, DATAPATH_CFG1); + field_set(&value, 26, 24, 0x5); + field_set(&value, 10, 8, 0x5); + field_set(&value, 18, 16, 0x5); + field_set(&value, 2, 0, 0x5); + txgbe_wr32_ephy(hw, DATAPATH_CFG1, value); + + value = rd32_ephy(hw, AN_CFG1); + field_set(&value, 4, 0, 0x2); + txgbe_wr32_ephy(hw, AN_CFG1, value); + + txgbe_e56_cfg_temp(hw); + txgbe_e56_cfg_40g(hw); + + value = rd32_ephy(hw, PMD_CFG0); + field_set(&value, 21, 20, 0x3); + field_set(&value, 19, 12, 0xf); //TX_EN set + field_set(&value, 8, 8, 0x0); + field_set(&value, 1, 1, 0x1); + txgbe_wr32_ephy(hw, PMD_CFG0, value); + } + + if (speed == TXGBE_LINK_SPEED_25GB_FULL) { + value = txgbe_rd32_epcs(hw, SR_PCS_CTRL1); + field_set(&value, 5, 2, 5); + txgbe_wr32_epcs(hw, SR_PCS_CTRL1, value); + + value = txgbe_rd32_epcs(hw, SR_PCS_CTRL2); + field_set(&value, 3, 0, 7); + txgbe_wr32_epcs(hw, SR_PCS_CTRL2, value); + + value = txgbe_rd32_epcs(hw, SR_PMA_CTRL2); + field_set(&value, 6, 0, 0x39); + txgbe_wr32_epcs(hw, SR_PMA_CTRL2, value); + + value = rd32_ephy(hw, ANA_OVRDVAL0); + field_set(&value, 29, 29, 0x1); + field_set(&value, 1, 1, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDVAL0, value); + + value = rd32_ephy(hw, ANA_OVRDVAL5); + //Update to 0 from SNPS for PIN CLKP/N: Enable the termination of the input buffer + field_set(&value, 24, 24, 0x0); + txgbe_wr32_ephy(hw, ANA_OVRDVAL5, value); + + value = rd32_ephy(hw, ANA_OVRDEN0); + field_set(&value, 1, 1, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDEN0, value); + + value = rd32_ephy(hw, ANA_OVRDEN1); + field_set(&value, 30, 30, 0x1); + field_set(&value, 25, 25, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDEN1, value); + + value = rd32_ephy(hw, PLL0_CFG0); + field_set(&value, 25, 24, 0x1); + field_set(&value, 17, 16, 0x3); + txgbe_wr32_ephy(hw, PLL0_CFG0, value); + + value = rd32_ephy(hw, PLL0_CFG2); + field_set(&value, 12, 8, 0x4); + txgbe_wr32_ephy(hw, PLL0_CFG2, value); + + value = rd32_ephy(hw, PLL1_CFG0); + field_set(&value, 25, 24, 0x1); + field_set(&value, 17, 16, 0x3); + txgbe_wr32_ephy(hw, PLL1_CFG0, value); + + value = rd32_ephy(hw, PLL1_CFG2); + field_set(&value, 12, 8, 0x8); + txgbe_wr32_ephy(hw, PLL1_CFG2, value); + + value 
= rd32_ephy(hw, PLL0_DIV_CFG0); + field_set(&value, 18, 8, 0x294); + field_set(&value, 4, 0, 0x8); + txgbe_wr32_ephy(hw, PLL0_DIV_CFG0, value); + + value = rd32_ephy(hw, DATAPATH_CFG0); + field_set(&value, 30, 28, 0x7); + field_set(&value, 26, 24, 0x5); + field_set(&value, 18, 16, 0x3); + field_set(&value, 14, 12, 0x5); + field_set(&value, 10, 8, 0x5); + txgbe_wr32_ephy(hw, DATAPATH_CFG0, value); + + value = rd32_ephy(hw, DATAPATH_CFG1); + field_set(&value, 26, 24, 0x5); + field_set(&value, 10, 8, 0x5); + field_set(&value, 18, 16, 0x3); + field_set(&value, 2, 0, 0x3); + txgbe_wr32_ephy(hw, DATAPATH_CFG1, value); + + value = rd32_ephy(hw, AN_CFG1); + field_set(&value, 4, 0, 0x9); + txgbe_wr32_ephy(hw, AN_CFG1, value); + + txgbe_e56_cfg_temp(hw); + txgbe_e56_cfg_25g(hw); + + value = rd32_ephy(hw, PMD_CFG0); + field_set(&value, 21, 20, 0x3); + field_set(&value, 19, 12, 0x1); //TX_EN set + field_set(&value, 8, 8, 0x0); + field_set(&value, 1, 1, 0x1); + txgbe_wr32_ephy(hw, PMD_CFG0, value); + } + + if (speed == TXGBE_LINK_SPEED_10GB_FULL) { + value = txgbe_rd32_epcs(hw, SR_PCS_CTRL1); + field_set(&value, 5, 2, 0); + txgbe_wr32_epcs(hw, SR_PCS_CTRL1, value); + + value = txgbe_rd32_epcs(hw, SR_PCS_CTRL2); + field_set(&value, 3, 0, 0); + txgbe_wr32_epcs(hw, SR_PCS_CTRL2, value); + + value = txgbe_rd32_epcs(hw, SR_PMA_CTRL2); + field_set(&value, 6, 0, 0xb); + txgbe_wr32_epcs(hw, SR_PMA_CTRL2, value); + + value = rd32_ephy(hw, ANA_OVRDVAL0); + field_set(&value, 29, 29, 0x1); + field_set(&value, 1, 1, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDVAL0, value); + + value = rd32_ephy(hw, ANA_OVRDVAL5); + field_set(&value, 24, 24, 0x0); + txgbe_wr32_ephy(hw, ANA_OVRDVAL5, value); + + value = rd32_ephy(hw, ANA_OVRDEN0); + field_set(&value, 1, 1, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDEN0, value); + + value = rd32_ephy(hw, ANA_OVRDEN1); + field_set(&value, 30, 30, 0x1); + field_set(&value, 25, 25, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDEN1, value); + + value = rd32_ephy(hw, PLL0_CFG0); + field_set(&value, 25, 24, 0x1); + field_set(&value, 17, 16, 0x3); + txgbe_wr32_ephy(hw, PLL0_CFG0, value); + + value = rd32_ephy(hw, PLL0_CFG2); + field_set(&value, 12, 8, 0x4); + txgbe_wr32_ephy(hw, PLL0_CFG2, value); + + value = rd32_ephy(hw, PLL1_CFG0); + field_set(&value, 25, 24, 0x1); + field_set(&value, 17, 16, 0x3); + txgbe_wr32_ephy(hw, PLL1_CFG0, value); + + value = rd32_ephy(hw, PLL1_CFG2); + field_set(&value, 12, 8, 0x8); + txgbe_wr32_ephy(hw, PLL1_CFG2, value); + + value = rd32_ephy(hw, PLL0_DIV_CFG0); + field_set(&value, 18, 8, 0x294); + field_set(&value, 4, 0, 0x8); + txgbe_wr32_ephy(hw, PLL0_DIV_CFG0, value); + + value = rd32_ephy(hw, DATAPATH_CFG0); + field_set(&value, 30, 28, 0x7); + field_set(&value, 26, 24, 0x5); + field_set(&value, 18, 16, 0x5); + field_set(&value, 14, 12, 0x5); + field_set(&value, 10, 8, 0x5); + txgbe_wr32_ephy(hw, DATAPATH_CFG0, value); + + value = rd32_ephy(hw, DATAPATH_CFG1); + field_set(&value, 26, 24, 0x5); + field_set(&value, 10, 8, 0x5); + field_set(&value, 18, 16, 0x5); + field_set(&value, 2, 0, 0x5); + txgbe_wr32_ephy(hw, DATAPATH_CFG1, value); + + value = rd32_ephy(hw, AN_CFG1); + field_set(&value, 4, 0, 0x2); + txgbe_wr32_ephy(hw, AN_CFG1, value); + + txgbe_e56_cfg_temp(hw); + txgbe_e56_cfg_10g(hw); + + value = rd32_ephy(hw, PMD_CFG0); + field_set(&value, 21, 20, 0x3); + field_set(&value, 19, 12, 0x1); //TX_EN set + field_set(&value, 8, 8, 0x0); + field_set(&value, 1, 1, 0x1); + txgbe_wr32_ephy(hw, PMD_CFG0, value); + } + + TCALL(hw, mac.ops.enable_tx_laser); + + status = 
txgbe_e56_config_rx(hw, speed); + + value = rd32_ephy(hw, E56PHY_RXS_IDLE_DETECT_1_ADDR); + field_set(&value, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX, 0x28); + field_set(&value, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN, 0xa); + txgbe_wr32_ephy(hw, E56PHY_RXS_IDLE_DETECT_1_ADDR, value); + + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ADDR, E56PHY_INTR_0_IDLE_ENTRY1); + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ADDR, E56PHY_INTR_1_IDLE_EXIT1); + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ENABLE_ADDR, E56PHY_INTR_0_IDLE_ENTRY1); + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ENABLE_ADDR, E56PHY_INTR_1_IDLE_EXIT1); + + if (adapter->fec_link_mode != TXGBE_PHY_FEC_AUTO) { + adapter->cur_fec_link = adapter->fec_link_mode; + txgbe_e56_set_fec_mode(hw, adapter->cur_fec_link); + } + + if (status) + goto out; + +out: + if (ppl_lock) { + TCALL(hw, mac.ops.enable_sec_tx_path); + } + + return status; +} + +int txgbe_get_cur_fec_mode(struct txgbe_hw *hw) +{ + struct txgbe_adapter *adapter = hw->back; + u32 value = 0; + + mutex_lock(&adapter->e56_lock); + value = txgbe_rd32_epcs(hw, SR_PMA_RS_FEC_CTRL); + mutex_unlock(&adapter->e56_lock); + + if (value & 0x4) + return TXGBE_PHY_FEC_RS; + + mutex_lock(&adapter->e56_lock); + value = txgbe_rd32_epcs(hw, SR_PMA_KR_FEC_CTRL); + mutex_unlock(&adapter->e56_lock); + + if (value & 0x1) + return TXGBE_PHY_FEC_BASER; + + return TXGBE_PHY_FEC_OFF; +} + +int txgbe_e56_set_fec_mode(struct txgbe_hw *hw, u8 fec_mode) +{ + u32 value = 0; + + if (fec_mode & TXGBE_PHY_FEC_RS) { + //disable BASER FEC + value = txgbe_rd32_epcs(hw, SR_PMA_KR_FEC_CTRL); + field_set(&value, 0, 0, 0); + txgbe_wr32_epcs(hw, SR_PMA_KR_FEC_CTRL, value); + + //enable RS FEC + txgbe_wr32_epcs(hw, 0x180a3, 0x68c1); + txgbe_wr32_epcs(hw, 0x180a4, 0x3321); + txgbe_wr32_epcs(hw, 0x180a5, 0x973e); + txgbe_wr32_epcs(hw, 0x180a6, 0xccde); + + txgbe_wr32_epcs(hw, 0x38018, 1024); + value = txgbe_rd32_epcs(hw, 0x100c8); + field_set(&value, 2, 2, 1); + txgbe_wr32_epcs(hw, 0x100c8, value); + } else if (fec_mode & TXGBE_PHY_FEC_BASER) { + //disable RS FEC + txgbe_wr32_epcs(hw, 0x180a3, 0x7690); + txgbe_wr32_epcs(hw, 0x180a4, 0x3347); + txgbe_wr32_epcs(hw, 0x180a5, 0x896f); + txgbe_wr32_epcs(hw, 0x180a6, 0xccb8); + txgbe_wr32_epcs(hw, 0x38018, 0x3fff); + value = txgbe_rd32_epcs(hw, 0x100c8); + field_set(&value, 2, 2, 0); + txgbe_wr32_epcs(hw, 0x100c8, value); + + //enable BASER FEC + value = txgbe_rd32_epcs(hw, SR_PMA_KR_FEC_CTRL); + field_set(&value, 0, 0, 1); + txgbe_wr32_epcs(hw, SR_PMA_KR_FEC_CTRL, value); + } else { + //disable RS FEC + txgbe_wr32_epcs(hw, 0x180a3, 0x7690); + txgbe_wr32_epcs(hw, 0x180a4, 0x3347); + txgbe_wr32_epcs(hw, 0x180a5, 0x896f); + txgbe_wr32_epcs(hw, 0x180a6, 0xccb8); + txgbe_wr32_epcs(hw, 0x38018, 0x3fff); + value = txgbe_rd32_epcs(hw, 0x100c8); + field_set(&value, 2, 2, 0); + txgbe_wr32_epcs(hw, 0x100c8, value); + + //disable BASER FEC + value = txgbe_rd32_epcs(hw, SR_PMA_KR_FEC_CTRL); + field_set(&value, 0, 0, 0); + txgbe_wr32_epcs(hw, SR_PMA_KR_FEC_CTRL, value); + } + + return 0; +} + +int txgbe_e56_fec_mode_polling(struct txgbe_hw *hw, bool *link_up) +{ + struct txgbe_adapter *adapter = hw->back; + int i = 0, j = 0; + u32 speed; + + do { + if (!(adapter->fec_link_mode & BIT(j))) { + j += 1; + continue; + } + + adapter->cur_fec_link = adapter->fec_link_mode & BIT(j); + + mutex_lock(&adapter->e56_lock); + txgbe_e56_set_fec_mode(hw, adapter->cur_fec_link); + mutex_unlock(&adapter->e56_lock); + + for (i = 0; i < 4; i++) { + msleep(250); + txgbe_e56_check_phy_link(hw, &speed, link_up); + if (*link_up) + return 0; 
+ } + + j += 1; + } while (j < 3); + + return 0; +} + diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.h new file mode 100644 index 0000000000000000000000000000000000000000..4c03564ff1859692cbcc404283ffb92e0f743b70 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.h @@ -0,0 +1,1809 @@ +#ifndef _TXGBE_E56_H_ +#define _TXGBE_E56_H_ + +#include "txgbe_type.h" +#include "txgbe.h" + +#define EPHY_RREG(REG) \ +do {\ + rdata = 0; \ + rdata = rd32_ephy(hw, REG##_ADDR); \ +} while(0) + +#define EPHY_WREG(REG) \ +do { \ + txgbe_wr32_ephy(hw, REG##_ADDR, rdata); \ +} while(0) + +#define EPCS_RREG(REG) \ +do {\ + rdata = 0; \ + rdata = txgbe_rd32_epcs(hw, REG##_ADDR); \ +} while(0) + +#define EPCS_WREG(REG) \ +do { \ + txgbe_wr32_epcs(hw, REG##_ADDR, rdata); \ +} while(0) + +#define txgbe_e56_ephy_config(reg, field, val) \ +do { \ + EPHY_RREG(reg); \ + EPHY_XFLD(reg, field) = (val); \ + EPHY_WREG(reg); \ +} while(0) + +#define txgbe_e56_epcs_config(reg, field, val) \ +do { \ + EPCS_RREG(reg); \ + EPCS_XFLD(reg, field) = (val); \ + EPCS_WREG(reg); \ +} while(0) + +//-------------------------------- +//LAN GPIO define for SFP+ module +//-------------------------------- +//-- Fields +#define SFP1_RS0 5,5 +#define SFP1_RS1 4,4 +#define SFP1_RX_LOS 3,3 +#define SFP1_MOD_ABS 2,2 +#define SFP1_TX_DISABLE 1,1 +#define SFP1_TX_FAULT 0,0 +#define EPHY_XFLD(REG, FLD) ((REG *)&rdata)->FLD +#define EPCS_XFLD(REG, FLD) ((REG *)&rdata)->FLD + +typedef union { + struct { + u32 ana_refclk_buf_daisy_en_i : 1; + u32 ana_refclk_buf_pad_en_i : 1; + u32 ana_vddinoff_dcore_dig_o : 1; + u32 ana_lcpll_en_clkout_hf_left_top_i : 1; + u32 ana_lcpll_en_clkout_hf_right_top_i : 1; + u32 ana_lcpll_en_clkout_hf_left_bot_i : 1; + u32 ana_lcpll_en_clkout_hf_right_bot_i : 1; + u32 ana_lcpll_en_clkout_lf_left_top_i : 1; + u32 ana_lcpll_en_clkout_lf_right_top_i : 1; + u32 ana_lcpll_en_clkout_lf_left_bot_i : 1; + u32 ana_lcpll_en_clkout_lf_right_bot_i : 1; + u32 ana_bg_en_i : 1; + u32 ana_en_rescal_i : 1; + u32 ana_rescal_comp_o : 1; + u32 ana_en_ldo_core_i : 1; + u32 ana_lcpll_hf_en_bias_i : 1; + u32 ana_lcpll_hf_en_loop_i : 1; + u32 ana_lcpll_hf_en_cp_i : 1; + u32 ana_lcpll_hf_set_lpf_i : 1; + u32 ana_lcpll_hf_en_vco_i : 1; + u32 ana_lcpll_hf_vco_amp_status_o : 1; + u32 ana_lcpll_hf_en_odiv_i : 1; + u32 ana_lcpll_lf_en_bias_i : 1; + u32 ana_lcpll_lf_en_loop_i : 1; + u32 ana_lcpll_lf_en_cp_i : 1; + u32 ana_lcpll_lf_set_lpf_i : 1; + u32 ana_lcpll_lf_en_vco_i : 1; + u32 ana_lcpll_lf_vco_amp_status_o : 1; + u32 ana_lcpll_lf_en_odiv_i : 1; + u32 ana_lcpll_hf_refclk_select_i : 1; + u32 ana_lcpll_lf_refclk_select_i : 1; + u32 rsvd0 : 1; + }; + u32 reg; +} E56G_CMS_ANA_OVRDVAL_0; + +#define E56G_CMS_ANA_OVRDVAL_0_ADDR 0xcb0 +/* AMLITE ETH PHY Registers */ +#define SR_PMA_KR_FEC_CTRL 0x100ab +#define SR_AN_CTRL 0x70000 +#define VR_PCS_DIG_CTRL1 0x38000 +#define SR_PCS_CTRL1 0x30000 +#define SR_PCS_CTRL2 0x30007 +#define SR_PMA_CTRL2 0x10007 +#define VR_PCS_DIG_CTRL3 0x38003 +#define VR_PMA_CTRL3 0x180a8 +#define VR_PMA_CTRL4 0x180a9 +#define SR_PMA_RS_FEC_CTRL 0x100c8 +#define CMS_ANA_OVRDEN0 0xca4 +#define ANA_OVRDEN1 0xca8 +#define ANA_OVRDVAL0 0xcb0 +#define ANA_OVRDVAL5 0xcc4 +#define OSC_CAL_N_CDR4 0x14 +#define PLL0_CFG0 0xc10 +#define PLL0_CFG2 0xc18 +#define PLL0_DIV_CFG0 0xc1c +#define PLL1_CFG0 0xc48 +#define PLL1_CFG2 0xc50 +#define CMS_PIN_OVRDEN0 0xc8c +#define CMS_PIN_OVRDVAL0 0xc94 +#define DATAPATH_CFG0 0x142c +#define DATAPATH_CFG1 0x1430 +#define AN_CFG1 
0x1438 +#define SPARE52 0x16fc +#define RXS_CFG0 0x000 +#define PMD_CFG0 0x1400 +#define SR_PCS_STS1 0x30001 +#define PMD_CTRL_FSM_TX_STAT0 0x14dc +#define CMS_ANA_OVRDEN0 0xca4 +#define CMS_ANA_OVRDEN1 0xca8 +#define CMS_ANA_OVRDVAL2 0xcb8 +#define CMS_ANA_OVRDVAL4 0xcc0 +#define CMS_ANA_OVRDVAL5 0xcc4 +#define CMS_ANA_OVRDVAL7 0xccc +#define CMS_ANA_OVRDVAL9 0xcd4 +#define CMS_ANA_OVRDVAL10 0xcd8 + +#define TXS_TXS_CFG1 0x804 +#define TXS_WKUP_CNT 0x808 +#define TXS_PIN_OVRDEN0 0x80c +#define TXS_PIN_OVRDVAL6 0x82c +#define TXS_ANA_OVRDVAL1 0x854 + +#define E56PHY_CMS_BASE_ADDR 0x0C00 + +#define E56PHY_CMS_PIN_OVRDEN_0_ADDR (E56PHY_CMS_BASE_ADDR+0x8C) +#define E56PHY_CMS_PIN_OVRDEN_0_OVRD_EN_PLL0_TX_SIGNAL_TYPE_I 12,12 + +#define E56PHY_CMS_PIN_OVRDVAL_0_ADDR (E56PHY_CMS_BASE_ADDR+0x94) +#define E56PHY_CMS_PIN_OVRDVAL_0_INT_PLL0_TX_SIGNAL_TYPE_I 10,10 + +#define E56PHY_CMS_ANA_OVRDEN_0_ADDR (E56PHY_CMS_BASE_ADDR+0xA4) + +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_VCO_SWING_CTRL_I 29,29 + + +#define E56PHY_CMS_ANA_OVRDEN_1_ADDR (E56PHY_CMS_BASE_ADDR+0xA8) +#define E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_HF_TEST_IN_I 4,4 + +#define E56PHY_CMS_ANA_OVRDVAL_2_ADDR (E56PHY_CMS_BASE_ADDR+0xB8) + +#define E56PHY_CMS_ANA_OVRDVAL_2_ANA_LCPLL_HF_VCO_SWING_CTRL_I 31,28 + +#define E56PHY_CMS_ANA_OVRDVAL_4_ADDR (E56PHY_CMS_BASE_ADDR+0xC0) + + +#define E56PHY_TXS_BASE_ADDR 0x0800 +#define E56PHY_TXS1_BASE_ADDR 0x0900 +#define E56PHY_TXS2_BASE_ADDR 0x0A00 +#define E56PHY_TXS3_BASE_ADDR 0x0B00 +#define E56PHY_TXS_OFFSET 0x0100 + +#define E56PHY_PMD_RX_OFFSET 0x02C + +#define E56PHY_TXS_TXS_CFG_1_ADDR (E56PHY_TXS_BASE_ADDR+0x04) +#define E56PHY_TXS_TXS_CFG_1_ADAPTATION_WAIT_CNT_X256 7,4 +#define E56PHY_TXS_WKUP_CNT_ADDR (E56PHY_TXS_BASE_ADDR+0x08) +#define E56PHY_TXS_WKUP_CNTLDO_WKUP_CNT_X32 7,0 +#define E56PHY_TXS_WKUP_CNTDCC_WKUP_CNT_X32 15,8 + + +#define E56PHY_TXS_PIN_OVRDEN_0_ADDR (E56PHY_TXS_BASE_ADDR+0x0C) +#define E56PHY_TXS_PIN_OVRDEN_0_OVRD_EN_TX0_EFUSE_BITS_I 28,28 + +#define E56PHY_TXS_PIN_OVRDVAL_6_ADDR (E56PHY_TXS_BASE_ADDR+0x2C) + +#define E56PHY_TXS_ANA_OVRDVAL_1_ADDR (E56PHY_TXS_BASE_ADDR+0x54) +#define E56PHY_TXS_ANA_OVRDVAL_1_ANA_TEST_DAC_I 23,8 + +#define E56PHY_TXS_ANA_OVRDEN_0_ADDR (E56PHY_TXS_BASE_ADDR+0x44) +#define E56PHY_TXS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_DAC_I 13,13 + +#define E56PHY_RXS_BASE_ADDR 0x0000 +#define E56PHY_RXS1_BASE_ADDR 0x0200 +#define E56PHY_RXS2_BASE_ADDR 0x0400 +#define E56PHY_RXS3_BASE_ADDR 0x0600 +#define E56PHY_RXS_OFFSET 0x0200 + +#define E56PHY_RXS_RXS_CFG_0_ADDR (E56PHY_RXS_BASE_ADDR+0x000) +#define E56PHY_RXS_RXS_CFG_0_DSER_DATA_SEL 1,1 +#define E56PHY_RXS_RXS_CFG_0_TRAIN_CLK_GATE_BYPASS_EN 17,4 + +#define E56PHY_RXS_OSC_CAL_N_CDR_1_ADDR (E56PHY_RXS_BASE_ADDR+0x008) +#define E56PHY_RXS_OSC_CAL_N_CDR_1_PREDIV1 15,0 +#define E56PHY_RXS_OSC_CAL_N_CDR_1_PREDIV1_LSB 0 +#define E56PHY_RXS_OSC_CAL_N_CDR_1_TARGET_CNT1 31,16 +#define E56PHY_RXS_OSC_CAL_N_CDR_1_TARGET_CNT1_LSB 16 + + +#define E56PHY_RXS_OSC_CAL_N_CDR_4_ADDR (E56PHY_RXS_BASE_ADDR+0x014) +#define E56PHY_RXS_OSC_CAL_N_CDR_4_OSC_RANGE_SEL1 3,2 +#define E56PHY_RXS_OSC_CAL_N_CDR_4_VCO_CODE_INIT 18,8 +#define E56PHY_RXS_OSC_CAL_N_CDR_4_OSC_CURRENT_BOOST_EN1 21,21 +#define E56PHY_RXS_OSC_CAL_N_CDR_4_BBCDR_CURRENT_BOOST1 27,26 + +#define E56PHY_RXS_OSC_CAL_N_CDR_5_ADDR (E56PHY_RXS_BASE_ADDR+0x018) +#define E56PHY_RXS_OSC_CAL_N_CDR_5_SDM_WIDTH 3,2 +#define E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_PRELOCK 15,12 +#define E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_POSTLOCK 19,16 +#define 
E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_POSTLOCK 23,20 +#define E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_PRELOCK 27,24 +#define E56PHY_RXS_OSC_CAL_N_CDR_5_BBCDR_RDY_CNT 30,28 + +#define E56PHY_RXS_OSC_CAL_N_CDR_6_ADDR (E56PHY_RXS_BASE_ADDR+0x01C) +#define E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_PRELOCK 3,0 +#define E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_POSTLOCK 7,4 + +#define E56PHY_RXS_INTL_CONFIG_0_ADDR (E56PHY_RXS_BASE_ADDR+0x020) +#define E56PHY_RXS_INTL_CONFIG_0_ADC_INTL2SLICE_DELAY1 31,16 + +#define E56PHY_RXS_INTL_CONFIG_2_ADDR (E56PHY_RXS_BASE_ADDR+0x028) +#define E56PHY_RXS_INTL_CONFIG_2_INTERLEAVER_HBW_DISABLE1 1,1 + +#define E56PHY_RXS_TXFFE_TRAINING_0_ADDR (E56PHY_RXS_BASE_ADDR+0x02C) +#define E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_LTH 18,12 +#define E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_UTH 26,20 + +#define E56PHY_RXS_TXFFE_TRAINING_1_ADDR (E56PHY_RXS_BASE_ADDR+0x030) +#define E56PHY_RXS_TXFFE_TRAINING_1_C1_LTH 8,0 +#define E56PHY_RXS_TXFFE_TRAINING_1_C1_UTH 20,12 + +#define E56PHY_RXS_TXFFE_TRAINING_2_ADDR (E56PHY_RXS_BASE_ADDR+0x034) +#define E56PHY_RXS_TXFFE_TRAINING_2_CM1_LTH 8,0 +#define E56PHY_RXS_TXFFE_TRAINING_2_CM1_UTH 20,12 + + +#define E56PHY_RXS_TXFFE_TRAINING_3_ADDR (E56PHY_RXS_BASE_ADDR+0x038) +#define E56PHY_RXS_TXFFE_TRAINING_3_CM2_LTH 8,0 +#define E56PHY_RXS_TXFFE_TRAINING_3_CM2_UTH 20,12 +#define E56PHY_RXS_TXFFE_TRAINING_3_TXFFE_TRAIN_MOD_TYPE 26,21 + +#define E56PHY_RXS_VGA_TRAINING_0_ADDR (E56PHY_RXS_BASE_ADDR+0x04C) +#define E56PHY_RXS_VGA_TRAINING_0_VGA_TARGET 18,12 + + +#define E56PHY_RXS_VGA_TRAINING_1_ADDR (E56PHY_RXS_BASE_ADDR+0x050) +#define E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT0 4,0 +#define E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT0 12,8 +#define E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT123 20,16 +#define E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT123 28,24 + +#define E56PHY_RXS_CTLE_TRAINING_0_ADDR (E56PHY_RXS_BASE_ADDR+0x054) +#define E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT0 24,20 +#define E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT123 31,27 + +#define E56PHY_RXS_CTLE_TRAINING_1_ADDR (E56PHY_RXS_BASE_ADDR+0x058) +#define E56PHY_RXS_CTLE_TRAINING_1_LFEQ_LUT 24,0 + +#define E56PHY_RXS_CTLE_TRAINING_2_ADDR (E56PHY_RXS_BASE_ADDR+0x05C) +#define E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P1 5,0 +#define E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P2 13,8 +#define E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P3 21,16 + + +#define E56PHY_RXS_CTLE_TRAINING_3_ADDR (E56PHY_RXS_BASE_ADDR+0x060) +#define E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P1 9,8 +#define E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P2 11,10 +#define E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P3 13,12 + +#define E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADDR (E56PHY_RXS_BASE_ADDR+0x064) +#define E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_SLICE_DATA_AVG_CNT 5,4 +#define E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_DATA_AVG_CNT 9,8 +#define E56PHY_RXS_OFFSET_N_GAIN_CAL_0_FE_OFFSET_DAC_CLK_CNT_X8 31,28 + +#define E56PHY_RXS_OFFSET_N_GAIN_CAL_1_ADDR (E56PHY_RXS_BASE_ADDR+0x068) +#define E56PHY_RXS_OFFSET_N_GAIN_CAL_1_SAMP_ADAPT_CFG 31,28 + +#define E56PHY_RXS_FFE_TRAINING_0_ADDR (E56PHY_RXS_BASE_ADDR+0x070) +#define E56PHY_RXS_FFE_TRAINING_0_FFE_TAP_EN 23,8 + +#define E56PHY_RXS_IDLE_DETECT_1_ADDR (E56PHY_RXS_BASE_ADDR+0x088) +#define E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX 22,16 +#define E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN 30,24 + +#define E56PHY_RXS_ANA_OVRDEN_0_ADDR (E56PHY_RXS_BASE_ADDR+0x08C) +#define E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RTERM_I 0,0 +#define E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_TRIM_RTERM_I 1,1 
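+/*
+ * Note on the register field macros in this header: each one expands to an
+ * "MSB,LSB" pair that the driver passes straight into field_set(), e.g.
+ * field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, 0x0) clears bits 19..16 of
+ * rdata.  field_set() itself is provided elsewhere in the driver; the
+ * helper below is only a hypothetical sketch kept here to document the
+ * convention and is not used by any code in this patch.
+ */
+static inline void txgbe_e56_field_set_sketch(u32 *reg, u32 msb, u32 lsb,
+					      u32 val)
+{
+	/* mask covering bits [msb:lsb] of the 32-bit register word */
+	u32 mask = (u32)((((u64)1 << (msb - lsb + 1)) - 1) << lsb);
+
+	/* clear the field, then merge in the shifted new value */
+	*reg = (*reg & ~mask) | ((val << lsb) & mask);
+}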
+#define E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_BBCDR_OSC_RANGE_SEL_I 29,29 + +#define E56PHY_RXS_ANA_OVRDEN_1_ADDR (E56PHY_RXS_BASE_ADDR+0x090) +#define E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_BBCDR_VCOFILT_BYP_I 0,0 +#define E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_TEST_BBCDR_I 9,9 + +#define E56PHY_RXS_ANA_OVRDEN_3_ADDR (E56PHY_RXS_BASE_ADDR+0x098) +#define E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_ANABS_CONFIG_I 15,15 +#define E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_VGA2_BOOST_CSTM_I 25,25 + +#define E56PHY_RXS_ANA_OVRDEN_4_ADDR (E56PHY_RXS_BASE_ADDR+0x09C) +#define E56PHY_RXS_ANA_OVRDVAL_0_ADDR (E56PHY_RXS_BASE_ADDR+0x0A0) +#define E56PHY_RXS_ANA_OVRDVAL_0_ANA_EN_RTERM_I 0,0 + +#define E56PHY_RXS_ANA_OVRDVAL_6_ADDR (E56PHY_RXS_BASE_ADDR+0x0B8) +#define E56PHY_RXS_ANA_OVRDVAL_14_ADDR (E56PHY_RXS_BASE_ADDR+0x0D8) +#define E56PHY_RXS_ANA_OVRDVAL_15_ADDR (E56PHY_RXS_BASE_ADDR+0x0DC) +#define E56PHY_RXS_ANA_OVRDVAL_17_ADDR (E56PHY_RXS_BASE_ADDR+0x0E4) +#define E56PHY_RXS_ANA_OVRDVAL_17_ANA_VGA2_BOOST_CSTM_I 18,16 + +#define E56PHY_RXS_EYE_SCAN_1_ADDR (E56PHY_RXS_BASE_ADDR+0x1A4) +#define E56PHY_RXS_EYE_SCAN_1_EYE_SCAN_REF_TIMER 31,0 + +#define E56PHY_RXS_ANA_OVRDVAL_5_ADDR (E56PHY_RXS_BASE_ADDR+0x0B4) +#define E56PHY_RXS_ANA_OVRDVAL_5_ANA_BBCDR_OSC_RANGE_SEL_I 1,0 + +#define E56PHY_RXS_RINGO_0_ADDR (E56PHY_RXS_BASE_ADDR+0x1FC) + +#define E56PHY_PMD_BASE_ADDR 0x1400 +#define E56PHY_PMD_CFG_0_ADDR (E56PHY_PMD_BASE_ADDR+0x000) +#define E56PHY_PMD_CFG_0_RX_EN_CFG 19,16 + +#define E56PHY_PMD_CFG_3_ADDR (E56PHY_PMD_BASE_ADDR+0x00C) +#define E56PHY_PMD_CFG_3_CTRL_FSM_TIMEOUT_X64K 31,24 +#define E56PHY_PMD_CFG_4_ADDR (E56PHY_PMD_BASE_ADDR+0x010) +#define E56PHY_PMD_CFG_4_TRAIN_DC_ON_PERIOD_X64K 7,0 +#define E56PHY_PMD_CFG_4_TRAIN_DC_PERIOD_X512K 15,8 +#define E56PHY_PMD_CFG_5_ADDR (E56PHY_PMD_BASE_ADDR+0x014) +#define E56PHY_PMD_CFG_5_USE_RECENT_MARKER_OFFSET 12,12 +#define E56PHY_CTRL_FSM_CFG_0_ADDR (E56PHY_PMD_BASE_ADDR+0x040) +#define E56PHY_CTRL_FSM_CFG_0_CONT_ON_ADC_OFST_CAL_ERR 4,4 +#define E56PHY_CTRL_FSM_CFG_0_CONT_ON_ADC_GAIN_CAL_ERR 5,5 +#define E56PHY_CTRL_FSM_CFG_0_DO_RX_ADC_OFST_CAL 9,8 +#define E56PHY_CTRL_FSM_CFG_0_RX_ERR_ACTION_EN 31,24 + + +#define E56PHY_CTRL_FSM_CFG_1_ADDR (E56PHY_PMD_BASE_ADDR+0x044) +#define E56PHY_CTRL_FSM_CFG_1_TRAIN_ST0_WAIT_CNT_X4096 7,0 +#define E56PHY_CTRL_FSM_CFG_1_TRAIN_ST1_WAIT_CNT_X4096 15,8 +#define E56PHY_CTRL_FSM_CFG_1_TRAIN_ST2_WAIT_CNT_X4096 23,16 +#define E56PHY_CTRL_FSM_CFG_1_TRAIN_ST3_WAIT_CNT_X4096 31,24 + +#define E56PHY_CTRL_FSM_CFG_2_ADDR (E56PHY_PMD_BASE_ADDR+0x048) +#define E56PHY_CTRL_FSM_CFG_2_TRAIN_ST4_WAIT_CNT_X4096 7,0 +#define E56PHY_CTRL_FSM_CFG_2_TRAIN_ST5_WAIT_CNT_X4096 15,8 +#define E56PHY_CTRL_FSM_CFG_2_TRAIN_ST6_WAIT_CNT_X4096 23,16 +#define E56PHY_CTRL_FSM_CFG_2_TRAIN_ST7_WAIT_CNT_X4096 31,24 + +#define E56PHY_CTRL_FSM_CFG_3_ADDR (E56PHY_PMD_BASE_ADDR+0x04C) +#define E56PHY_CTRL_FSM_CFG_3_TRAIN_ST8_WAIT_CNT_X4096 7,0 + +#define E56PHY_CTRL_FSM_CFG_3_TRAIN_ST9_WAIT_CNT_X4096 15,8 +#define E56PHY_CTRL_FSM_CFG_3_TRAIN_ST10_WAIT_CNT_X4096 23,16 +#define E56PHY_CTRL_FSM_CFG_3_TRAIN_ST11_WAIT_CNT_X4096 31,24 + +#define E56PHY_CTRL_FSM_CFG_4_ADDR (E56PHY_PMD_BASE_ADDR+0x050) +#define E56PHY_CTRL_FSM_CFG_4_TRAIN_ST12_WAIT_CNT_X4096 7,0 +#define E56PHY_CTRL_FSM_CFG_4_TRAIN_ST13_WAIT_CNT_X4096 15,8 +#define E56PHY_CTRL_FSM_CFG_4_TRAIN_ST14_WAIT_CNT_X4096 23,16 +#define E56PHY_CTRL_FSM_CFG_4_TRAIN_ST15_WAIT_CNT_X4096 31,24 + +#define E56PHY_CTRL_FSM_CFG_7_ADDR (E56PHY_PMD_BASE_ADDR+0x05C) +#define E56PHY_CTRL_FSM_CFG_7_TRAIN_ST4_EN 15,0 +#define 
E56PHY_CTRL_FSM_CFG_7_TRAIN_ST5_EN 31,16 + +#define E56PHY_CTRL_FSM_CFG_8_ADDR (E56PHY_PMD_BASE_ADDR+0x060) +#define E56PHY_CTRL_FSM_CFG_8_TRAIN_ST7_EN 31,16 + +#define E56PHY_CTRL_FSM_CFG_12_ADDR (E56PHY_PMD_BASE_ADDR+0x070) +#define E56PHY_CTRL_FSM_CFG_12_TRAIN_ST15_EN 31,16 + +#define E56PHY_CTRL_FSM_CFG_13_ADDR (E56PHY_PMD_BASE_ADDR+0x074) +#define E56PHY_CTRL_FSM_CFG_13_TRAIN_ST0_DONE_EN 15,0 +#define E56PHY_CTRL_FSM_CFG_13_TRAIN_ST1_DONE_EN 31,16 + +#define E56PHY_CTRL_FSM_CFG_14_ADDR (E56PHY_PMD_BASE_ADDR+0x078) +#define E56PHY_CTRL_FSM_CFG_14_TRAIN_ST3_DONE_EN 31,16 + +#define E56PHY_CTRL_FSM_CFG_15_ADDR (E56PHY_PMD_BASE_ADDR+0x07C) +#define E56PHY_CTRL_FSM_CFG_15_TRAIN_ST4_DONE_EN 15,0 + +#define E56PHY_CTRL_FSM_CFG_17_ADDR (E56PHY_PMD_BASE_ADDR+0x084) +#define E56PHY_CTRL_FSM_CFG_17_TRAIN_ST8_DONE_EN 15,0 + +#define E56PHY_CTRL_FSM_CFG_18_ADDR (E56PHY_PMD_BASE_ADDR+0x088) +#define E56PHY_CTRL_FSM_CFG_18_TRAIN_ST10_DONE_EN 15,0 + +#define E56PHY_CTRL_FSM_CFG_29_ADDR (E56PHY_PMD_BASE_ADDR+0x0B4) +#define E56PHY_CTRL_FSM_CFG_29_TRAIN_ST15_DC_EN 31,16 + +#define E56PHY_CTRL_FSM_CFG_33_ADDR (E56PHY_PMD_BASE_ADDR+0x0C4) +#define E56PHY_CTRL_FSM_CFG_33_TRAIN0_RATE_SEL 15,0 +#define E56PHY_CTRL_FSM_CFG_33_TRAIN1_RATE_SEL 31,16 + +#define E56PHY_CTRL_FSM_CFG_34_ADDR (E56PHY_PMD_BASE_ADDR+0x0C8) +#define E56PHY_CTRL_FSM_CFG_34_TRAIN2_RATE_SEL 15,0 +#define E56PHY_CTRL_FSM_CFG_34_TRAIN3_RATE_SEL 31,16 + +#define E56PHY_CTRL_FSM_RX_STAT_0_ADDR (E56PHY_PMD_BASE_ADDR+0x0FC) +#define E56PHY_RXS0_OVRDEN_0_ADDR (E56PHY_PMD_BASE_ADDR+0x130) +#define E56PHY_RXS0_OVRDEN_0_OVRD_EN_RXS0_RX0_SAMP_CAL_DONE_O 27,27 + +#define E56PHY_RXS0_OVRDEN_1_ADDR (E56PHY_PMD_BASE_ADDR+0x134) +#define E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_VGA_TRAIN_EN_I 14,14 +#define E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_CTLE_TRAIN_EN_I 16,16 +#define E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_CDR_EN_I 18,18 +#define E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_EN_I 23,23 +#define E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_DONE_O 24,24 +#define E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_DONE_O_LSB 24 + + +#define E56PHY_RXS0_OVRDEN_2_ADDR (E56PHY_PMD_BASE_ADDR+0x138) +#define E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_OFST_ADAPT_EN_I 0,0 +#define E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_GAIN_ADAPT_EN_I 3,3 +#define E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_INTL_ADAPT_EN_I 6,6 + +#define E56PHY_RXS0_OVRDVAL_0_ADDR (E56PHY_PMD_BASE_ADDR+0x140) +#define E56PHY_RXS0_OVRDVAL_0_RXS0_RX0_SAMP_CAL_DONE_O 22,22 + +#define E56PHY_RXS0_OVRDVAL_1_ADDR (E56PHY_PMD_BASE_ADDR+0x144) +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_VGA_TRAIN_EN_I 7,7 +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_CTLE_TRAIN_EN_I 9,9 +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_CDR_EN_I 11,11 +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_EN_I 16,16 +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_DONE_O 17,17 +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_DONE_O_LSB 17 +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_OFST_ADAPT_EN_I 25,25 +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_GAIN_ADAPT_EN_I 28,28 +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_ADAPT_EN_I 31,31 + +#define E56PHY_INTR_0_IDLE_ENTRY1 0x10000000 +#define E56PHY_INTR_0_ADDR (E56PHY_PMD_BASE_ADDR+0x1EC) +#define E56PHY_INTR_0_ENABLE_ADDR (E56PHY_PMD_BASE_ADDR+0x1E0) + +#define E56PHY_INTR_1_IDLE_EXIT1 0x1 +#define E56PHY_INTR_1_ADDR (E56PHY_PMD_BASE_ADDR+0x1F0) +#define E56PHY_INTR_1_ENABLE_ADDR (E56PHY_PMD_BASE_ADDR+0x1E4) + +#define E56PHY_KRT_TFSM_CFG_ADDR (E56PHY_PMD_BASE_ADDR+0x2B8) 
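+/*
+ * Usage note for the EPHY accessor macros defined at the top of this file
+ * (a hedged illustration, not code used by the driver): EPHY_RREG() reads
+ * the raw 32-bit word into a local "rdata", EPHY_XFLD() re-types &rdata as
+ * the register's bitfield union so a field can be updated by name, and
+ * EPHY_WREG() writes the word back.  Assuming "hw" and "u32 rdata" are in
+ * scope, a typical read-modify-write looks like:
+ *
+ *	EPHY_RREG(E56G_CMS_ANA_OVRDVAL_0);
+ *	EPHY_XFLD(E56G_CMS_ANA_OVRDVAL_0, ana_bg_en_i) = 1;
+ *	EPHY_WREG(E56G_CMS_ANA_OVRDVAL_0);
+ *
+ * txgbe_e56_ephy_config() wraps exactly this sequence for the common
+ * single-field case.
+ */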
+#define E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X1000K 7,0 +#define E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X8000K 15,8 +#define E56PHY_KRT_TFSM_CFGKRT_TFSM_HOLDOFF_TIMER_X256K 23,16 + +#define E56PHY_FETX_FFE_TRAIN_CFG_0_ADDR (E56PHY_PMD_BASE_ADDR+0x2BC) +#define E56PHY_FETX_FFE_TRAIN_CFG_0_KRT_FETX_INIT_FFE_CFG_2 9,8 +#define E56PHY_FETX_FFE_TRAIN_CFG_0_KRT_FETX_INIT_FFE_CFG_3 13,12 + +#define PHYINIT_TIMEOUT 1000 //PHY initialization timeout value in 0.5ms unit + +#define E56G__BASEADDR 0x0 + +typedef union { + struct { + u32 ana_lcpll_lf_vco_swing_ctrl_i : 4; + u32 ana_lcpll_lf_lpf_setcode_calib_i : 5; + u32 rsvd0 : 3; + u32 ana_lcpll_lf_vco_coarse_bin_i : 5; + u32 rsvd1 : 3; + u32 ana_lcpll_lf_vco_fine_therm_i : 8; + u32 ana_lcpll_lf_clkout_fb_ctrl_i : 2; + u32 rsvd2 : 2; + }; + u32 reg; +} E56G_CMS_ANA_OVRDVAL_7; +#define E56G_CMS_ANA_OVRDVAL_7_ADDR (E56G__BASEADDR+0xccc) + +typedef union { + struct { + u32 ovrd_en_ana_lcpll_hf_vco_amp_status_o : 1; + u32 ovrd_en_ana_lcpll_hf_clkout_fb_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_hf_clkdiv_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_hf_en_odiv_i : 1; + u32 ovrd_en_ana_lcpll_hf_test_in_i : 1; + u32 ovrd_en_ana_lcpll_hf_test_out_o : 1; + u32 ovrd_en_ana_lcpll_lf_en_bias_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_loop_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_cp_i : 1; + u32 ovrd_en_ana_lcpll_lf_icp_base_i : 1; + u32 ovrd_en_ana_lcpll_lf_icp_fine_i : 1; + u32 ovrd_en_ana_lcpll_lf_lpf_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_lpf_setcode_calib_i : 1; + u32 ovrd_en_ana_lcpll_lf_set_lpf_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_vco_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_sel_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_swing_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_coarse_bin_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_fine_therm_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_amp_status_o : 1; + u32 ovrd_en_ana_lcpll_lf_clkout_fb_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_clkdiv_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_odiv_i : 1; + u32 ovrd_en_ana_lcpll_lf_test_in_i : 1; + u32 ovrd_en_ana_lcpll_lf_test_out_o : 1; + u32 ovrd_en_ana_lcpll_hf_refclk_select_i : 1; + u32 ovrd_en_ana_lcpll_lf_refclk_select_i : 1; + u32 ovrd_en_ana_lcpll_hf_clk_ref_sel_i : 1; + u32 ovrd_en_ana_lcpll_lf_clk_ref_sel_i : 1; + u32 ovrd_en_ana_test_bias_i : 1; + u32 ovrd_en_ana_test_slicer_i : 1; + u32 ovrd_en_ana_test_sampler_i : 1; + }; + u32 reg; +} E56G_CMS_ANA_OVRDEN_1; + +#define E56G_CMS_ANA_OVRDEN_1_ADDR (E56G__BASEADDR+0xca8) + +typedef union { + struct { + u32 ana_lcpll_lf_test_in_i : 32; + }; + u32 reg; +} E56G_CMS_ANA_OVRDVAL_9; + +#define E56G_CMS_ANA_OVRDVAL_9_ADDR (E56G__BASEADDR+0xcd4) + +typedef union { + struct { + u32 ovrd_en_ana_bbcdr_vcofilt_byp_i : 1; + u32 ovrd_en_ana_bbcdr_coarse_i : 1; + u32 ovrd_en_ana_bbcdr_fine_i : 1; + u32 ovrd_en_ana_bbcdr_ultrafine_i : 1; + u32 ovrd_en_ana_en_bbcdr_i : 1; + u32 ovrd_en_ana_bbcdr_divctrl_i : 1; + u32 ovrd_en_ana_bbcdr_int_cstm_i : 1; + u32 ovrd_en_ana_bbcdr_prop_step_i : 1; + u32 ovrd_en_ana_en_bbcdr_clk_i : 1; + u32 ovrd_en_ana_test_bbcdr_i : 1; + u32 ovrd_en_ana_bbcdr_en_elv_cnt_ping0_pong1_i : 1; + u32 ovrd_en_ana_bbcdr_clrz_elv_cnt_ping_i : 1; + u32 ovrd_en_ana_bbcdr_clrz_elv_cnt_pong_i : 1; + u32 ovrd_en_ana_bbcdr_clrz_cnt_sync_i : 1; + u32 ovrd_en_ana_bbcdr_en_elv_cnt_rd_i : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_rdout_0_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_rdout_90_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_rdout_180_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_rdout_270_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_ping_0_o : 1; + u32 
ovrd_en_ana_bbcdr_elv_cnt_ping_90_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_ping_180_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_ping_270_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_pong_0_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_pong_90_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_pong_180_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_pong_270_o : 1; + u32 ovrd_en_ana_en_bbcdr_samp_dac_i : 1; + u32 ovrd_en_ana_bbcdr_dac0_i : 1; + u32 ovrd_en_ana_bbcdr_dac90_i : 1; + u32 ovrd_en_ana_vga2_cload_in_cstm_i : 1; + u32 ovrd_en_ana_intlvr_cut_bw_i : 1; + }; + u32 reg; +} E56G__RXS0_ANA_OVRDEN_1; + +#define E56G__RXS0_ANA_OVRDEN_1_ADDR (E56G__BASEADDR+0x90) + +//-----Access structure typedef for Register:E56G__RXS0_OSC_CAL_N_CDR_0 +typedef union { + struct { + u32 prediv0 : 16; + u32 target_cnt0 : 16; + }; + u32 reg; +} E56G_RXS0_OSC_CAL_N_CDR_0; +//-----MACRO defines for Register:E56G__RXS0_OSC_CAL_N_CDR_0 +#define E56G_RXS0_OSC_CAL_N_CDR_0_ADDR (E56G__BASEADDR+0x4) + +typedef union { + struct { + u32 osc_range_sel0 : 2; + u32 osc_range_sel1 : 2; + u32 osc_range_sel2 : 2; + u32 osc_range_sel3 : 2; + u32 vco_code_init : 11; + u32 calibrate_range_sel : 1; + u32 osc_current_boost_en0 : 1; + u32 osc_current_boost_en1 : 1; + u32 osc_current_boost_en2 : 1; + u32 osc_current_boost_en3 : 1; + u32 bbcdr_current_boost0 : 2; + u32 bbcdr_current_boost1 : 2; + u32 bbcdr_current_boost2 : 2; + u32 bbcdr_current_boost3 : 2; + }; + u32 reg; +} E56G_RXS0_OSC_CAL_N_CDR_4; +//-----MACRO defines for Register:E56G__RXS0_OSC_CAL_N_CDR_4 +#define E56G_RXS0_OSC_CAL_N_CDR_4_ADDR (E56G__BASEADDR+0x14) + +//-----Access structure typedef for Register:E56G__RXS0_INTL_CONFIG_0 +typedef union { + struct { + u32 adc_intl2slice_delay0 : 16; + u32 adc_intl2slice_delay1 : 16; + }; + u32 reg; +} E56G_RXS0_INTL_CONFIG_0; +//-----MACRO defines for Register:E56G__RXS0_INTL_CONFIG_0 +#define E56G_RXS0_INTL_CONFIG_0_ADDR (E56G__BASEADDR+0x20) + +//-----Access structure typedef for Register:E56G__RXS0_INTL_CONFIG_2 +typedef union { + struct { + u32 interleaver_hbw_disable0 : 1; + u32 interleaver_hbw_disable1 : 1; + u32 interleaver_hbw_disable2 : 1; + u32 interleaver_hbw_disable3 : 1; + u32 rsvd0 : 28; + }; + u32 reg; +} E56G_RXS0_INTL_CONFIG_2; +//-----MACRO defines for Register:E56G__RXS0_INTL_CONFIG_2 +#define E56G_RXS0_INTL_CONFIG_2_ADDR (E56G__BASEADDR+0x28) + +typedef union { + struct { + u32 ovrd_en_ana_bbcdr_dac180_i : 1; + u32 ovrd_en_ana_bbcdr_dac270_i : 1; + u32 ovrd_en_ana_bbcdr_en_samp_cal_cnt_i : 1; + u32 ovrd_en_ana_bbcdr_clrz_samp_cal_cnt_i : 1; + u32 ovrd_en_ana_bbcdr_samp_cnt_0_o : 1; + u32 ovrd_en_ana_bbcdr_samp_cnt_90_o : 1; + u32 ovrd_en_ana_bbcdr_samp_cnt_180_o : 1; + u32 ovrd_en_ana_bbcdr_samp_cnt_270_o : 1; + u32 ovrd_en_ana_en_adcbuf1_i : 1; + u32 ovrd_en_ana_test_adcbuf1_i : 1; + u32 ovrd_en_ana_en_adc_clk4ui_i : 1; + u32 ovrd_en_ana_adc_clk_skew0_i : 1; + u32 ovrd_en_ana_adc_clk_skew90_i : 1; + u32 ovrd_en_ana_adc_clk_skew180_i : 1; + u32 ovrd_en_ana_adc_clk_skew270_i : 1; + u32 ovrd_en_ana_adc_update_skew_i : 1; + u32 ovrd_en_ana_en_adc_pi_i : 1; + u32 ovrd_en_ana_adc_pictrl_quad_i : 1; + u32 ovrd_en_ana_adc_pctrl_code_i : 1; + u32 ovrd_en_ana_adc_clkdiv_i : 1; + u32 ovrd_en_ana_test_adc_clkgen_i : 1; + u32 ovrd_en_ana_en_adc_i : 1; + u32 ovrd_en_ana_en_adc_vref_i : 1; + u32 ovrd_en_ana_vref_cnfg_i : 1; + u32 ovrd_en_ana_adc_data_cstm_o : 1; + u32 ovrd_en_ana_en_adccal_lpbk_i : 1; + u32 ovrd_en_ana_sel_adcoffset_cal_i : 1; + u32 ovrd_en_ana_sel_adcgain_cal_i : 1; + u32 ovrd_en_ana_adcgain_cal_swing_ctrl_i : 1; + u32 ovrd_en_ana_adc_gain_i 
: 1; + u32 ovrd_en_ana_vga_cload_out_cstm_i : 1; + u32 ovrd_en_ana_vga2_cload_out_cstm_i : 1; + }; + u32 reg; +} E56G__RXS0_ANA_OVRDEN_2; +//-----MACRO defines for Register:E56G__RXS0_ANA_OVRDEN_2 +#define E56G__RXS0_ANA_OVRDEN_2_ADDR (E56G__BASEADDR+0x94) + +//-----Access structure typedef for Register:E56G__RXS0_ANA_OVRDEN_3 +typedef union { + struct { + u32 ovrd_en_ana_adc_offset_i : 1; + u32 ovrd_en_ana_adc_slice_addr_i : 1; + u32 ovrd_en_ana_slice_wr_i : 1; + u32 ovrd_en_ana_test_adc_i : 1; + u32 ovrd_en_ana_test_adc_o : 1; + u32 ovrd_en_ana_spare_o : 8; + u32 ovrd_en_ana_sel_lpbk_i : 1; + u32 ovrd_en_ana_ana_debug_sel_i : 1; + u32 ovrd_en_ana_anabs_config_i : 1; + u32 ovrd_en_ana_en_anabs_i : 1; + u32 ovrd_en_ana_anabs_rxn_o : 1; + u32 ovrd_en_ana_anabs_rxp_o : 1; + u32 ovrd_en_ana_dser_clk_en_i : 1; + u32 ovrd_en_ana_dser_clk_config_i : 1; + u32 ovrd_en_ana_en_mmcdr_clk_obs_i : 1; + u32 ovrd_en_ana_skew_coarse0_fine1_i : 1; + u32 ovrd_en_ana_vddinoff_acore_dig_o : 1; + u32 ovrd_en_ana_vddinoff_dcore_dig_o : 1; + u32 ovrd_en_ana_vga2_boost_cstm_i : 1; + u32 ovrd_en_ana_adc_sel_vbgr_bias_i : 1; + u32 ovrd_en_ana_adc_nbuf_cnfg_i : 1; + u32 ovrd_en_ana_adc_pbuf_cnfg_i : 1; + u32 rsvd0 : 3; + }; + u32 reg; +} E56G__RXS0_ANA_OVRDEN_3; +//-----MACRO defines for Register:E56G__RXS0_ANA_OVRDEN_3 +#define E56G__RXS0_ANA_OVRDEN_3_NUM 1 +#define E56G__RXS0_ANA_OVRDEN_3_ADDR (E56G__BASEADDR+0x98) + +//-----Access structure typedef for Register:E56G__RXS0_RXS_CFG_0 +typedef union { + struct { + u32 pam4_ab_swap_en : 1; + u32 dser_data_sel : 1; + u32 signal_type : 1; + u32 precode_en : 1; + u32 train_clk_gate_bypass_en : 14; + u32 rsvd0 : 14; + }; + u32 reg; +} E56G__RXS0_RXS_CFG_0; +//-----MACRO defines for Register:E56G__RXS0_RXS_CFG_0 +#define E56G__RXS0_RXS_CFG_0_NUM 1 +#define E56G__RXS0_RXS_CFG_0_ADDR (E56G__BASEADDR+0x0) + +//-----Access structure typedef for Register:E56G__PMD_BASER_PMD_CONTROL +typedef union { + struct { + u32 restart_training_ln0 : 1; + u32 training_enable_ln0 : 1; + u32 restart_training_ln1 : 1; + u32 training_enable_ln1 : 1; + u32 restart_training_ln2 : 1; + u32 training_enable_ln2 : 1; + u32 restart_training_ln3 : 1; + u32 training_enable_ln3 : 1; + u32 rsvd0 : 24; + }; + u32 reg; +} E56G__PMD_BASER_PMD_CONTROL; +//-----MACRO defines for Register:E56G__PMD_BASER_PMD_CONTROL +#define E56G__PMD_BASER_PMD_CONTROL_NUM 1 +#define E56G__PMD_BASER_PMD_CONTROL_ADDR (E56G__BASEADDR+0x1640) + +//-----Access structure typedef for Register:E56G__PMD_PMD_CFG_5 +typedef union { + struct { + u32 rx_to_tx_lpbk_en : 4; + u32 sel_wp_pmt_out : 4; + u32 sel_wp_pmt_clkout : 4; + u32 use_recent_marker_offset : 1; + u32 interrupt_debug_mode : 1; + u32 rsvd0 : 2; + u32 tx_ffe_coeff_update : 4; + u32 rsvd1 : 12; + }; + u32 reg; +} E56G__PMD_PMD_CFG_5; +//-----MACRO defines for Register:E56G__PMD_PMD_CFG_5 +#define E56G__PMD_PMD_CFG_5_NUM 1 +#define E56G__PMD_PMD_CFG_5_ADDR (E56G__BASEADDR+0x1414) + +//-----Access structure typedef for Register:E56G__PMD_PMD_CFG_0 +typedef union { + struct { + u32 soft_reset : 1; + u32 pmd_en : 1; + u32 rsvd0 : 2; + u32 pll_refclk_sel : 2; + u32 rsvd1 : 2; + u32 pmd_mode : 1; + u32 rsvd2 : 3; + u32 tx_en_cfg : 4; + u32 rx_en_cfg : 4; + u32 pll_en_cfg : 2; + u32 rsvd3 : 2; + u32 pam4_precode_no_krt_en : 4; + u32 rsvd4 : 4; + }; + u32 reg; +} E56G__PMD_PMD_CFG_0; +//-----MACRO defines for Register:E56G__PMD_PMD_CFG_0 +#define E56G__PMD_PMD_CFG_0_NUM 1 +#define E56G__PMD_PMD_CFG_0_ADDR (E56G__BASEADDR+0x1400) + +//-----Access structure typedef for 
Register:E56G__PMD_RXS0_OVRDEN_2 +typedef union { + struct { + u32 ovrd_en_rxs0_rx0_adc_ofst_adapt_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_ofst_adapt_done_o : 1; + u32 ovrd_en_rxs0_rx0_adc_ofst_adapt_error_o : 1; + u32 ovrd_en_rxs0_rx0_adc_gain_adapt_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_gain_adapt_done_o : 1; + u32 ovrd_en_rxs0_rx0_adc_gain_adapt_error_o : 1; + u32 ovrd_en_rxs0_rx0_adc_intl_adapt_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_intl_adapt_done_o : 1; + u32 ovrd_en_rxs0_rx0_adc_intl_adapt_error_o : 1; + u32 ovrd_en_rxs0_rx0_fe_ofst_adapt_en_i : 1; + u32 ovrd_en_rxs0_rx0_fe_ofst_adapt_done_o : 1; + u32 ovrd_en_rxs0_rx0_fe_ofst_adapt_error_o : 1; + u32 ovrd_en_rxs0_rx0_samp_th_adapt_en_i : 1; + u32 ovrd_en_rxs0_rx0_samp_th_adapt_done_o : 1; + u32 ovrd_en_rxs0_rx0_efuse_bits_i : 1; + u32 ovrd_en_rxs0_rx0_wp_pmt_in_i : 1; + u32 ovrd_en_rxs0_rx0_wp_pmt_out_o : 1; + u32 rsvd0 : 15; + }; + u32 reg; +} E56G__PMD_RXS0_OVRDEN_2; +//-----MACRO defines for Register:E56G__PMD_RXS0_OVRDEN_2 +#define E56G__PMD_RXS0_OVRDEN_2_ADDR (E56G__BASEADDR+0x1538) + +typedef union { + struct { + u32 ana_bbcdr_osc_range_sel_i : 2; + u32 rsvd0 : 2; + u32 ana_bbcdr_coarse_i : 4; + u32 ana_bbcdr_fine_i : 3; + u32 rsvd1 : 1; + u32 ana_bbcdr_ultrafine_i : 3; + u32 rsvd2 : 1; + u32 ana_bbcdr_divctrl_i : 2; + u32 rsvd3 : 2; + u32 ana_bbcdr_int_cstm_i : 5; + u32 rsvd4 : 3; + u32 ana_bbcdr_prop_step_i : 4; + }; + u32 reg; +} E56G__RXS0_ANA_OVRDVAL_5; +//-----MACRO defines for Register:E56G__RXS0_ANA_OVRDVAL_5 +#define E56G__RXS0_ANA_OVRDVAL_5_ADDR (E56G__BASEADDR+0xb4) + +typedef union { + struct { + u32 ana_adc_pictrl_quad_i : 2; + u32 rsvd0 : 2; + u32 ana_adc_clkdiv_i : 2; + u32 rsvd1 : 2; + u32 ana_test_adc_clkgen_i : 4; + u32 ana_vref_cnfg_i : 4; + u32 ana_adcgain_cal_swing_ctrl_i : 4; + u32 ana_adc_gain_i : 4; + u32 ana_adc_offset_i : 4; + u32 ana_ana_debug_sel_i : 4; + }; + u32 reg; +} E56G__RXS3_ANA_OVRDVAL_11; +//-----MACRO defines for Register:E56G__RXS3_ANA_OVRDVAL_11 +#define E56G__RXS3_ANA_OVRDVAL_11_ADDR (E56G__BASEADDR+0x6cc) + +typedef union { + struct { + u32 rxs0_rx0_fe_ofst_cal_error_o : 1; + u32 rxs0_rx0_fom_en_i : 1; + u32 rxs0_rx0_idle_detect_en_i : 1; + u32 rxs0_rx0_idle_o : 1; + u32 rxs0_rx0_txffe_train_en_i : 1; + u32 rxs0_rx0_txffe_train_enack_o : 1; + u32 rxs0_rx0_txffe_train_done_o : 1; + u32 rxs0_rx0_vga_train_en_i : 1; + u32 rxs0_rx0_vga_train_done_o : 1; + u32 rxs0_rx0_ctle_train_en_i : 1; + u32 rxs0_rx0_ctle_train_done_o : 1; + u32 rxs0_rx0_cdr_en_i : 1; + u32 rxs0_rx0_cdr_rdy_o : 1; + u32 rxs0_rx0_ffe_train_en_i : 1; + u32 rxs0_rx0_ffe_train_done_o : 1; + u32 rxs0_rx0_mmpd_en_i : 1; + u32 rxs0_rx0_adc_intl_cal_en_i : 1; + u32 rxs0_rx0_adc_intl_cal_done_o : 1; + u32 rxs0_rx0_adc_intl_cal_error_o : 1; + u32 rxs0_rx0_dfe_train_en_i : 1; + u32 rxs0_rx0_dfe_train_done_o : 1; + u32 rxs0_rx0_vga_adapt_en_i : 1; + u32 rxs0_rx0_vga_adapt_done_o : 1; + u32 rxs0_rx0_ctle_adapt_en_i : 1; + u32 rxs0_rx0_ctle_adapt_done_o : 1; + u32 rxs0_rx0_adc_ofst_adapt_en_i : 1; + u32 rxs0_rx0_adc_ofst_adapt_done_o : 1; + u32 rxs0_rx0_adc_ofst_adapt_error_o : 1; + u32 rxs0_rx0_adc_gain_adapt_en_i : 1; + u32 rxs0_rx0_adc_gain_adapt_done_o : 1; + u32 rxs0_rx0_adc_gain_adapt_error_o : 1; + u32 rxs0_rx0_adc_intl_adapt_en_i : 1; + }; + u32 reg; +} E56G__PMD_RXS0_OVRDVAL_1; +#define E56G__PMD_RXS0_OVRDVAL_1_ADDR (E56G__BASEADDR+0x1544) + +//-----Access structure typedef for Register:E56G__PMD_RXS1_OVRDVAL_1 +typedef union { + struct { + u32 rxs1_rx0_fe_ofst_cal_error_o : 1; + u32 rxs1_rx0_fom_en_i : 1; + u32 
rxs1_rx0_idle_detect_en_i : 1; + u32 rxs1_rx0_idle_o : 1; + u32 rxs1_rx0_txffe_train_en_i : 1; + u32 rxs1_rx0_txffe_train_enack_o : 1; + u32 rxs1_rx0_txffe_train_done_o : 1; + u32 rxs1_rx0_vga_train_en_i : 1; + u32 rxs1_rx0_vga_train_done_o : 1; + u32 rxs1_rx0_ctle_train_en_i : 1; + u32 rxs1_rx0_ctle_train_done_o : 1; + u32 rxs1_rx0_cdr_en_i : 1; + u32 rxs1_rx0_cdr_rdy_o : 1; + u32 rxs1_rx0_ffe_train_en_i : 1; + u32 rxs1_rx0_ffe_train_done_o : 1; + u32 rxs1_rx0_mmpd_en_i : 1; + u32 rxs1_rx0_adc_intl_cal_en_i : 1; + u32 rxs1_rx0_adc_intl_cal_done_o : 1; + u32 rxs1_rx0_adc_intl_cal_error_o : 1; + u32 rxs1_rx0_dfe_train_en_i : 1; + u32 rxs1_rx0_dfe_train_done_o : 1; + u32 rxs1_rx0_vga_adapt_en_i : 1; + u32 rxs1_rx0_vga_adapt_done_o : 1; + u32 rxs1_rx0_ctle_adapt_en_i : 1; + u32 rxs1_rx0_ctle_adapt_done_o : 1; + u32 rxs1_rx0_adc_ofst_adapt_en_i : 1; + u32 rxs1_rx0_adc_ofst_adapt_done_o : 1; + u32 rxs1_rx0_adc_ofst_adapt_error_o : 1; + u32 rxs1_rx0_adc_gain_adapt_en_i : 1; + u32 rxs1_rx0_adc_gain_adapt_done_o : 1; + u32 rxs1_rx0_adc_gain_adapt_error_o : 1; + u32 rxs1_rx0_adc_intl_adapt_en_i : 1; + }; + u32 reg; +} E56G__PMD_RXS1_OVRDVAL_1; +//-----MACRO defines for Register:E56G__PMD_RXS1_OVRDVAL_1 +#define E56G__PMD_RXS1_OVRDVAL_1_ADDR (E56G__BASEADDR+0x1570) + +//-----Access structure typedef for Register:E56G__PMD_RXS2_OVRDVAL_1 +typedef union { + struct { + u32 rxs2_rx0_fe_ofst_cal_error_o : 1; + u32 rxs2_rx0_fom_en_i : 1; + u32 rxs2_rx0_idle_detect_en_i : 1; + u32 rxs2_rx0_idle_o : 1; + u32 rxs2_rx0_txffe_train_en_i : 1; + u32 rxs2_rx0_txffe_train_enack_o : 1; + u32 rxs2_rx0_txffe_train_done_o : 1; + u32 rxs2_rx0_vga_train_en_i : 1; + u32 rxs2_rx0_vga_train_done_o : 1; + u32 rxs2_rx0_ctle_train_en_i : 1; + u32 rxs2_rx0_ctle_train_done_o : 1; + u32 rxs2_rx0_cdr_en_i : 1; + u32 rxs2_rx0_cdr_rdy_o : 1; + u32 rxs2_rx0_ffe_train_en_i : 1; + u32 rxs2_rx0_ffe_train_done_o : 1; + u32 rxs2_rx0_mmpd_en_i : 1; + u32 rxs2_rx0_adc_intl_cal_en_i : 1; + u32 rxs2_rx0_adc_intl_cal_done_o : 1; + u32 rxs2_rx0_adc_intl_cal_error_o : 1; + u32 rxs2_rx0_dfe_train_en_i : 1; + u32 rxs2_rx0_dfe_train_done_o : 1; + u32 rxs2_rx0_vga_adapt_en_i : 1; + u32 rxs2_rx0_vga_adapt_done_o : 1; + u32 rxs2_rx0_ctle_adapt_en_i : 1; + u32 rxs2_rx0_ctle_adapt_done_o : 1; + u32 rxs2_rx0_adc_ofst_adapt_en_i : 1; + u32 rxs2_rx0_adc_ofst_adapt_done_o : 1; + u32 rxs2_rx0_adc_ofst_adapt_error_o : 1; + u32 rxs2_rx0_adc_gain_adapt_en_i : 1; + u32 rxs2_rx0_adc_gain_adapt_done_o : 1; + u32 rxs2_rx0_adc_gain_adapt_error_o : 1; + u32 rxs2_rx0_adc_intl_adapt_en_i : 1; + }; + u32 reg; +} E56G__PMD_RXS2_OVRDVAL_1; +//-----MACRO defines for Register:E56G__PMD_RXS2_OVRDVAL_1 +#define E56G__PMD_RXS2_OVRDVAL_1_ADDR (E56G__BASEADDR+0x159c) + +//-----Access structure typedef for Register:E56G__PMD_RXS3_OVRDVAL_1 +typedef union { + struct { + u32 rxs3_rx0_fe_ofst_cal_error_o : 1; + u32 rxs3_rx0_fom_en_i : 1; + u32 rxs3_rx0_idle_detect_en_i : 1; + u32 rxs3_rx0_idle_o : 1; + u32 rxs3_rx0_txffe_train_en_i : 1; + u32 rxs3_rx0_txffe_train_enack_o : 1; + u32 rxs3_rx0_txffe_train_done_o : 1; + u32 rxs3_rx0_vga_train_en_i : 1; + u32 rxs3_rx0_vga_train_done_o : 1; + u32 rxs3_rx0_ctle_train_en_i : 1; + u32 rxs3_rx0_ctle_train_done_o : 1; + u32 rxs3_rx0_cdr_en_i : 1; + u32 rxs3_rx0_cdr_rdy_o : 1; + u32 rxs3_rx0_ffe_train_en_i : 1; + u32 rxs3_rx0_ffe_train_done_o : 1; + u32 rxs3_rx0_mmpd_en_i : 1; + u32 rxs3_rx0_adc_intl_cal_en_i : 1; + u32 rxs3_rx0_adc_intl_cal_done_o : 1; + u32 rxs3_rx0_adc_intl_cal_error_o : 1; + u32 rxs3_rx0_dfe_train_en_i : 1; + u32 
rxs3_rx0_dfe_train_done_o : 1; + u32 rxs3_rx0_vga_adapt_en_i : 1; + u32 rxs3_rx0_vga_adapt_done_o : 1; + u32 rxs3_rx0_ctle_adapt_en_i : 1; + u32 rxs3_rx0_ctle_adapt_done_o : 1; + u32 rxs3_rx0_adc_ofst_adapt_en_i : 1; + u32 rxs3_rx0_adc_ofst_adapt_done_o : 1; + u32 rxs3_rx0_adc_ofst_adapt_error_o : 1; + u32 rxs3_rx0_adc_gain_adapt_en_i : 1; + u32 rxs3_rx0_adc_gain_adapt_done_o : 1; + u32 rxs3_rx0_adc_gain_adapt_error_o : 1; + u32 rxs3_rx0_adc_intl_adapt_en_i : 1; + }; + u32 reg; +} E56G__PMD_RXS3_OVRDVAL_1; +//-----MACRO defines for Register:E56G__PMD_RXS3_OVRDVAL_1 +#define E56G__PMD_RXS3_OVRDVAL_1_ADDR (E56G__BASEADDR+0x15c8) + +//-----Access structure typedef for Register:E56G__PMD_CTRL_FSM_RX_STAT_0 +typedef union { + struct { + u32 ctrl_fsm_rx0_st : 6; + u32 rsvd0 : 2; + u32 ctrl_fsm_rx1_st : 6; + u32 rsvd1 : 2; + u32 ctrl_fsm_rx2_st : 6; + u32 rsvd2 : 2; + u32 ctrl_fsm_rx3_st : 6; + u32 rsvd3 : 2; + }; + u32 reg; +} E56G__PMD_CTRL_FSM_RX_STAT_0; +//-----MACRO defines for Register:E56G__PMD_CTRL_FSM_RX_STAT_0 +#define E56G__PMD_CTRL_FSM_RX_STAT_0_ADDR (E56G__BASEADDR+0x14fc) + +typedef union { + struct { + u32 ana_en_rterm_i : 1; + u32 ana_en_bias_i : 1; + u32 ana_en_ldo_i : 1; + u32 ana_rstn_i : 1; + u32 ana_en_blwc_i : 1; + u32 ana_en_acc_amp_i : 1; + u32 ana_en_acc_dac_i : 1; + u32 ana_en_afe_offset_cal_i : 1; + u32 ana_clk_offsetcal_i : 1; + u32 ana_acc_os_comp_o : 1; + u32 ana_en_ctle_i : 1; + u32 ana_ctle_bypass_i : 1; + u32 ana_en_ctlecdr_i : 1; + u32 ana_cdr_ctle_boost_i : 1; + u32 ana_en_vga_i : 1; + u32 ana_en_bbcdr_vco_i : 1; + u32 ana_bbcdr_vcofilt_byp_i : 1; + u32 ana_en_bbcdr_i : 1; + u32 ana_en_bbcdr_clk_i : 1; + u32 ana_bbcdr_en_elv_cnt_ping0_pong1_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_ping_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_pong_i : 1; + u32 ana_bbcdr_clrz_cnt_sync_i : 1; + u32 ana_bbcdr_en_elv_cnt_rd_i : 1; + u32 ana_bbcdr_elv_cnt_ping_0_o : 1; + u32 ana_bbcdr_elv_cnt_ping_90_o : 1; + u32 ana_bbcdr_elv_cnt_ping_180_o : 1; + u32 ana_bbcdr_elv_cnt_ping_270_o : 1; + u32 ana_bbcdr_elv_cnt_pong_0_o : 1; + u32 ana_bbcdr_elv_cnt_pong_90_o : 1; + u32 ana_bbcdr_elv_cnt_pong_180_o : 1; + u32 ana_bbcdr_elv_cnt_pong_270_o : 1; + }; + u32 reg; +} E56G__RXS0_ANA_OVRDVAL_0; +#define E56G__RXS0_ANA_OVRDVAL_0_ADDR (E56G__BASEADDR+0xa0) + +//-----Access structure typedef for Register:E56G__RXS1_ANA_OVRDVAL_0 +typedef union { + struct { + u32 ana_en_rterm_i : 1; + u32 ana_en_bias_i : 1; + u32 ana_en_ldo_i : 1; + u32 ana_rstn_i : 1; + u32 ana_en_blwc_i : 1; + u32 ana_en_acc_amp_i : 1; + u32 ana_en_acc_dac_i : 1; + u32 ana_en_afe_offset_cal_i : 1; + u32 ana_clk_offsetcal_i : 1; + u32 ana_acc_os_comp_o : 1; + u32 ana_en_ctle_i : 1; + u32 ana_ctle_bypass_i : 1; + u32 ana_en_ctlecdr_i : 1; + u32 ana_cdr_ctle_boost_i : 1; + u32 ana_en_vga_i : 1; + u32 ana_en_bbcdr_vco_i : 1; + u32 ana_bbcdr_vcofilt_byp_i : 1; + u32 ana_en_bbcdr_i : 1; + u32 ana_en_bbcdr_clk_i : 1; + u32 ana_bbcdr_en_elv_cnt_ping0_pong1_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_ping_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_pong_i : 1; + u32 ana_bbcdr_clrz_cnt_sync_i : 1; + u32 ana_bbcdr_en_elv_cnt_rd_i : 1; + u32 ana_bbcdr_elv_cnt_ping_0_o : 1; + u32 ana_bbcdr_elv_cnt_ping_90_o : 1; + u32 ana_bbcdr_elv_cnt_ping_180_o : 1; + u32 ana_bbcdr_elv_cnt_ping_270_o : 1; + u32 ana_bbcdr_elv_cnt_pong_0_o : 1; + u32 ana_bbcdr_elv_cnt_pong_90_o : 1; + u32 ana_bbcdr_elv_cnt_pong_180_o : 1; + u32 ana_bbcdr_elv_cnt_pong_270_o : 1; + }; + u32 reg; +} E56G__RXS1_ANA_OVRDVAL_0; +//-----MACRO defines for Register:E56G__RXS1_ANA_OVRDVAL_0 +#define 
E56G__RXS1_ANA_OVRDVAL_0_ADDR (E56G__BASEADDR+0x2a0) + +//-----Access structure typedef for Register:E56G__RXS2_ANA_OVRDVAL_0 +typedef union { + struct { + u32 ana_en_rterm_i : 1; + u32 ana_en_bias_i : 1; + u32 ana_en_ldo_i : 1; + u32 ana_rstn_i : 1; + u32 ana_en_blwc_i : 1; + u32 ana_en_acc_amp_i : 1; + u32 ana_en_acc_dac_i : 1; + u32 ana_en_afe_offset_cal_i : 1; + u32 ana_clk_offsetcal_i : 1; + u32 ana_acc_os_comp_o : 1; + u32 ana_en_ctle_i : 1; + u32 ana_ctle_bypass_i : 1; + u32 ana_en_ctlecdr_i : 1; + u32 ana_cdr_ctle_boost_i : 1; + u32 ana_en_vga_i : 1; + u32 ana_en_bbcdr_vco_i : 1; + u32 ana_bbcdr_vcofilt_byp_i : 1; + u32 ana_en_bbcdr_i : 1; + u32 ana_en_bbcdr_clk_i : 1; + u32 ana_bbcdr_en_elv_cnt_ping0_pong1_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_ping_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_pong_i : 1; + u32 ana_bbcdr_clrz_cnt_sync_i : 1; + u32 ana_bbcdr_en_elv_cnt_rd_i : 1; + u32 ana_bbcdr_elv_cnt_ping_0_o : 1; + u32 ana_bbcdr_elv_cnt_ping_90_o : 1; + u32 ana_bbcdr_elv_cnt_ping_180_o : 1; + u32 ana_bbcdr_elv_cnt_ping_270_o : 1; + u32 ana_bbcdr_elv_cnt_pong_0_o : 1; + u32 ana_bbcdr_elv_cnt_pong_90_o : 1; + u32 ana_bbcdr_elv_cnt_pong_180_o : 1; + u32 ana_bbcdr_elv_cnt_pong_270_o : 1; + }; + u32 reg; +} E56G__RXS2_ANA_OVRDVAL_0; +//-----MACRO defines for Register:E56G__RXS2_ANA_OVRDVAL_0 +#define E56G__RXS2_ANA_OVRDVAL_0_ADDR (E56G__BASEADDR+0x4a0) + +//-----Access structure typedef for Register:E56G__RXS3_ANA_OVRDVAL_0 +typedef union { + struct { + u32 ana_en_rterm_i : 1; + u32 ana_en_bias_i : 1; + u32 ana_en_ldo_i : 1; + u32 ana_rstn_i : 1; + u32 ana_en_blwc_i : 1; + u32 ana_en_acc_amp_i : 1; + u32 ana_en_acc_dac_i : 1; + u32 ana_en_afe_offset_cal_i : 1; + u32 ana_clk_offsetcal_i : 1; + u32 ana_acc_os_comp_o : 1; + u32 ana_en_ctle_i : 1; + u32 ana_ctle_bypass_i : 1; + u32 ana_en_ctlecdr_i : 1; + u32 ana_cdr_ctle_boost_i : 1; + u32 ana_en_vga_i : 1; + u32 ana_en_bbcdr_vco_i : 1; + u32 ana_bbcdr_vcofilt_byp_i : 1; + u32 ana_en_bbcdr_i : 1; + u32 ana_en_bbcdr_clk_i : 1; + u32 ana_bbcdr_en_elv_cnt_ping0_pong1_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_ping_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_pong_i : 1; + u32 ana_bbcdr_clrz_cnt_sync_i : 1; + u32 ana_bbcdr_en_elv_cnt_rd_i : 1; + u32 ana_bbcdr_elv_cnt_ping_0_o : 1; + u32 ana_bbcdr_elv_cnt_ping_90_o : 1; + u32 ana_bbcdr_elv_cnt_ping_180_o : 1; + u32 ana_bbcdr_elv_cnt_ping_270_o : 1; + u32 ana_bbcdr_elv_cnt_pong_0_o : 1; + u32 ana_bbcdr_elv_cnt_pong_90_o : 1; + u32 ana_bbcdr_elv_cnt_pong_180_o : 1; + u32 ana_bbcdr_elv_cnt_pong_270_o : 1; + }; + u32 reg; +} E56G__RXS3_ANA_OVRDVAL_0; +//-----MACRO defines for Register:E56G__RXS3_ANA_OVRDVAL_0 +#define E56G__RXS3_ANA_OVRDVAL_0_ADDR (E56G__BASEADDR+0x6a0) + +//-----Access structure typedef for Register:E56G__RXS0_ANA_OVRDEN_0 +typedef union { + struct { + u32 ovrd_en_ana_en_rterm_i : 1; + u32 ovrd_en_ana_trim_rterm_i : 1; + u32 ovrd_en_ana_en_bias_i : 1; + u32 ovrd_en_ana_test_bias_i : 1; + u32 ovrd_en_ana_en_ldo_i : 1; + u32 ovrd_en_ana_test_ldo_i : 1; + u32 ovrd_en_ana_rstn_i : 1; + u32 ovrd_en_ana_en_blwc_i : 1; + u32 ovrd_en_ana_en_acc_amp_i : 1; + u32 ovrd_en_ana_en_acc_dac_i : 1; + u32 ovrd_en_ana_en_afe_offset_cal_i : 1; + u32 ovrd_en_ana_clk_offsetcal_i : 1; + u32 ovrd_en_ana_acc_os_code_i : 1; + u32 ovrd_en_ana_acc_os_comp_o : 1; + u32 ovrd_en_ana_test_acc_i : 1; + u32 ovrd_en_ana_en_ctle_i : 1; + u32 ovrd_en_ana_ctle_bypass_i : 1; + u32 ovrd_en_ana_ctle_cz_cstm_i : 1; + u32 ovrd_en_ana_ctle_cload_cstm_i : 1; + u32 ovrd_en_ana_test_ctle_i : 1; + u32 ovrd_en_ana_lfeq_ctrl_cstm_i : 1; + u32 
ovrd_en_ana_en_ctlecdr_i : 1; + u32 ovrd_en_ana_cdr_ctle_boost_i : 1; + u32 ovrd_en_ana_test_ctlecdr_i : 1; + u32 ovrd_en_ana_en_vga_i : 1; + u32 ovrd_en_ana_vga_gain_cstm_i : 1; + u32 ovrd_en_ana_vga_cload_in_cstm_i : 1; + u32 ovrd_en_ana_test_vga_i : 1; + u32 ovrd_en_ana_en_bbcdr_vco_i : 1; + u32 ovrd_en_ana_bbcdr_osc_range_sel_i : 1; + u32 ovrd_en_ana_sel_vga_gain_byp_i : 1; + u32 ovrd_en_ana_vga2_gain_cstm_i : 1; + }; + u32 reg; +} E56G__RXS0_ANA_OVRDEN_0; +//-----MACRO defines for Register:E56G__RXS0_ANA_OVRDEN_0 +#define E56G__RXS0_ANA_OVRDEN_0_ADDR (E56G__BASEADDR+0x8c) + +//-----Access structure typedef for Register:E56G__RXS1_ANA_OVRDEN_0 +typedef union { + struct { + u32 ovrd_en_ana_en_rterm_i : 1; + u32 ovrd_en_ana_trim_rterm_i : 1; + u32 ovrd_en_ana_en_bias_i : 1; + u32 ovrd_en_ana_test_bias_i : 1; + u32 ovrd_en_ana_en_ldo_i : 1; + u32 ovrd_en_ana_test_ldo_i : 1; + u32 ovrd_en_ana_rstn_i : 1; + u32 ovrd_en_ana_en_blwc_i : 1; + u32 ovrd_en_ana_en_acc_amp_i : 1; + u32 ovrd_en_ana_en_acc_dac_i : 1; + u32 ovrd_en_ana_en_afe_offset_cal_i : 1; + u32 ovrd_en_ana_clk_offsetcal_i : 1; + u32 ovrd_en_ana_acc_os_code_i : 1; + u32 ovrd_en_ana_acc_os_comp_o : 1; + u32 ovrd_en_ana_test_acc_i : 1; + u32 ovrd_en_ana_en_ctle_i : 1; + u32 ovrd_en_ana_ctle_bypass_i : 1; + u32 ovrd_en_ana_ctle_cz_cstm_i : 1; + u32 ovrd_en_ana_ctle_cload_cstm_i : 1; + u32 ovrd_en_ana_test_ctle_i : 1; + u32 ovrd_en_ana_lfeq_ctrl_cstm_i : 1; + u32 ovrd_en_ana_en_ctlecdr_i : 1; + u32 ovrd_en_ana_cdr_ctle_boost_i : 1; + u32 ovrd_en_ana_test_ctlecdr_i : 1; + u32 ovrd_en_ana_en_vga_i : 1; + u32 ovrd_en_ana_vga_gain_cstm_i : 1; + u32 ovrd_en_ana_vga_cload_in_cstm_i : 1; + u32 ovrd_en_ana_test_vga_i : 1; + u32 ovrd_en_ana_en_bbcdr_vco_i : 1; + u32 ovrd_en_ana_bbcdr_osc_range_sel_i : 1; + u32 ovrd_en_ana_sel_vga_gain_byp_i : 1; + u32 ovrd_en_ana_vga2_gain_cstm_i : 1; + }; + u32 reg; +} E56G__RXS1_ANA_OVRDEN_0; +//-----MACRO defines for Register:E56G__RXS1_ANA_OVRDEN_0 +#define E56G__RXS1_ANA_OVRDEN_0_ADDR (E56G__BASEADDR+0x28c) + +//-----Access structure typedef for Register:E56G__RXS2_ANA_OVRDEN_0 +typedef union { + struct { + u32 ovrd_en_ana_en_rterm_i : 1; + u32 ovrd_en_ana_trim_rterm_i : 1; + u32 ovrd_en_ana_en_bias_i : 1; + u32 ovrd_en_ana_test_bias_i : 1; + u32 ovrd_en_ana_en_ldo_i : 1; + u32 ovrd_en_ana_test_ldo_i : 1; + u32 ovrd_en_ana_rstn_i : 1; + u32 ovrd_en_ana_en_blwc_i : 1; + u32 ovrd_en_ana_en_acc_amp_i : 1; + u32 ovrd_en_ana_en_acc_dac_i : 1; + u32 ovrd_en_ana_en_afe_offset_cal_i : 1; + u32 ovrd_en_ana_clk_offsetcal_i : 1; + u32 ovrd_en_ana_acc_os_code_i : 1; + u32 ovrd_en_ana_acc_os_comp_o : 1; + u32 ovrd_en_ana_test_acc_i : 1; + u32 ovrd_en_ana_en_ctle_i : 1; + u32 ovrd_en_ana_ctle_bypass_i : 1; + u32 ovrd_en_ana_ctle_cz_cstm_i : 1; + u32 ovrd_en_ana_ctle_cload_cstm_i : 1; + u32 ovrd_en_ana_test_ctle_i : 1; + u32 ovrd_en_ana_lfeq_ctrl_cstm_i : 1; + u32 ovrd_en_ana_en_ctlecdr_i : 1; + u32 ovrd_en_ana_cdr_ctle_boost_i : 1; + u32 ovrd_en_ana_test_ctlecdr_i : 1; + u32 ovrd_en_ana_en_vga_i : 1; + u32 ovrd_en_ana_vga_gain_cstm_i : 1; + u32 ovrd_en_ana_vga_cload_in_cstm_i : 1; + u32 ovrd_en_ana_test_vga_i : 1; + u32 ovrd_en_ana_en_bbcdr_vco_i : 1; + u32 ovrd_en_ana_bbcdr_osc_range_sel_i : 1; + u32 ovrd_en_ana_sel_vga_gain_byp_i : 1; + u32 ovrd_en_ana_vga2_gain_cstm_i : 1; + }; + u32 reg; +} E56G__RXS2_ANA_OVRDEN_0; +//-----MACRO defines for Register:E56G__RXS2_ANA_OVRDEN_0 +#define E56G__RXS2_ANA_OVRDEN_0_ADDR (E56G__BASEADDR+0x48c) + +//-----Access structure typedef for Register:E56G__RXS3_ANA_OVRDEN_0 +typedef 
union { + struct { + u32 ovrd_en_ana_en_rterm_i : 1; + u32 ovrd_en_ana_trim_rterm_i : 1; + u32 ovrd_en_ana_en_bias_i : 1; + u32 ovrd_en_ana_test_bias_i : 1; + u32 ovrd_en_ana_en_ldo_i : 1; + u32 ovrd_en_ana_test_ldo_i : 1; + u32 ovrd_en_ana_rstn_i : 1; + u32 ovrd_en_ana_en_blwc_i : 1; + u32 ovrd_en_ana_en_acc_amp_i : 1; + u32 ovrd_en_ana_en_acc_dac_i : 1; + u32 ovrd_en_ana_en_afe_offset_cal_i : 1; + u32 ovrd_en_ana_clk_offsetcal_i : 1; + u32 ovrd_en_ana_acc_os_code_i : 1; + u32 ovrd_en_ana_acc_os_comp_o : 1; + u32 ovrd_en_ana_test_acc_i : 1; + u32 ovrd_en_ana_en_ctle_i : 1; + u32 ovrd_en_ana_ctle_bypass_i : 1; + u32 ovrd_en_ana_ctle_cz_cstm_i : 1; + u32 ovrd_en_ana_ctle_cload_cstm_i : 1; + u32 ovrd_en_ana_test_ctle_i : 1; + u32 ovrd_en_ana_lfeq_ctrl_cstm_i : 1; + u32 ovrd_en_ana_en_ctlecdr_i : 1; + u32 ovrd_en_ana_cdr_ctle_boost_i : 1; + u32 ovrd_en_ana_test_ctlecdr_i : 1; + u32 ovrd_en_ana_en_vga_i : 1; + u32 ovrd_en_ana_vga_gain_cstm_i : 1; + u32 ovrd_en_ana_vga_cload_in_cstm_i : 1; + u32 ovrd_en_ana_test_vga_i : 1; + u32 ovrd_en_ana_en_bbcdr_vco_i : 1; + u32 ovrd_en_ana_bbcdr_osc_range_sel_i : 1; + u32 ovrd_en_ana_sel_vga_gain_byp_i : 1; + u32 ovrd_en_ana_vga2_gain_cstm_i : 1; + }; + u32 reg; +} E56G__RXS3_ANA_OVRDEN_0; +//-----MACRO defines for Register:E56G__RXS3_ANA_OVRDEN_0 +#define E56G__RXS3_ANA_OVRDEN_0_NUM 1 +#define E56G__RXS3_ANA_OVRDEN_0_ADDR (E56G__BASEADDR+0x68c) + +//-----Access structure typedef for Register:E56G__RXS0_ANA_OVRDVAL_3 +typedef union { + struct { + u32 ana_ctle_cz_cstm_i : 5; + u32 rsvd0 : 3; + u32 ana_ctle_cload_cstm_i : 5; + u32 rsvd1 : 3; + u32 ana_test_ctle_i : 2; + u32 rsvd2 : 2; + u32 ana_lfeq_ctrl_cstm_i : 4; + u32 ana_test_ctlecdr_i : 2; + u32 rsvd3 : 2; + u32 ana_vga_cload_in_cstm_i : 3; + u32 rsvd4 : 1; + }; + u32 reg; +} E56G__RXS0_ANA_OVRDVAL_3; +//-----MACRO defines for Register:E56G__RXS0_ANA_OVRDVAL_3 +#define E56G__RXS0_ANA_OVRDVAL_3_NUM 1 +#define E56G__RXS0_ANA_OVRDVAL_3_ADDR (E56G__BASEADDR+0xac) + +//-----Access structure typedef for Register:E56G__RXS1_ANA_OVRDVAL_3 +typedef union { + struct { + u32 ana_ctle_cz_cstm_i : 5; + u32 rsvd0 : 3; + u32 ana_ctle_cload_cstm_i : 5; + u32 rsvd1 : 3; + u32 ana_test_ctle_i : 2; + u32 rsvd2 : 2; + u32 ana_lfeq_ctrl_cstm_i : 4; + u32 ana_test_ctlecdr_i : 2; + u32 rsvd3 : 2; + u32 ana_vga_cload_in_cstm_i : 3; + u32 rsvd4 : 1; + }; + u32 reg; +} E56G__RXS1_ANA_OVRDVAL_3; +//-----MACRO defines for Register:E56G__RXS1_ANA_OVRDVAL_3 +#define E56G__RXS1_ANA_OVRDVAL_3_ADDR (E56G__BASEADDR+0x2ac) + +//-----Access structure typedef for Register:E56G__RXS2_ANA_OVRDVAL_3 +typedef union { + struct { + u32 ana_ctle_cz_cstm_i : 5; + u32 rsvd0 : 3; + u32 ana_ctle_cload_cstm_i : 5; + u32 rsvd1 : 3; + u32 ana_test_ctle_i : 2; + u32 rsvd2 : 2; + u32 ana_lfeq_ctrl_cstm_i : 4; + u32 ana_test_ctlecdr_i : 2; + u32 rsvd3 : 2; + u32 ana_vga_cload_in_cstm_i : 3; + u32 rsvd4 : 1; + }; + u32 reg; +} E56G__RXS2_ANA_OVRDVAL_3; +//-----MACRO defines for Register:E56G__RXS2_ANA_OVRDVAL_3 +#define E56G__RXS2_ANA_OVRDVAL_3_ADDR (E56G__BASEADDR+0x4ac) + +//-----Access structure typedef for Register:E56G__RXS3_ANA_OVRDVAL_3 +typedef union { + struct { + u32 ana_ctle_cz_cstm_i : 5; + u32 rsvd0 : 3; + u32 ana_ctle_cload_cstm_i : 5; + u32 rsvd1 : 3; + u32 ana_test_ctle_i : 2; + u32 rsvd2 : 2; + u32 ana_lfeq_ctrl_cstm_i : 4; + u32 ana_test_ctlecdr_i : 2; + u32 rsvd3 : 2; + u32 ana_vga_cload_in_cstm_i : 3; + u32 rsvd4 : 1; + }; + u32 reg; +} E56G__RXS3_ANA_OVRDVAL_3; +//-----MACRO defines for Register:E56G__RXS3_ANA_OVRDVAL_3 +#define 
E56G__RXS3_ANA_OVRDVAL_3_ADDR (E56G__BASEADDR+0x6ac) + +//-----Access structure typedef for Register:E56G__PMD_RXS0_OVRDEN_0 +typedef union { + struct { + u32 ovrd_en_rxs0_rx0_rstn_i : 1; + u32 ovrd_en_rxs0_rx0_bitclk_divctrl_i : 1; + u32 ovrd_en_rxs0_rx0_bitclk_rate_i : 1; + u32 ovrd_en_rxs0_rx0_symdata_width_i : 1; + u32 ovrd_en_rxs0_rx0_symdata_o : 1; + u32 ovrd_en_rxs0_rx0_precode_en_i : 1; + u32 ovrd_en_rxs0_rx0_signal_type_i : 1; + u32 ovrd_en_rxs0_rx0_sync_detect_en_i : 1; + u32 ovrd_en_rxs0_rx0_sync_o : 1; + u32 ovrd_en_rxs0_rx0_rate_select_i : 1; + u32 ovrd_en_rxs0_rx0_rterm_en_i : 1; + u32 ovrd_en_rxs0_rx0_bias_en_i : 1; + u32 ovrd_en_rxs0_rx0_ldo_en_i : 1; + u32 ovrd_en_rxs0_rx0_ldo_rdy_i : 1; + u32 ovrd_en_rxs0_rx0_blwc_en_i : 1; + u32 ovrd_en_rxs0_rx0_ctle_en_i : 1; + u32 ovrd_en_rxs0_rx0_vga_en_i : 1; + u32 ovrd_en_rxs0_rx0_osc_sel_i : 1; + u32 ovrd_en_rxs0_rx0_osc_en_i : 1; + u32 ovrd_en_rxs0_rx0_clkgencdr_en_i : 1; + u32 ovrd_en_rxs0_rx0_ctlecdr_en_i : 1; + u32 ovrd_en_rxs0_rx0_samp_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_en_i : 1; + u32 ovrd_en_rxs0_rx0_osc_cal_en_i : 1; + u32 ovrd_en_rxs0_rx0_osc_cal_done_o : 1; + u32 ovrd_en_rxs0_rx0_osc_freq_error_o : 1; + u32 ovrd_en_rxs0_rx0_samp_cal_en_i : 1; + u32 ovrd_en_rxs0_rx0_samp_cal_done_o : 1; + u32 ovrd_en_rxs0_rx0_samp_cal_err_o : 1; + u32 ovrd_en_rxs0_rx0_adc_ofst_cal_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_ofst_cal_done_o : 1; + u32 ovrd_en_rxs0_rx0_adc_ofst_cal_error_o : 1; + }; + u32 reg; +} E56G__PMD_RXS0_OVRDEN_0; +//-----MACRO defines for Register:E56G__PMD_RXS0_OVRDEN_0 +#define E56G__PMD_RXS0_OVRDEN_0_NUM 1 +#define E56G__PMD_RXS0_OVRDEN_0_ADDR (E56G__BASEADDR+0x1530) + +//-----Access structure typedef for Register:E56G__RXS0_DFT_1 +typedef union { + struct { + u32 ber_en : 1; + u32 rsvd0 : 3; + u32 read_mode_en : 1; + u32 rsvd1 : 3; + u32 err_cnt_mode_all0_one1 : 1; + u32 rsvd2 : 3; + u32 init_lfsr_mode_continue0_restart1 : 1; + u32 rsvd3 : 3; + u32 pattern_sel : 4; + u32 rsvd4 : 12; + }; + u32 reg; +} E56G__RXS0_DFT_1; +//-----MACRO defines for Register:E56G__RXS0_DFT_1 +#define E56G__RXS0_DFT_1_NUM 1 +#define E56G__RXS0_DFT_1_ADDR (E56G__BASEADDR+0xec) + +//-----Access structure typedef for Register:E56G__PMD_RXS0_OVRDEN_1 +typedef union { + struct { + u32 ovrd_en_rxs0_rx0_adc_gain_cal_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_gain_cal_done_o : 1; + u32 ovrd_en_rxs0_rx0_adc_gain_cal_error_o : 1; + u32 ovrd_en_rxs0_rx0_fe_ofst_cal_en_i : 1; + u32 ovrd_en_rxs0_rx0_fe_ofst_cal_done_o : 1; + u32 ovrd_en_rxs0_rx0_fe_ofst_cal_error_o : 1; + u32 ovrd_en_rxs0_rx0_fom_en_i : 1; + u32 ovrd_en_rxs0_rx0_idle_detect_en_i : 1; + u32 ovrd_en_rxs0_rx0_idle_o : 1; + u32 ovrd_en_rxs0_rx0_txffe_train_en_i : 1; + u32 ovrd_en_rxs0_rx0_txffe_coeff_rst_i : 1; + u32 ovrd_en_rxs0_rx0_txffe_train_enack_o : 1; + u32 ovrd_en_rxs0_rx0_txffe_train_done_o : 1; + u32 ovrd_en_rxs0_rx0_txffe_coeff_change_o : 1; + u32 ovrd_en_rxs0_rx0_vga_train_en_i : 1; + u32 ovrd_en_rxs0_rx0_vga_train_done_o : 1; + u32 ovrd_en_rxs0_rx0_ctle_train_en_i : 1; + u32 ovrd_en_rxs0_rx0_ctle_train_done_o : 1; + u32 ovrd_en_rxs0_rx0_cdr_en_i : 1; + u32 ovrd_en_rxs0_rx0_cdr_rdy_o : 1; + u32 ovrd_en_rxs0_rx0_ffe_train_en_i : 1; + u32 ovrd_en_rxs0_rx0_ffe_train_done_o : 1; + u32 ovrd_en_rxs0_rx0_mmpd_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_intl_cal_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_intl_cal_done_o : 1; + u32 ovrd_en_rxs0_rx0_adc_intl_cal_error_o : 1; + u32 ovrd_en_rxs0_rx0_dfe_train_en_i : 1; + u32 ovrd_en_rxs0_rx0_dfe_train_done_o : 1; + u32 ovrd_en_rxs0_rx0_vga_adapt_en_i : 1; + 
u32 ovrd_en_rxs0_rx0_vga_adapt_done_o : 1; + u32 ovrd_en_rxs0_rx0_ctle_adapt_en_i : 1; + u32 ovrd_en_rxs0_rx0_ctle_adapt_done_o : 1; + }; + u32 reg; +} E56G__PMD_RXS0_OVRDEN_1; +//-----MACRO defines for Register:E56G__PMD_RXS0_OVRDEN_1 +#define E56G__PMD_RXS0_OVRDEN_1_NUM 1 +#define E56G__PMD_RXS0_OVRDEN_1_ADDR (E56G__BASEADDR+0x1534) + +//-----Access structure typedef for Register:E56G__PMD_RXS0_OVRDEN_3 +typedef union { + struct { + u32 ovrd_en_rxs0_rx0_sparein_i : 8; + u32 ovrd_en_rxs0_rx0_spareout_o : 8; + u32 rsvd0 : 16; + }; + u32 reg; +} E56G__PMD_RXS0_OVRDEN_3; +//-----MACRO defines for Register:E56G__PMD_RXS0_OVRDEN_3 +#define E56G__PMD_RXS0_OVRDEN_3_NUM 1 +#define E56G__PMD_RXS0_OVRDEN_3_ADDR (E56G__BASEADDR+0x153c) + +//-----Access structure typedef for Register:E56G__RXS0_DIG_OVRDEN_1 +typedef union { + struct { + u32 vco_code_cont_adj_done_ovrd_en : 1; + u32 dfe_coeffl_ovrd_en : 1; + u32 dfe_coeffh_ovrd_en : 1; + u32 rsvd0 : 1; + u32 top_comp_th_ovrd_en : 1; + u32 mid_comp_th_ovrd_en : 1; + u32 bot_comp_th_ovrd_en : 1; + u32 rsvd1 : 1; + u32 level_target_ovrd_en : 4; + u32 ffe_coeff_c0to3_ovrd_en : 4; + u32 ffe_coeff_c4to7_ovrd_en : 4; + u32 ffe_coeff_c8to11_ovrd_en : 4; + u32 ffe_coeff_c12to15_ovrd_en : 4; + u32 ffe_coeff_update_ovrd_en : 1; + u32 rsvd2 : 3; + }; + u32 reg; +} E56G__RXS0_DIG_OVRDEN_1; +//-----MACRO defines for Register:E56G__RXS0_DIG_OVRDEN_1 +#define E56G__RXS0_DIG_OVRDEN_1_NUM 1 +#define E56G__RXS0_DIG_OVRDEN_1_ADDR (E56G__BASEADDR+0x160) + +//-----Access structure typedef for Register:E56G__PMD_RXS1_OVRDEN_1 +typedef union { + struct { + u32 ovrd_en_rxs1_rx0_adc_gain_cal_en_i : 1; + u32 ovrd_en_rxs1_rx0_adc_gain_cal_done_o : 1; + u32 ovrd_en_rxs1_rx0_adc_gain_cal_error_o : 1; + u32 ovrd_en_rxs1_rx0_fe_ofst_cal_en_i : 1; + u32 ovrd_en_rxs1_rx0_fe_ofst_cal_done_o : 1; + u32 ovrd_en_rxs1_rx0_fe_ofst_cal_error_o : 1; + u32 ovrd_en_rxs1_rx0_fom_en_i : 1; + u32 ovrd_en_rxs1_rx0_idle_detect_en_i : 1; + u32 ovrd_en_rxs1_rx0_idle_o : 1; + u32 ovrd_en_rxs1_rx0_txffe_train_en_i : 1; + u32 ovrd_en_rxs1_rx0_txffe_coeff_rst_i : 1; + u32 ovrd_en_rxs1_rx0_txffe_train_enack_o : 1; + u32 ovrd_en_rxs1_rx0_txffe_train_done_o : 1; + u32 ovrd_en_rxs1_rx0_txffe_coeff_change_o : 1; + u32 ovrd_en_rxs1_rx0_vga_train_en_i : 1; + u32 ovrd_en_rxs1_rx0_vga_train_done_o : 1; + u32 ovrd_en_rxs1_rx0_ctle_train_en_i : 1; + u32 ovrd_en_rxs1_rx0_ctle_train_done_o : 1; + u32 ovrd_en_rxs1_rx0_cdr_en_i : 1; + u32 ovrd_en_rxs1_rx0_cdr_rdy_o : 1; + u32 ovrd_en_rxs1_rx0_ffe_train_en_i : 1; + u32 ovrd_en_rxs1_rx0_ffe_train_done_o : 1; + u32 ovrd_en_rxs1_rx0_mmpd_en_i : 1; + u32 ovrd_en_rxs1_rx0_adc_intl_cal_en_i : 1; + u32 ovrd_en_rxs1_rx0_adc_intl_cal_done_o : 1; + u32 ovrd_en_rxs1_rx0_adc_intl_cal_error_o : 1; + u32 ovrd_en_rxs1_rx0_dfe_train_en_i : 1; + u32 ovrd_en_rxs1_rx0_dfe_train_done_o : 1; + u32 ovrd_en_rxs1_rx0_vga_adapt_en_i : 1; + u32 ovrd_en_rxs1_rx0_vga_adapt_done_o : 1; + u32 ovrd_en_rxs1_rx0_ctle_adapt_en_i : 1; + u32 ovrd_en_rxs1_rx0_ctle_adapt_done_o : 1; + }; + u32 reg; +} E56G__PMD_RXS1_OVRDEN_1; +//-----MACRO defines for Register:E56G__PMD_RXS1_OVRDEN_1 +#define E56G__PMD_RXS1_OVRDEN_1_ADDR (E56G__BASEADDR+0x1560) + +//-----Access structure typedef for Register:E56G__PMD_RXS2_OVRDEN_1 +typedef union { + struct { + u32 ovrd_en_rxs2_rx0_adc_gain_cal_en_i : 1; + u32 ovrd_en_rxs2_rx0_adc_gain_cal_done_o : 1; + u32 ovrd_en_rxs2_rx0_adc_gain_cal_error_o : 1; + u32 ovrd_en_rxs2_rx0_fe_ofst_cal_en_i : 1; + u32 ovrd_en_rxs2_rx0_fe_ofst_cal_done_o : 1; + u32 
ovrd_en_rxs2_rx0_fe_ofst_cal_error_o : 1; + u32 ovrd_en_rxs2_rx0_fom_en_i : 1; + u32 ovrd_en_rxs2_rx0_idle_detect_en_i : 1; + u32 ovrd_en_rxs2_rx0_idle_o : 1; + u32 ovrd_en_rxs2_rx0_txffe_train_en_i : 1; + u32 ovrd_en_rxs2_rx0_txffe_coeff_rst_i : 1; + u32 ovrd_en_rxs2_rx0_txffe_train_enack_o : 1; + u32 ovrd_en_rxs2_rx0_txffe_train_done_o : 1; + u32 ovrd_en_rxs2_rx0_txffe_coeff_change_o : 1; + u32 ovrd_en_rxs2_rx0_vga_train_en_i : 1; + u32 ovrd_en_rxs2_rx0_vga_train_done_o : 1; + u32 ovrd_en_rxs2_rx0_ctle_train_en_i : 1; + u32 ovrd_en_rxs2_rx0_ctle_train_done_o : 1; + u32 ovrd_en_rxs2_rx0_cdr_en_i : 1; + u32 ovrd_en_rxs2_rx0_cdr_rdy_o : 1; + u32 ovrd_en_rxs2_rx0_ffe_train_en_i : 1; + u32 ovrd_en_rxs2_rx0_ffe_train_done_o : 1; + u32 ovrd_en_rxs2_rx0_mmpd_en_i : 1; + u32 ovrd_en_rxs2_rx0_adc_intl_cal_en_i : 1; + u32 ovrd_en_rxs2_rx0_adc_intl_cal_done_o : 1; + u32 ovrd_en_rxs2_rx0_adc_intl_cal_error_o : 1; + u32 ovrd_en_rxs2_rx0_dfe_train_en_i : 1; + u32 ovrd_en_rxs2_rx0_dfe_train_done_o : 1; + u32 ovrd_en_rxs2_rx0_vga_adapt_en_i : 1; + u32 ovrd_en_rxs2_rx0_vga_adapt_done_o : 1; + u32 ovrd_en_rxs2_rx0_ctle_adapt_en_i : 1; + u32 ovrd_en_rxs2_rx0_ctle_adapt_done_o : 1; + }; + u32 reg; +} E56G__PMD_RXS2_OVRDEN_1; +//-----MACRO defines for Register:E56G__PMD_RXS2_OVRDEN_1 +#define E56G__PMD_RXS2_OVRDEN_1_ADDR (E56G__BASEADDR+0x158c) + +//-----Access structure typedef for Register:E56G__PMD_RXS3_OVRDEN_1 +typedef union { + struct { + u32 ovrd_en_rxs3_rx0_adc_gain_cal_en_i : 1; + u32 ovrd_en_rxs3_rx0_adc_gain_cal_done_o : 1; + u32 ovrd_en_rxs3_rx0_adc_gain_cal_error_o : 1; + u32 ovrd_en_rxs3_rx0_fe_ofst_cal_en_i : 1; + u32 ovrd_en_rxs3_rx0_fe_ofst_cal_done_o : 1; + u32 ovrd_en_rxs3_rx0_fe_ofst_cal_error_o : 1; + u32 ovrd_en_rxs3_rx0_fom_en_i : 1; + u32 ovrd_en_rxs3_rx0_idle_detect_en_i : 1; + u32 ovrd_en_rxs3_rx0_idle_o : 1; + u32 ovrd_en_rxs3_rx0_txffe_train_en_i : 1; + u32 ovrd_en_rxs3_rx0_txffe_coeff_rst_i : 1; + u32 ovrd_en_rxs3_rx0_txffe_train_enack_o : 1; + u32 ovrd_en_rxs3_rx0_txffe_train_done_o : 1; + u32 ovrd_en_rxs3_rx0_txffe_coeff_change_o : 1; + u32 ovrd_en_rxs3_rx0_vga_train_en_i : 1; + u32 ovrd_en_rxs3_rx0_vga_train_done_o : 1; + u32 ovrd_en_rxs3_rx0_ctle_train_en_i : 1; + u32 ovrd_en_rxs3_rx0_ctle_train_done_o : 1; + u32 ovrd_en_rxs3_rx0_cdr_en_i : 1; + u32 ovrd_en_rxs3_rx0_cdr_rdy_o : 1; + u32 ovrd_en_rxs3_rx0_ffe_train_en_i : 1; + u32 ovrd_en_rxs3_rx0_ffe_train_done_o : 1; + u32 ovrd_en_rxs3_rx0_mmpd_en_i : 1; + u32 ovrd_en_rxs3_rx0_adc_intl_cal_en_i : 1; + u32 ovrd_en_rxs3_rx0_adc_intl_cal_done_o : 1; + u32 ovrd_en_rxs3_rx0_adc_intl_cal_error_o : 1; + u32 ovrd_en_rxs3_rx0_dfe_train_en_i : 1; + u32 ovrd_en_rxs3_rx0_dfe_train_done_o : 1; + u32 ovrd_en_rxs3_rx0_vga_adapt_en_i : 1; + u32 ovrd_en_rxs3_rx0_vga_adapt_done_o : 1; + u32 ovrd_en_rxs3_rx0_ctle_adapt_en_i : 1; + u32 ovrd_en_rxs3_rx0_ctle_adapt_done_o : 1; + }; + u32 reg; +} E56G__PMD_RXS3_OVRDEN_1; +//-----MACRO defines for Register:E56G__PMD_RXS3_OVRDEN_1 +#define E56G__PMD_RXS3_OVRDEN_1_ADDR (E56G__BASEADDR+0x15b8) + +#define E56G__RXS0_FOM_18__ADDR (E56G__BASEADDR+0x1f8) +#define E56G__RXS0_FOM_18__DFE_COEFFL_HINT__MSB 11 +#define E56G__RXS0_FOM_18__DFE_COEFFL_HINT__LSB 0 +#define E56G__RXS0_FOM_18__DFE_COEFFH_HINT__MSB 23 +#define E56G__RXS0_FOM_18__DFE_COEFFH_HINT__LSB 12 +#define E56G__RXS0_FOM_18__DFE_COEFF_HINT_LOAD__MSB 25 +#define E56G__RXS0_FOM_18__DFE_COEFF_HINT_LOAD__LSB 25 + +#define DEFAULT_TEMP 40 +#define HIGH_TEMP 70 + +#define E56PHY_RX_RDY_ST 0x1B + +#define S10G_CMVAR_RANGE_H 0x3 +#define 
S10G_CMVAR_RANGE_L 0x2 +#define S25G_CMVAR_RANGE_H 0x1 +#define S25G_CMVAR_RANGE_L 0x0 + +#define S25G_CMVAR_RANGE_H 0x1 +#define S25G_CMVAR_RANGE_L 0x0 +#define S25G_CMVAR_SEC_LOW_TH 0x1A +#define S25G_CMVAR_SEC_HIGH_TH 0x1D +#define S25G_CMVAR_UFINE_MAX 0x2 +#define S25G_CMVAR_FINE_MAX 0x7 +#define S25G_CMVAR_COARSE_MAX 0xF +#define S25G_CMVAR_UFINE_UMAX_WRAP 0x0 +#define S25G_CMVAR_UFINE_FMAX_WRAP 0x0 +#define S25G_CMVAR_FINE_FMAX_WRAP 0x2 +#define S25G_CMVAR_UFINE_MIN 0x0 +#define S25G_CMVAR_FINE_MIN 0x0 +#define S25G_CMVAR_COARSE_MIN 0x1 +#define S25G_CMVAR_UFINE_UMIN_WRAP 0x2 +#define S25G_CMVAR_UFINE_FMIN_WRAP 0x2 +#define S25G_CMVAR_FINE_FMIN_WRAP 0x5 + +#define S10G_CMVAR_RANGE_H 0x3 +#define S10G_CMVAR_RANGE_L 0x2 +#define S10G_CMVAR_SEC_LOW_TH 0x1A +#define S10G_CMVAR_SEC_HIGH_TH 0x1D +#define S10G_CMVAR_UFINE_MAX 0x7 +#define S10G_CMVAR_FINE_MAX 0x7 +#define S10G_CMVAR_COARSE_MAX 0xF +#define S10G_CMVAR_UFINE_UMAX_WRAP 0x6 +#define S10G_CMVAR_UFINE_FMAX_WRAP 0x7 +#define S10G_CMVAR_FINE_FMAX_WRAP 0x1 +#define S10G_CMVAR_UFINE_MIN 0x0 +#define S10G_CMVAR_FINE_MIN 0x0 +#define S10G_CMVAR_COARSE_MIN 0x1 +#define S10G_CMVAR_UFINE_UMIN_WRAP 0x2 +#define S10G_CMVAR_UFINE_FMIN_WRAP 0x2 +#define S10G_CMVAR_FINE_FMIN_WRAP 0x5 + +#define S10G_TX_FFE_CFG_MAIN 0x2c2c2c2c +#define S10G_TX_FFE_CFG_PRE1 0x0 +#define S10G_TX_FFE_CFG_PRE2 0x0 +#define S10G_TX_FFE_CFG_POST 0x6060606 +#define S25G_TX_FFE_CFG_MAIN 49 +#define S25G_TX_FFE_CFG_PRE1 4 +#define S25G_TX_FFE_CFG_PRE2 1 +#define S25G_TX_FFE_CFG_POST 9 + +/* for dac test*/ +#define S25G_TX_FFE_CFG_DAC_MAIN 0x2a +#define S25G_TX_FFE_CFG_DAC_PRE1 0x3 +#define S25G_TX_FFE_CFG_DAC_PRE2 0x0 +#define S25G_TX_FFE_CFG_DAC_POST 0x11 + +#define BYPASS_CTLE_TAG 0x0 + +#define S10G_PHY_RX_CTLE_TAPWT_WEIGHT1 0x1 +#define S10G_PHY_RX_CTLE_TAPWT_WEIGHT2 0x0 +#define S10G_PHY_RX_CTLE_TAPWT_WEIGHT3 0x0 +#define S10G_PHY_RX_CTLE_TAP_FRACP1 0x18 +#define S10G_PHY_RX_CTLE_TAP_FRACP2 0x0 +#define S10G_PHY_RX_CTLE_TAP_FRACP3 0x0 + +#define S25G_PHY_RX_CTLE_TAPWT_WEIGHT1 0x1 +#define S25G_PHY_RX_CTLE_TAPWT_WEIGHT2 0x0 +#define S25G_PHY_RX_CTLE_TAPWT_WEIGHT3 0x0 +#define S25G_PHY_RX_CTLE_TAP_FRACP1 0x18 +#define S25G_PHY_RX_CTLE_TAP_FRACP2 0x0 +#define S25G_PHY_RX_CTLE_TAP_FRACP3 0x0 + +#define TXGBE_E56_PHY_LINK_UP 0x4 + +#define __bf_shf_m(x) (__builtin_ffsll(x) - 1) + +#define FIELD_PREP_M(_mask, _val) \ + ({ \ + ((typeof(_mask))(_val) << __bf_shf_m(_mask)) & (_mask); \ + }) + +/** + * FIELD_GET_M() - extract a bitfield element + * @_mask: shifted mask defining the field's length and position + * @_reg: value of entire bitfield + * + * FIELD_GET_M() extracts the field specified by @_mask from the + * bitfield passed in as @_reg by masking and shifting it down. 
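+ *
+ * Worked example (illustrative values only, not taken from hardware): with
+ * _mask = GENMASK(14, 12), i.e. 0x7000, FIELD_GET_M(GENMASK(14, 12), 0x5000)
+ * evaluates to 0x5, and FIELD_PREP_M(GENMASK(14, 12), 0x5) gives back 0x5000.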
+ */ +#define FIELD_GET_M(_mask, _reg) \ + ({ \ + (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf_m(_mask)); \ + }) + +void field_set(u32 *psrcdata, u32 bithigh, u32 bitlow, u32 setvalue); +int E56phyRxRdSecondCode(struct txgbe_hw *hw, int *SECOND_CODE); +u32 txgbe_e56_cfg_25g(struct txgbe_hw *hw); +u32 txgbe_e56_cfg_10g(struct txgbe_hw *hw); +u32 txgbe_e56_cfg_40g(struct txgbe_hw *hw); + +int txgbe_set_link_to_amlite(struct txgbe_hw *hw, u32 speed); +u32 txgbe_e56_cfg_temp(struct txgbe_hw *hw); +int txgbe_e56_get_temp(struct txgbe_hw *hw, int *pTempData); +int txgbe_e56_reconfig_rx(struct txgbe_hw *hw, u32 speed); +int txgbe_e56_config_rx_40G(struct txgbe_hw *hw, u32 speed); +int txgbe_temp_track_seq(struct txgbe_hw *hw, u32 speed); +int txgbe_temp_track_seq_40g(struct txgbe_hw *hw, u32 speed); +int txgbe_get_cur_fec_mode(struct txgbe_hw *hw); +int txgbe_e56_set_fec_mode(struct txgbe_hw *hw, u8 fec_mode); +int txgbe_e56_fec_mode_polling(struct txgbe_hw *hw, bool *link_up); +s32 txgbe_e56_check_phy_link(struct txgbe_hw *hw, u32 *speed, + bool *link_up); + +#endif /* _TXGBE_E56_H_ */ + diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_e56_bp.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56_bp.c new file mode 100644 index 0000000000000000000000000000000000000000..6b6a99ba7bc544203167b08de4cf88f78c26a2d1 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56_bp.c @@ -0,0 +1,2679 @@ +#include "txgbe_e56.h" +#include "txgbe_hw.h" + +#include "txgbe.h" +#include "txgbe_type.h" +#include "txgbe_e56_bp.h" +#include "txgbe_bp.h" + +static int E56phySetRxsUfineLeMax(struct txgbe_adapter *adapter, u32 speed) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 rdata, addr; + u32 ULTRAFINE_CODE[4] = {0}; + int lane_num = 0, lane_idx = 0; + u32 CMVAR_UFINE_MAX = 0; + + switch (speed) { + case 10: + CMVAR_UFINE_MAX = S10G_CMVAR_UFINE_MAX; + lane_num = 1; + break; + case 40: + CMVAR_UFINE_MAX = S10G_CMVAR_UFINE_MAX; + lane_num = 4; + break; + case 25: + CMVAR_UFINE_MAX = S25G_CMVAR_UFINE_MAX; + lane_num = 1; + break; + default: + kr_dbg(KR_MODE, "%s %d :Invalid speed\n", __func__, __LINE__); + break; + } + + for (lane_idx = 0; lane_idx < lane_num; lane_idx++) { + /* ii get rx ana_bbcdr_ultrafine_i[14, 12] per lane */ + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + ULTRAFINE_CODE[lane_idx] = FIELD_GET_M(GENMASK(14, 12), rdata); + kr_dbg(KR_MODE, "ULTRAFINE_CODE[%d] = %d, CMVAR_UFINE_MAX: %x\n", + lane_idx, ULTRAFINE_CODE[lane_idx], CMVAR_UFINE_MAX); + } + + + for (lane_idx = 0; lane_idx < lane_num; lane_idx++) { + //b. 
Perform the below logic sequence + while (ULTRAFINE_CODE[lane_idx] > CMVAR_UFINE_MAX) { + ULTRAFINE_CODE[lane_idx] -= 1; + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + + (E56PHY_RXS_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 14, 12, ULTRAFINE_CODE[lane_idx]); + txgbe_wr32_ephy(hw, addr, rdata); + + /* ovrd_en_ana_bbcdr_ultrafine=1 override ASIC value */ + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + + (E56PHY_RXS_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + txgbe_wr32_ephy(hw, addr, rdata | BIT(3)); + + // Wait until 1milliseconds or greater + usec_delay(1000); + } + } + + return 0; +} + +static int E56phyRxsOscInitForTempTrackRange(struct txgbe_adapter *adapter, + u32 speed) +{ + int OFFSET_CENTRE_RANGE_H[4] = {0}, OFFSET_CENTRE_RANGE_L[4] = {}, RANGE_FINAL[4] = {}; + int RX_COARSE_MID_TD, CMVAR_RANGE_H = 0, CMVAR_RANGE_L = 0; + struct txgbe_hw *hw = &adapter->hw; + int status = 0, lane_num = 0; + int T = 40, lane_id = 0; + u32 addr, rdata; + + /* Set CMVAR_RANGE_H/L based on the link speed mode */ + switch (speed) { + case 10: + CMVAR_RANGE_H = S10G_CMVAR_RANGE_H; + CMVAR_RANGE_L = S10G_CMVAR_RANGE_L; + lane_num = 1; + break; + case 40: + CMVAR_RANGE_H = S10G_CMVAR_RANGE_H; + CMVAR_RANGE_L = S10G_CMVAR_RANGE_L; + lane_num = 4; + break; + case 25: + CMVAR_RANGE_H = S25G_CMVAR_RANGE_H; + CMVAR_RANGE_L = S25G_CMVAR_RANGE_L; + lane_num = 1; + break; + default: + kr_dbg(KR_MODE, "%s %d :Invalid speed\n", __func__, __LINE__); + break; + } + + /* 1. Read the temperature T just before RXS is enabled. */ + txgbe_e56_get_temp(hw, &T); + + /* 2. Define software variable RX_COARSE_MID_TD */ + if (T < -5) + RX_COARSE_MID_TD = 10; + else if (T < 30) + RX_COARSE_MID_TD = 9; + else if (T < 65) + RX_COARSE_MID_TD = 8; + else if (T < 100) + RX_COARSE_MID_TD = 7; + else + RX_COARSE_MID_TD = 6; + + + for (lane_id = 0; lane_id < lane_num; lane_id++) { + addr = 0x0b4 + (0x200 * lane_id); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 1, 0, CMVAR_RANGE_H); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x08c + (0x200 * lane_id); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 29, 29, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1540 + (0x02c * lane_id); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 22, 22, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1530 + (0x02c * lane_id); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 27, 27, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + } + rdata = rd32_ephy(hw, 0x1400); + field_set(&rdata, 19, 16, GENMASK(lane_num - 1, 0)); + txgbe_wr32_ephy(hw, 0x1400, rdata); + status |= read_poll_timeout(rd32_ephy, rdata, + (((rdata & 0x3f3f3f3f) & GENMASK(8 * lane_num - 1, 0)) + == (0x09090909 & GENMASK(8 * lane_num - 1, 0))), + 100, 200000, false, hw, + E56PHY_CTRL_FSM_RX_STAT_0_ADDR); + if (status) + kr_dbg(KR_MODE, "Wait fsm_rx_sts 1 = %x : %d, Wait rx_sts %s.\n", + rdata, status, status ? "FAILED" : "SUCCESS"); + + for (lane_id = 0; lane_id < lane_num; lane_id++) { + addr = 0x0b4 + (0x0200 * lane_id); + rdata = rd32_ephy(hw, addr); + OFFSET_CENTRE_RANGE_H[lane_id] = (rdata >> 4) & 0xf; + if (OFFSET_CENTRE_RANGE_H[lane_id] > RX_COARSE_MID_TD) + OFFSET_CENTRE_RANGE_H[lane_id] = OFFSET_CENTRE_RANGE_H[lane_id] - + RX_COARSE_MID_TD; + else + OFFSET_CENTRE_RANGE_H[lane_id] = RX_COARSE_MID_TD - + OFFSET_CENTRE_RANGE_H[lane_id]; + } + + //7. Do SEQ::RX_DISABLE to disable RXS. 
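+ /* Illustration of the range choice made further below (assumed readback
+ * values, not measured): with RX_COARSE_MID_TD = 8, a coarse code of 12
+ * seen in the CMVAR_RANGE_H pass gives a distance of 4, while a coarse
+ * code of 7 seen in the CMVAR_RANGE_L pass gives a distance of 1, so
+ * RANGE_FINAL for that lane resolves to CMVAR_RANGE_L.
+ */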
+ rdata = rd32_ephy(hw, 0x1400); + field_set(&rdata, 19, 16, 0x0); + txgbe_wr32_ephy(hw, 0x1400, rdata); + status |= read_poll_timeout(rd32_ephy, rdata, + (((rdata & 0x3f3f3f3f) & GENMASK(8 * lane_num - 1, 0)) + == (0x21212121 & GENMASK(8 * lane_num - 1, 0))), + 100, 200000, false, hw, + E56PHY_CTRL_FSM_RX_STAT_0_ADDR); + if (status) + kr_dbg(KR_MODE, "Wait fsm_rx_sts 2 = %x : %d, Wait rx_sts %s.\n", + rdata, status, status ? "FAILED" : "SUCCESS"); + rdata = rd32_ephy(hw, 0x15ec); + txgbe_wr32_ephy(hw, 0x15ec, rdata); + + for (lane_id = 0; lane_id < lane_num; lane_id++) { + addr = 0x0b4 + (0x200 * lane_id); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 1, 0, CMVAR_RANGE_L); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x08c + (0x200 * lane_id); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 29, 29, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1540 + (0x02c * lane_id); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 22, 22, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1530 + (0x02c * lane_id); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 27, 27, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + } + rdata = rd32_ephy(hw, 0x1400); + field_set(&rdata, 19, 16, 0xf); + txgbe_wr32_ephy(hw, 0x1400, rdata); + status |= read_poll_timeout(rd32_ephy, rdata, + (((rdata & 0x3f3f3f3f) & GENMASK(8 * lane_num - 1, 0)) + == (0x09090909 & GENMASK(8 * lane_num - 1, 0))), + 100, 200000, false, hw, + E56PHY_CTRL_FSM_RX_STAT_0_ADDR); + if (status) + kr_dbg(KR_MODE, "Wait fsm_rx_sts 3 = %x : %d, Wait rx_sts %s.\n", + rdata, status, status ? "FAILED" : "SUCCESS"); + + for (lane_id = 0; lane_id < lane_num; lane_id++) { + addr = 0x0b4 + (0x0200 * lane_id); + rdata = rd32_ephy(hw, addr); + OFFSET_CENTRE_RANGE_L[lane_id] = (rdata >> 4) & 0xf; + if (OFFSET_CENTRE_RANGE_L[lane_id] > RX_COARSE_MID_TD) + OFFSET_CENTRE_RANGE_L[lane_id] = OFFSET_CENTRE_RANGE_L[lane_id] - + RX_COARSE_MID_TD; + else + OFFSET_CENTRE_RANGE_L[lane_id] = RX_COARSE_MID_TD - + OFFSET_CENTRE_RANGE_L[lane_id]; + } + + for (lane_id = 0; lane_id < lane_num; lane_id++) { + RANGE_FINAL[lane_id] = OFFSET_CENTRE_RANGE_L[lane_id] < + OFFSET_CENTRE_RANGE_H[lane_id] ? + CMVAR_RANGE_L : CMVAR_RANGE_H; + kr_dbg(KR_MODE, "lane_id:%d-RANGE_L:%x-RANGE_H:%x-RANGE_FINAL:%x\n", + lane_id, OFFSET_CENTRE_RANGE_L[lane_id], + OFFSET_CENTRE_RANGE_H[lane_id], RANGE_FINAL[lane_id]); + } + + //7. Do SEQ::RX_DISABLE to disable RXS. + rdata = rd32_ephy(hw, 0x1400); + field_set(&rdata, 19, 16, 0x0); + txgbe_wr32_ephy(hw, 0x1400, rdata); + status |= read_poll_timeout(rd32_ephy, rdata, + (((rdata & 0x3f3f3f3f) & GENMASK(8 * lane_num - 1, 0)) + == (0x21212121 & GENMASK(8 * lane_num - 1, 0))), + 100, 200000, false, hw, + E56PHY_CTRL_FSM_RX_STAT_0_ADDR); + if (status) + kr_dbg(KR_MODE, "Wait fsm_rx_sts 4 = %x : %d, Wait rx_sts %s.\n", + rdata, status, status ? 
"FAILED" : "SUCCESS"); + rdata = rd32_ephy(hw, 0x15ec); + txgbe_wr32_ephy(hw, 0x15ec, rdata); + + for (lane_id = 0; lane_id < lane_num; lane_id++) { + addr = 0x0b4 + (0x0200 * lane_id); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 1, 0, RANGE_FINAL[lane_id]); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = 0x1544 + (lane_id * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 25, 25, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1538 + (lane_id * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 0, 0, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (lane_id * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 28, 28, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1538 + (lane_id * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 3, 3, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = 0x1544 + (lane_id * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 16, 16, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1534 + (lane_id * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 23, 23, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (lane_id * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 17, 17, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1534 + (lane_id * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 24, 24, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (lane_id * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 31, 31, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1538 + (lane_id * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 6, 6, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1530 + (0x02c * lane_id); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 27, 27, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + } + + //Do SEQ::RX_ENABLE + rdata = rd32_ephy(hw, 0x1400); + field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, GENMASK(lane_num - 1, 0)); + txgbe_wr32_ephy(hw, 0x1400, rdata); + + return status; +} + +static int E56phyRxsPostCdrLockTempTrackSeq(struct txgbe_adapter *adapter, + u32 speed) +{ + struct txgbe_hw *hw = &adapter->hw; + + int status = 0; + u32 rdata; + int SECOND_CODE; + int COARSE_CODE; + int FINE_CODE; + int ULTRAFINE_CODE; + + int CMVAR_SEC_LOW_TH ; + int CMVAR_UFINE_MAX = 0; + int CMVAR_FINE_MAX ; + int CMVAR_UFINE_UMAX_WRAP = 0; + int CMVAR_COARSE_MAX ; + int CMVAR_UFINE_FMAX_WRAP = 0; + int CMVAR_FINE_FMAX_WRAP = 0; + int CMVAR_SEC_HIGH_TH ; + int CMVAR_UFINE_MIN ; + int CMVAR_FINE_MIN ; + int CMVAR_UFINE_UMIN_WRAP ; + int CMVAR_COARSE_MIN ; + int CMVAR_UFINE_FMIN_WRAP ; + int CMVAR_FINE_FMIN_WRAP ; + + if(speed == 10) { + CMVAR_SEC_LOW_TH = S10G_CMVAR_SEC_LOW_TH; + CMVAR_UFINE_MAX = S10G_CMVAR_UFINE_MAX; + CMVAR_FINE_MAX = S10G_CMVAR_FINE_MAX; + CMVAR_UFINE_UMAX_WRAP = S10G_CMVAR_UFINE_UMAX_WRAP; + CMVAR_COARSE_MAX = S10G_CMVAR_COARSE_MAX; + CMVAR_UFINE_FMAX_WRAP = S10G_CMVAR_UFINE_FMAX_WRAP; + CMVAR_FINE_FMAX_WRAP = S10G_CMVAR_FINE_FMAX_WRAP; + CMVAR_SEC_HIGH_TH = S10G_CMVAR_SEC_HIGH_TH; + CMVAR_UFINE_MIN = S10G_CMVAR_UFINE_MIN; + CMVAR_FINE_MIN = S10G_CMVAR_FINE_MIN; + CMVAR_UFINE_UMIN_WRAP = S10G_CMVAR_UFINE_UMIN_WRAP; + CMVAR_COARSE_MIN = S10G_CMVAR_COARSE_MIN; + CMVAR_UFINE_FMIN_WRAP = S10G_CMVAR_UFINE_FMIN_WRAP; + CMVAR_FINE_FMIN_WRAP = S10G_CMVAR_FINE_FMIN_WRAP; + } else if (speed == 
25) { + CMVAR_SEC_LOW_TH = S25G_CMVAR_SEC_LOW_TH; + CMVAR_UFINE_MAX = S25G_CMVAR_UFINE_MAX; + CMVAR_FINE_MAX = S25G_CMVAR_FINE_MAX; + CMVAR_UFINE_UMAX_WRAP = S25G_CMVAR_UFINE_UMAX_WRAP; + CMVAR_COARSE_MAX = S25G_CMVAR_COARSE_MAX; + CMVAR_UFINE_FMAX_WRAP = S25G_CMVAR_UFINE_FMAX_WRAP; + CMVAR_FINE_FMAX_WRAP = S25G_CMVAR_FINE_FMAX_WRAP; + CMVAR_SEC_HIGH_TH = S25G_CMVAR_SEC_HIGH_TH; + CMVAR_UFINE_MIN = S25G_CMVAR_UFINE_MIN; + CMVAR_FINE_MIN = S25G_CMVAR_FINE_MIN; + CMVAR_UFINE_UMIN_WRAP = S25G_CMVAR_UFINE_UMIN_WRAP; + CMVAR_COARSE_MIN = S25G_CMVAR_COARSE_MIN; + CMVAR_UFINE_FMIN_WRAP = S25G_CMVAR_UFINE_FMIN_WRAP; + CMVAR_FINE_FMIN_WRAP = S25G_CMVAR_FINE_FMIN_WRAP; + } + + status |= E56phyRxRdSecondCode(hw, &SECOND_CODE); + + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + COARSE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_coarse_i); + FINE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i); + ULTRAFINE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i); + + if (SECOND_CODE <= CMVAR_SEC_LOW_TH) { + if (ULTRAFINE_CODE < CMVAR_UFINE_MAX) { + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i, ULTRAFINE_CODE + 1); + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (FINE_CODE < CMVAR_FINE_MAX) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_UMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = FINE_CODE + 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (COARSE_CODE < CMVAR_COARSE_MAX) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_FMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = CMVAR_FINE_FMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_coarse_i) = COARSE_CODE + 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_coarse_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else { + kr_dbg(KR_MODE, "ERROR: (SECOND_CODE <= CMVAR_SEC_LOW_TH) temperature tracking occurs Error condition\n"); + } + } else if (SECOND_CODE >= CMVAR_SEC_HIGH_TH) { + if (ULTRAFINE_CODE > CMVAR_UFINE_MIN) { + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i, ULTRAFINE_CODE - 1); + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (FINE_CODE > CMVAR_FINE_MIN) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_UMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = FINE_CODE - 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (COARSE_CODE > CMVAR_COARSE_MIN) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, 
ana_bbcdr_ultrafine_i) = CMVAR_UFINE_FMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = CMVAR_FINE_FMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_coarse_i) = COARSE_CODE - 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_coarse_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else { + kr_dbg(KR_MODE, "ERROR: (SECOND_CODE >= CMVAR_SEC_HIGH_TH) temperature tracking occurs Error condition\n"); + } + } + + return status; +} + +static int E56phyCtleBypassSeq(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int status = 0; + u32 rdata; + + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_0, ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_0, ovrd_en_ana_ctle_bypass_i, 1); + + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_3, ana_ctle_cz_cstm_i, 0); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_0, ovrd_en_ana_ctle_cz_cstm_i, 1); + + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_en_i) = 0; + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS0_OVRDVAL_1); + + EPHY_RREG(E56G__PMD_RXS0_OVRDEN_1); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_en_i) = 1; + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS0_OVRDEN_1); + + return status; +} + +static int E56phyRxsAdcAdaptSeq(struct txgbe_adapter *adapter, u32 bypassCtle) +{ + struct txgbe_hw *hw = &adapter->hw; + int lane_num = 0, lane_idx = 0; + u32 rdata = 0, addr = 0; + int status = 0; + + int timer = 0, j = 0; + + switch (adapter->bp_link_mode) { + case 10: + lane_num = 1; + break; + case 40: + lane_num = 4; + break; + case 25: + lane_num = 1; + break; + default: + kr_dbg(KR_MODE, "%s %d :Invalid speed\n", __func__, __LINE__); + break; + } + + for (lane_idx = 0; lane_idx < lane_num; lane_idx++) { + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + /* Wait RXS0-3_OVRDVAL[1]::rxs0-3_rx0_cdr_rdy_o = 1 */ + status = read_poll_timeout(rd32_ephy, rdata, (rdata & BIT(12)), + 100, 200000, false, hw, 0x1544); + if (status) + kr_dbg(KR_MODE, "rxs%d_rx0_cdr_rdy_o = %x, %s.\n", + lane_idx, rdata, + status ? "FAILED" : "SUCCESS"); + } + for (lane_idx = 0; lane_idx < lane_num; lane_idx++) { + //4. Disable VGA and CTLE training so that they don't interfere with ADC calibration + //a. Set ALIAS::RXS::VGA_TRAIN_EN = 0b0 + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 7, 7, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1534 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 14, 14, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //b. Set ALIAS::RXS::CTLE_TRAIN_EN = 0b0 + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 9, 9, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1534 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 16, 16, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //5. Perform ADC interleaver calibration + //a. 
Remove the OVERRIDE on ALIAS::RXS::ADC_INTL_CAL_DONE + addr = 0x1534 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 24, 24, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 16, 16, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + /* Wait rxs0_rx0_adc_intl_cal_done_o bit17 = 1 */ + status = read_poll_timeout(rd32_ephy, rdata, (rdata & BIT(17)), + 100, 200000, false, hw, addr); + if (status) + kr_dbg(KR_MODE, "rxs0_rx0_adc_intl_cal_done_o = %x, %s.\n", rdata, + status ? "FAILED" : "SUCCESS"); + + /* 6. Perform ADC offset adaptation and ADC gain adaptation, + * repeat them a few times and after that keep it disabled. + */ + for (j = 0; j < 16; j++) { + //a. ALIAS::RXS::ADC_OFST_ADAPT_EN = 0b1 + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 25, 25, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //b. Wait for 1ms or greater + //usec_delay(1000); + /* set ovrd_en_rxs0_rx0_adc_ofst_adapt_done_o bit1=0 */ + addr = 0x1538 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 1, 1, 0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + /* Wait rxs0_rx0_adc_ofst_adapt_done_o bit26 = 0 */ + status = read_poll_timeout(rd32_ephy, rdata, + !(rdata & BIT(26)), + 100, 200000, false, hw, addr); + if (status) + kr_dbg(KR_MODE, + "rxs0_rx0_adc_ofst_adapt_done_o %d = %x, %s.\n", + j, rdata, status ? "FAILED" : "SUCCESS"); + + //c. ALIAS::RXS::ADC_OFST_ADAPT_EN = 0b0 + rdata = 0x0000; + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 25, 25, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + //d. ALIAS::RXS::ADC_GAIN_ADAPT_EN = 0b1 + rdata = 0x0000; + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 28, 28, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //e. Wait for 1ms or greater + //usec_delay(1000); + /* set ovrd_en_rxs0_rx0_adc_ofst_adapt_done_o bit1=0 */ + addr = 0x1538 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 1, 1, 0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + /* Wait rxs0_rx0_adc_gain_adapt_done_o bit29 = 0 */ + status = read_poll_timeout(rd32_ephy, rdata, !(rdata & BIT(29)), + 100, 200000, false, hw, addr); + if (status) + kr_dbg(KR_MODE, + "rxs0_rx0_adc_gain_adapt_done_o %d = %x, %s.\n", + j, rdata, status ? "FAILED" : "SUCCESS"); + + //f. ALIAS::RXS::ADC_GAIN_ADAPT_EN = 0b0 + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 28, 28, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + } + //g. Repeat #a to #f total 16 times + + /* 7. Perform ADC interleaver adaptation for 10ms or greater, + * and after that disable it + */ + //a. ALIAS::RXS::ADC_INTL_ADAPT_EN = 0b1 + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 31, 31, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + //b. Wait for 10ms or greater + msleep(20); + + //c. ALIAS::RXS::ADC_INTL_ADAPT_EN = 0b0 + /* set ovrd_en_rxs0_rx0_adc_intl_adapt_en_i=0*/ + addr = 0x1538 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 6, 6, 0); + txgbe_wr32_ephy(hw, addr, rdata); + + /* 8. 
Now re-enable VGA and CTLE trainings, so that it continues + * to adapt tracking changes in temperature or voltage + * <1>Set ALIAS::RXS::VGA_TRAIN_EN = 0b1 + */ + /* set rxs0_rx0_vga_train_en_i=1*/ + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 7, 7, 0x1); + if (bypassCtle == 0) + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_en_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + + //<2>wait for ALIAS::RXS::VGA_TRAIN_DONE = 1 + /* set ovrd_en_rxs0_rx0_vga_train_done_o = 0*/ + addr = 0x1534 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 15, 15, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + /* Wait rxs0_rx0_vga_train_done_o bit8 = 0 */ + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + status = read_poll_timeout(rd32_ephy, rdata, (rdata & BIT(8)), + 100, 300000, false, hw, addr); + if (status) + kr_dbg(KR_MODE, "rxs0_rx0_vga_train_done_o = %x, %s.\n", rdata, + status ? "FAILED" : "SUCCESS"); + + if (bypassCtle == 0) { + addr = 0x1534 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_ctle_train_done_o) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0; + timer = 0; + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + while (EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, + rxs0_rx0_ctle_train_done_o) != 1) { + rdata = rd32_ephy(hw, addr); + udelay(500); + + if (timer++ > PHYINIT_TIMEOUT) + break; + } + } + + //a. Remove the OVERRIDE on ALIAS::RXS::VGA_TRAIN_EN + addr = 0x1534 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 15, 15, 0); + //b. Remove the OVERRIDE on ALIAS::RXS::CTLE_TRAIN_EN + if (bypassCtle == 0) + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_ctle_train_en_i) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + } + + return status; +} + +static int E56phyRxsCalibAdaptSeq(struct txgbe_adapter *adapter, u8 bplinkmode, + u32 bypassCtle) +{ + struct txgbe_hw *hw = &adapter->hw; + int lane_num = 0, lane_idx = 0; + int status = 0; + u32 rdata, addr; + + switch (bplinkmode) { + case 10: + lane_num = 1; + break; + case 40: + lane_num = 4; + break; + case 25: + lane_num = 1; + break; + default: + kr_dbg(KR_MODE, "%s %d :Invalid speed\n", __func__, __LINE__); + break; + } + + for (lane_idx = 0; lane_idx < lane_num; lane_idx++) { + rdata = 0x0000; + addr = 0x1544 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 25, 25, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1538 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 0, 0, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 28, 28, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1538 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 3, 3, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = 0x1544 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 16, 16, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1534 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 23, 23, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 17, 17, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1534 + (lane_idx * 
E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 24, 24, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 31, 31, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1538 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 6, 6, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + } + if (bypassCtle != 0) + status |= E56phyCtleBypassSeq(adapter); + + status |= E56phyRxsOscInitForTempTrackRange(adapter, bplinkmode); + + /* Wait an fsm_rx_sts 25G */ + kr_dbg(KR_MODE, + "Wait CTRL_FSM_RX_STAT[0]::ctrl_fsm_rx0_st to be ready ...\n"); + + status |= read_poll_timeout(rd32_ephy, rdata, + (((rdata & 0x3f3f3f3f) & GENMASK(8 * lane_num - 1, 0)) + == (0x1b1b1b1b & GENMASK(8 * lane_num - 1, 0))), + 1000, 300000, false, hw, + E56PHY_CTRL_FSM_RX_STAT_0_ADDR); + kr_dbg(KR_MODE, "wait ctrl_fsm_rx0_st = %x, %s.\n", + rdata, status ? "FAILED" : "SUCCESS"); + + + return status; +} + +static int E56phyCmsCfgForTempTrackRange(struct txgbe_adapter *adapter, u8 bplinkmode) +{ + struct txgbe_hw *hw = &adapter->hw; + int status = 0, T = 40; + u32 addr, rdata; + + status = txgbe_e56_get_temp(hw, &T); + if (T < 40) { + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_LPF_SETCODE_CALIB_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDVAL_2_ANA_LCPLL_HF_LPF_SETCODE_CALIB_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_LF_LPF_SETCODE_CALIB_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_7_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDVAL_7_ANA_LCPLL_LF_LPF_SETCODE_CALIB_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + } else if (T > 70) { + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_LPF_SETCODE_CALIB_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDVAL_2_ANA_LCPLL_HF_LPF_SETCODE_CALIB_I, 0x3); + txgbe_wr32_ephy(hw, addr, rdata); + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_LF_LPF_SETCODE_CALIB_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_7_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDVAL_7_ANA_LCPLL_LF_LPF_SETCODE_CALIB_I, 0x3); + txgbe_wr32_ephy(hw, addr, rdata); + } else { + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_HF_TEST_IN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 24, 24, 0x1); + field_set(&rdata, 31, 29, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_5_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 1, 0, 0x0); + 
txgbe_wr32_ephy(hw, addr, rdata); + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_LF_TEST_IN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_9_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 24, 24, 0x1); + field_set(&rdata, 31, 29, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_10_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 1, 0, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + } + return status; +} + +static int E56phyTxFfeCfg(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + /* Setting the TX EQ main/pre1/pre2/post value */ + adapter->aml_txeq.main = S25G_TX_FFE_CFG_DAC_MAIN; + adapter->aml_txeq.pre1 = S25G_TX_FFE_CFG_DAC_PRE1; + adapter->aml_txeq.pre2 = S25G_TX_FFE_CFG_DAC_PRE2; + adapter->aml_txeq.post = S25G_TX_FFE_CFG_DAC_POST; + txgbe_wr32_ephy(hw, 0x141c, adapter->aml_txeq.main); + txgbe_wr32_ephy(hw, 0x1420, adapter->aml_txeq.pre1); + txgbe_wr32_ephy(hw, 0x1424, adapter->aml_txeq.pre2); + txgbe_wr32_ephy(hw, 0x1428, adapter->aml_txeq.post); + + return 0; +} + + +static int E56phy25gCfg(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 addr, rdata; + + rdata = 0x0000; + addr = E56PHY_CMS_PIN_OVRDVAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_PIN_OVRDVAL_0_INT_PLL0_TX_SIGNAL_TYPE_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_PIN_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_PIN_OVRDEN_0_OVRD_EN_PLL0_TX_SIGNAL_TYPE_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDVAL_2_ANA_LCPLL_HF_VCO_SWING_CTRL_I, 0xf); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_VCO_SWING_CTRL_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 23, 0, 0x260000); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_HF_TEST_IN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_TXS_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_TXS_CFG_1_ADAPTATION_WAIT_CNT_X256, 0xf); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_WKUP_CNT_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_WKUP_CNTLDO_WKUP_CNT_X32, 0xff); + field_set(&rdata, E56PHY_TXS_WKUP_CNTDCC_WKUP_CNT_X32, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_PIN_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 27, 24, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_PIN_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_PIN_OVRDEN_0_OVRD_EN_TX0_EFUSE_BITS_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_ANA_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_ANA_OVRDVAL_1_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); 
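+	/* Descriptive note: each analog override in this sequence is applied in two steps - the forced value is written into the corresponding *_ANA_OVRDVAL_* register, then the matching *_ANA_OVRDEN_* bit is set so the override takes effect; every access uses the same rd32_ephy()/field_set()/txgbe_wr32_ephy() read-modify-write pattern. */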
+ + rdata = 0x0000; + addr = E56PHY_TXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + E56phyTxFfeCfg(adapter); + + rdata = 0x0000; + addr = E56PHY_RXS_RXS_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_RXS_CFG_0_DSER_DATA_SEL, 0x0); + field_set(&rdata, E56PHY_RXS_RXS_CFG_0_TRAIN_CLK_GATE_BYPASS_EN, 0x1fff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_1_PREDIV1, 0x700); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_1_TARGET_CNT1, 0x2418); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_OSC_RANGE_SEL1, 0x1); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_VCO_CODE_INIT, 0x7fb); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_OSC_CURRENT_BOOST_EN1, 0x0); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_BBCDR_CURRENT_BOOST1, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_5_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_SDM_WIDTH, 0x3); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_PRELOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_POSTLOCK, 0x3); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_POSTLOCK, 0xa); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_PRELOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BBCDR_RDY_CNT, 0x3); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_PRELOCK, 0x7); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_POSTLOCK, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_INTL_CONFIG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_INTL_CONFIG_0_ADC_INTL2SLICE_DELAY1, 0x3333); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_INTL_CONFIG_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_INTL_CONFIG_2_INTERLEAVER_HBW_DISABLE1, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_LTH, 0x56); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_UTH, 0x6a); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_LTH, 0x1f8); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_UTH, 0xf0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_LTH, 0x100); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_UTH, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_LTH, 0x4); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_UTH, 0x37); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_TXFFE_TRAIN_MOD_TYPE, 0x38); + txgbe_wr32_ephy(hw, 
addr, rdata); + + rdata = 0x0000; + addr = E56G__RXS0_FOM_18__ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56G__RXS0_FOM_18__DFE_COEFFL_HINT__MSB, + E56G__RXS0_FOM_18__DFE_COEFFL_HINT__LSB, 0x0); + //change 0x90 to 0x0 to fix 25G link up keep when cable unplugged + field_set(&rdata, E56G__RXS0_FOM_18__DFE_COEFFH_HINT__MSB, + E56G__RXS0_FOM_18__DFE_COEFFH_HINT__LSB, 0x0); + field_set(&rdata, E56G__RXS0_FOM_18__DFE_COEFF_HINT_LOAD__MSB, + E56G__RXS0_FOM_18__DFE_COEFF_HINT_LOAD__LSB, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_VGA_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_0_VGA_TARGET, 0x34); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_VGA_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT0, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT0, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT123, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT123, 0xa); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT0, 0x9); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT123, 0x9); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_1_LFEQ_LUT, 0x1ffffea); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P1, 18); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P2, 0); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P3, 0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P1, 1); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P2, 0); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P3, 0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_SLICE_DATA_AVG_CNT, 0x3); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_DATA_AVG_CNT, 0x3); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_FE_OFFSET_DAC_CLK_CNT_X8, 0xc); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_1_SAMP_ADAPT_CFG, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_FFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_FFE_TRAINING_0_FFE_TAP_EN, 0xf9ff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_IDLE_DETECT_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX, 0xa); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + //txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDVAL_11, ana_test_adc_clkgen_i, 0x0); + //txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_2, ovrd_en_ana_test_adc_clkgen_i, 0x0); + addr = 0x6cc; + rdata = 0x8020000; + txgbe_wr32_ephy(hw, addr, 
rdata); + addr = 0x94; + rdata = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_0_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 4, 0, 0x0); + field_set(&rdata, 14, 13, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_BBCDR_VCOFILT_BYP_I, 0x1); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_TEST_BBCDR_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_15_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 2, 0, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_17_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_17_ANA_VGA2_BOOST_CSTM_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_ANABS_CONFIG_I, 0x1); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_VGA2_BOOST_CSTM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_14_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 13, 13, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_EYE_SCAN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_EYE_SCAN_1_EYE_SCAN_REF_TIMER, 0x400); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_RINGO_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 21, 12, 0x366); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_PMD_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_3_CTRL_FSM_TIMEOUT_X64K, 0x80); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_PMD_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_ON_PERIOD_X64K, 0x18); + field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_PERIOD_X512K, 0x3e); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_PMD_CFG_5_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_5_USE_RECENT_MARKER_OFFSET, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_CONT_ON_ADC_GAIN_CAL_ERR, 0x1); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_DO_RX_ADC_OFST_CAL, 0x3); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_RX_ERR_ACTION_EN, 0x40); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST0_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST1_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST2_WAIT_CNT_X4096, 0xff); + 
field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST3_WAIT_CNT_X4096, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST4_WAIT_CNT_X4096, 0x1); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST5_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST6_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST7_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST8_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST9_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST10_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST11_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST12_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST13_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST14_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST15_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_7_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST4_EN, 0x4bf); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST5_EN, 0xc4bf); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_8_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_8_TRAIN_ST7_EN, 0x47ff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_12_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_12_TRAIN_ST15_EN, 0x67ff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_13_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST0_DONE_EN, 0x8001); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST1_DONE_EN, 0x8002); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_14_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_14_TRAIN_ST3_DONE_EN, 0x8008); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_15_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_15_TRAIN_ST4_DONE_EN, 0x8004); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_17_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_17_TRAIN_ST8_DONE_EN, 0x20c0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_18_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_18_TRAIN_ST10_DONE_EN, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_29_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_29_TRAIN_ST15_DC_EN, 0x3f6d); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_33_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN0_RATE_SEL, 0x8000); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN1_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = 
E56PHY_CTRL_FSM_CFG_34_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN2_RATE_SEL, 0x8000); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN3_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_KRT_TFSM_CFG_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X1000K, 0x49); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X8000K, 0x37); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_HOLDOFF_TIMER_X256K, 0x2f); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_FETX_FFE_TRAIN_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_FETX_FFE_TRAIN_CFG_0_KRT_FETX_INIT_FFE_CFG_2, 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + return 0; +} + +static int E56phy10gCfg(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 addr, rdata; + + rdata = 0x0000; + addr = E56G__CMS_ANA_OVRDVAL_7_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G__CMS_ANA_OVRDVAL_7 *)&rdata)->ana_lcpll_lf_vco_swing_ctrl_i = 0xf; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56G__CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G__CMS_ANA_OVRDEN_1 *)&rdata)->ovrd_en_ana_lcpll_lf_vco_swing_ctrl_i = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56G__CMS_ANA_OVRDVAL_9_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 23, 0, 0x260000); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56G__CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G__CMS_ANA_OVRDEN_1 *)&rdata)->ovrd_en_ana_lcpll_lf_test_in_i = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_TXS_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_TXS_CFG_1_ADAPTATION_WAIT_CNT_X256, 0xf); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_WKUP_CNT_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_WKUP_CNTLDO_WKUP_CNT_X32, 0xff); + field_set(&rdata, E56PHY_TXS_WKUP_CNTDCC_WKUP_CNT_X32, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_PIN_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 19, 16, 0x6); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_PIN_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_PIN_OVRDEN_0_OVRD_EN_TX0_EFUSE_BITS_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_ANA_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_ANA_OVRDVAL_1_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + E56phyTxFfeCfg(adapter); + + rdata = 0x0000; + addr = E56PHY_RXS_RXS_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_RXS_CFG_0_DSER_DATA_SEL, 0x0); + field_set(&rdata, E56PHY_RXS_RXS_CFG_0_TRAIN_CLK_GATE_BYPASS_EN, 0x1fff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_OSC_CAL_N_CDR_0 *)&rdata)->prediv0 = 0xfa0; + ((E56G_RXS0_OSC_CAL_N_CDR_0 *)&rdata)->target_cnt0= 0x203a; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_4_ADDR; + rdata = rd32_ephy(hw, addr); + 
((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->osc_range_sel0= 0x2; + ((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->vco_code_init= 0x7ff; + ((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->osc_current_boost_en0= 0x1; + ((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->bbcdr_current_boost0 = 0x0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_5_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_SDM_WIDTH, 0x3); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_PRELOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_POSTLOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_POSTLOCK, 0xc); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_PRELOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BBCDR_RDY_CNT, 0x3); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_PRELOCK, 0x7); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_POSTLOCK, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_INTL_CONFIG_0_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_INTL_CONFIG_0 *)&rdata)->adc_intl2slice_delay0 = 0x5555; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_INTL_CONFIG_2_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_INTL_CONFIG_2 *)&rdata)->interleaver_hbw_disable0 = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_LTH, 0x56); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_UTH, 0x6a); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_LTH, 0x1e8); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_UTH, 0x78); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_LTH, 0x100); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_UTH, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_LTH, 0x4); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_UTH, 0x37); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_TXFFE_TRAIN_MOD_TYPE, 0x38); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_VGA_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_0_VGA_TARGET, 0x34); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_VGA_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT0, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT0, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT123, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT123, 0xa); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT0, 0x9); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT123, 0x9); + txgbe_wr32_ephy(hw, addr, rdata); + 
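+	/* Descriptive note: from here the 10G receive-path configuration broadly follows the same register sequence as the 25G path above (CTLE/VGA training, offset and gain calibration, FFE tap enables, idle detection), with rate-specific values programmed into the individual fields. */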
+ rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_1_LFEQ_LUT, 0x1ffffea); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P1, 0x18); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P2, 0); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P3, 0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P1, 1); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P2, 0); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P3, 0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_SLICE_DATA_AVG_CNT, 0x3); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_DATA_AVG_CNT, 0x3); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_FE_OFFSET_DAC_CLK_CNT_X8, 0xc); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_1_SAMP_ADAPT_CFG, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_FFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_FFE_TRAINING_0_FFE_TAP_EN, 0xf9ff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_IDLE_DETECT_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX, 0xa); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + //txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDVAL_11, ana_test_adc_clkgen_i, 0x0); + //txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_2, ovrd_en_ana_test_adc_clkgen_i, 0x0); + addr = 0x6cc; + rdata = 0x8020000; + txgbe_wr32_ephy(hw, addr, rdata); + addr = 0x94; + rdata = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_0_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 4, 0, 0x6); + field_set(&rdata, 14, 13, 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_BBCDR_VCOFILT_BYP_I, 0x1); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_TEST_BBCDR_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_15_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 2, 0, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_17_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_17_ANA_VGA2_BOOST_CSTM_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_3_ADDR; + rdata = rd32_ephy(hw, addr); + 
field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_ANABS_CONFIG_I, 0x1); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_VGA2_BOOST_CSTM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_14_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_EYE_SCAN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_EYE_SCAN_1_EYE_SCAN_REF_TIMER, 0x400); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_RINGO_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 21, 12, 0x366); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_PMD_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_3_CTRL_FSM_TIMEOUT_X64K, 0x80); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_PMD_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_ON_PERIOD_X64K, 0x18); + field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_PERIOD_X512K, 0x3e); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_PMD_CFG_5_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_5_USE_RECENT_MARKER_OFFSET, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_CONT_ON_ADC_GAIN_CAL_ERR, 0x1); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_DO_RX_ADC_OFST_CAL, 0x3); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_RX_ERR_ACTION_EN, 0x40); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST0_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST1_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST2_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST3_WAIT_CNT_X4096, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST4_WAIT_CNT_X4096, 0x1); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST5_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST6_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST7_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST8_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST9_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST10_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST11_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST12_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST13_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST14_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST15_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = 
E56PHY_CTRL_FSM_CFG_7_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST4_EN, 0x4bf); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST5_EN, 0xc4bf); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_8_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_8_TRAIN_ST7_EN, 0x47ff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_12_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_12_TRAIN_ST15_EN, 0x67ff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_13_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST0_DONE_EN, 0x8001); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST1_DONE_EN, 0x8002); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_14_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_14_TRAIN_ST3_DONE_EN, 0x8008); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_15_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_15_TRAIN_ST4_DONE_EN, 0x8004); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_17_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_17_TRAIN_ST8_DONE_EN, 0x20c0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_18_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_18_TRAIN_ST10_DONE_EN, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_29_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_29_TRAIN_ST15_DC_EN, 0x3f6d); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_33_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN0_RATE_SEL, 0x8000); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN1_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_34_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN2_RATE_SEL, 0x8000); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN3_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_KRT_TFSM_CFG_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X1000K, 0x49); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X8000K, 0x37); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_HOLDOFF_TIMER_X256K, 0x2f); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_FETX_FFE_TRAIN_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_FETX_FFE_TRAIN_CFG_0_KRT_FETX_INIT_FFE_CFG_2, 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + return 0; +} + +static int setphylinkmode(struct txgbe_adapter *adapter, + u8 bplinkmode, u32 bypassCtle) +{ + struct txgbe_hw *hw = &adapter->hw; + int lane_num = 0, status = 0; + u32 rdata = 0; + + u32 speed_select = 0; + u32 pcs_type_sel = 0; + u32 cns_en = 0; + u32 rsfec_en = 0; + u32 pma_type = 0; + u32 an0_rate_select = 0; + + switch (bplinkmode) { + case 10: + bplinkmode = 10; + lane_num = 1; + speed_select = 0; /* 10 Gb/s */ + pcs_type_sel = 0; /* 10GBASE-R PCS Type */ + cns_en = 0; /* CNS_EN disable */ + rsfec_en = 0; /* RS-FEC disable */ + pma_type = 0xb; /* 10GBASE-KR PMA/PMD type */ + 
an0_rate_select = 2; /* 10G-KR */ + break; + case 40: + bplinkmode = 40; + lane_num = 4; + speed_select = 3; /* 40 Gb/s */ + pcs_type_sel = 4; /* 40GBASE-R PCS Type */ + cns_en = 0; /* CNS_EN disable */ + rsfec_en = 0; /* RS-FEC disable */ + pma_type = 0b0100001; /* 40GBASE-CR PMA/PMD type */ + an0_rate_select = 4; /* 40G-KR: 3 40G-CR: 4 */ + break; + case 25: + bplinkmode = 25; + lane_num = 1; + speed_select = 5; /* 25 Gb/s */ + pcs_type_sel = 7; /* 25GBASE-R PCS Type */ + cns_en = 1; /* CNS_EN */ + rsfec_en = 1; /* RS-FEC enable*/ + pma_type = 0b0111001; /* 25GBASE-KR PMA/PMD type */ + an0_rate_select = 9; /* 9/10/17 25GK/CR-S or 25GK/CR */ + break; + default: + kr_dbg(KR_MODE, "%s %d :Invalid bplinkmode\n", __func__, __LINE__); + break; + } + + adapter->curbp_link_mode = bplinkmode; + /* To switch to the 40G mode Ethernet operation, complete the following steps:*/ + /* 1. Initiate the vendor-specific software reset by programming + * the VR_RST field (bit [15]) of the VR_PCS_DIG_CTRL1 register to 1. + */ + rdata = txgbe_rd32_epcs(hw, 0x038000); + txgbe_wr32_epcs(hw, 0x038000, rdata | BIT(15)); + + /* 2. Wait for the hardware to clear the value for the VR_RST + * field (bit [15]) of the VR_PCS_DIG_CTRL1 register. + */ + kr_dbg(KR_MODE, "Wait for the bit [15] (VR_RST) to get cleared.\n"); + status = read_poll_timeout(txgbe_rd32_epcs, rdata, + FIELD_GET_M(BIT(15), rdata) == 0, 100, + 200000, false, hw, + 0x038000); + kr_dbg(KR_MODE, "Wait PHY VR_RST = %x, Wait VR_RST %s.\n", + rdata, status ? "FAILED" : "SUCCESS"); + + /* wait rx/tx/cm powerdn_st according pmd 50 2.0.5 */ + status = read_poll_timeout(rd32_ephy, rdata, + (rdata & GENMASK(3, 0)) == 0x9, 100, + 200000, false, hw, 0x14d4); + kr_dbg(KR_MODE, "wait ctrl_fsm_cm_st = %x, %s.\n", + rdata, status ? "FAILED" : "SUCCESS"); + + /* 3. Write 4'b0011 to bits [5:2] of the SR_PCS_CTRL1 register. + * 10G: 0 25G: 5 40G: 3 + */ + rdata = txgbe_rd32_epcs(hw, 0x030000); + field_set(&rdata, 5, 2, speed_select); + txgbe_wr32_epcs(hw, 0x030000, rdata); + + /* 4. Write pcs mode sel to bits [3:0] of the SR_PCS_CTRL2 register. + * 10G: 0 25G: 4'b0111 40G: 4'b0100 + */ + rdata = txgbe_rd32_epcs(hw, 0x030007); + field_set(&rdata, 3, 0, pcs_type_sel); + txgbe_wr32_epcs(hw, 0x030007, rdata); + + /* 0 1 1 1 0 0 1 : 25GBASE-KR or 25GBASE-KR-S PMA/PMD type + * 0 1 1 1 0 0 0 : 25GBASE-CR or 25GBASE-CR-S PMA/PMD type + * 0 1 0 0 0 0 1 : 40GBASE-CR4 PMA/PMD type + * 0 1 0 0 0 0 0 : 40GBASE-KR4 PMA/PMD type + * 0 0 0 1 0 1 1 : 10GBASE-KR PMA/PMD type + */ + rdata = txgbe_rd32_epcs(hw, 0x010007); + field_set(&rdata, 6, 0, pma_type); + txgbe_wr32_epcs(hw, 0x010007, rdata); + + /* 5. Write only 25g en to Bits [1:0] of VR_PCS_DIG_CTRL3 register. */ + rdata = txgbe_rd32_epcs(hw, 0x38003); + field_set(&rdata, 1, 0, cns_en); + txgbe_wr32_epcs(hw, 0x38003, rdata); + + /* 6. Program PCS_AM_CNT field of VR_PCS_AM_CNT register to 'd16383 to + * configure the alignment marker interval. To speed-up simulation, + * program a smaller value to this field. + */ + if (bplinkmode == 40) + txgbe_wr32_epcs(hw, 0x38018, 16383); + + /* 7. Program bit [2] of SR_PMA_RS_FEC_CTRL register to 0 + * if previously 1 (as RS-FEC is supported in 25G Mode). + */ + + rdata = txgbe_rd32_epcs(hw, 0x100c8); + field_set(&rdata, 2, 2, rsfec_en); + txgbe_wr32_epcs(hw, 0x100c8, rdata); + + /* 8. To enable BASE-R FEC (if desired), set bit [0]. + * in SR_PMA_KR_FEC_CTRL register + */ + + /* 3. temp applied */ + //status = E56phyCmsCfgForTempTrackRange(adapter, bplinkmode); + + /* 4. 
set phy an status to 0 */ + //txgbe_wr32_ephy(hw, 0x1640, 0x0000); + rdata = rd32_ephy(hw, 0x1434); + field_set(&rdata, 7, 4, 0xe); // anstatus in single mode just set to 0xe + txgbe_wr32_ephy(hw, 0x1434, rdata); + + /* 9. Program Enterprise 56G PHY regs through its own APB interface: + * a. Program PHY registers as mentioned in Table 6-6 on page 1197 to + * configure the PHY to 40G + * Mode. For fast-simulation mode, additionally program, + * the registers shown in the Table 6-7 on page 1199 + * b. Enable the PMD by setting pmd_en field in PMD_CFG[0] (0x1400) + * register + */ + + rdata = 0x0000; + rdata = rd32_ephy(hw, ANA_OVRDVAL0); + field_set(&rdata, 29, 29, 0x1); + field_set(&rdata, 1, 1, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDVAL0, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, ANA_OVRDVAL5); + field_set(&rdata, 24, 24, 0x0); + txgbe_wr32_ephy(hw, ANA_OVRDVAL5, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, ANA_OVRDEN0); + field_set(&rdata, 1, 1, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDEN0, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, ANA_OVRDEN1); + field_set(&rdata, 30, 30, 0x1); + field_set(&rdata, 25, 25, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDEN1, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, PLL0_CFG0); + field_set(&rdata, 25, 24, 0x1); + field_set(&rdata, 17, 16, 0x3); + txgbe_wr32_ephy(hw, PLL0_CFG0, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, PLL0_CFG2); + field_set(&rdata, 12, 8, 0x4); + txgbe_wr32_ephy(hw, PLL0_CFG2, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, PLL1_CFG0); + field_set(&rdata, 25, 24, 0x1); + field_set(&rdata, 17, 16, 0x3); + txgbe_wr32_ephy(hw, PLL1_CFG0, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, PLL1_CFG2); + field_set(&rdata, 12, 8, 0x8); + txgbe_wr32_ephy(hw, PLL1_CFG2, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, PLL0_DIV_CFG0); + field_set(&rdata, 18, 8, 0x294); + field_set(&rdata, 4, 0, 0x8); + txgbe_wr32_ephy(hw, PLL0_DIV_CFG0, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, DATAPATH_CFG0); + field_set(&rdata, 30, 28, 0x7); + field_set(&rdata, 26, 24, 0x5); + if (bplinkmode == 10 || bplinkmode == 40) + field_set(&rdata, 18, 16, 0x5); + else if (bplinkmode == 25) + field_set(&rdata, 18, 16, 0x3); + field_set(&rdata, 14, 12, 0x5); + field_set(&rdata, 10, 8, 0x5); + txgbe_wr32_ephy(hw, DATAPATH_CFG0, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, DATAPATH_CFG1); + field_set(&rdata, 26, 24, 0x5); + field_set(&rdata, 10, 8, 0x5); + if (bplinkmode == 10 || bplinkmode == 40) { + field_set(&rdata, 18, 16, 0x5); + field_set(&rdata, 2, 0, 0x5); + } else if (bplinkmode == 25) { + field_set(&rdata, 18, 16, 0x3); + field_set(&rdata, 2, 0, 0x3); + } + txgbe_wr32_ephy(hw, DATAPATH_CFG1, rdata); + + rdata = rd32_ephy(hw, AN_CFG1); + field_set(&rdata, 4, 0, an0_rate_select); + txgbe_wr32_ephy(hw, AN_CFG1, rdata); + + status = E56phyCmsCfgForTempTrackRange(adapter, bplinkmode); + + if (bplinkmode == 10) + E56phy10gCfg(adapter); + else if (bplinkmode == 25) + E56phy25gCfg(adapter); + else if (bplinkmode == 40) + txgbe_e56_cfg_40g(hw); + + return status; +} + +int txgbe_e56_set_phylinkmode(struct txgbe_adapter *adapter, + u8 bplinkmode, u32 bypassCtle) +{ + struct txgbe_hw *hw = &adapter->hw; + int status = 0; + u32 rdata; + + switch (bplinkmode) { + case TXGBE_LINK_SPEED_10GB_FULL: + case 10: + bplinkmode = 10; + break; + case TXGBE_LINK_SPEED_40GB_FULL: + case 40: + bplinkmode = 40; + break; + case TXGBE_LINK_SPEED_25GB_FULL: + case 25: + bplinkmode = 25; + break; + default: + kr_dbg(KR_MODE, "%s %d :Invalid 
bplinkmode\n", __func__, __LINE__); + break; + } + + adapter->an_done = false; + if (adapter->curbp_link_mode == 10) + return 0; + kr_dbg(KR_MODE, "Setup to backplane mode ==========\n"); + + if (adapter->backplane_an) { + u32 backplane_mode = 0; + u32 fec_advertise = 0; + + adapter->an_done = false; + /* pcs + phy rst */ + rdata = rd32(hw, 0x1000c); + if (hw->bus.lan_id == 1) + rdata |= BIT(16); + else + rdata |= BIT(19); + wr32(hw, 0x1000c, rdata); + msleep(20); + + /* clear interrupt */ + txgbe_wr32_epcs(hw, 0x070000, 0); + txgbe_wr32_epcs(hw, 0x030000, 0x8000); + rdata = txgbe_rd32_epcs(hw, 0x070000); + field_set(&rdata, 12, 12, 0x1); + txgbe_wr32_epcs(hw, 0x070000, rdata); + txgbe_wr32_epcs(hw, 0x078002, 0x0000); + /* pcs case fec en to work around first */ + txgbe_wr32_epcs(hw, 0x100ab, 1); + + if (txgbe_is_backplane(hw)) { + if ((hw->device_id & 0xFF) == 0x10) { + backplane_mode |= 0x80; + fec_advertise |= TXGBE_10G_FEC_ABL; + } else if ((hw->device_id & 0xFF) == 0x25) { + backplane_mode |= 0xc000; + fec_advertise |= TXGBE_25G_RS_FEC_REQ | + TXGBE_25G_BASE_FEC_REQ; + } else if ((hw->device_id & 0xFF) == 0x40) { + backplane_mode |= BIT(9) | BIT(8); + fec_advertise |= TXGBE_10G_FEC_ABL; + } + } else { + if ((hw->phy.fiber_suppport_speed & TXGBE_LINK_SPEED_10GB_FULL) + == TXGBE_LINK_SPEED_10GB_FULL) { + backplane_mode |= 0x80; + fec_advertise |= TXGBE_10G_FEC_ABL; + } + + if ((hw->phy.fiber_suppport_speed & TXGBE_LINK_SPEED_25GB_FULL) + == TXGBE_LINK_SPEED_25GB_FULL) { + backplane_mode |= 0xc000; + fec_advertise |= TXGBE_25G_RS_FEC_REQ | + TXGBE_25G_BASE_FEC_REQ; + } + + if ((hw->phy.fiber_suppport_speed & TXGBE_LINK_SPEED_40GB_FULL) + == TXGBE_LINK_SPEED_40GB_FULL) { + backplane_mode |= BIT(9) | BIT(8); + fec_advertise |= TXGBE_10G_FEC_ABL; + } + } + + txgbe_wr32_epcs(hw, 0x070010, 0x0001); + + /* 10GKR:7-25KR:14/15-40GKR:8-40GCR:9 */ + txgbe_wr32_epcs(hw, 0x070011, backplane_mode | 0x11); + + /* BASE-R FEC */ + rdata = txgbe_rd32_epcs(hw, 0x70012); + txgbe_wr32_epcs(hw, 0x70012, fec_advertise); + + txgbe_wr32_epcs(hw, 0x070016, 0x0000); + txgbe_wr32_epcs(hw, 0x070017, 0x0); + txgbe_wr32_epcs(hw, 0x070018, 0x0); + + /* config timer */ + txgbe_wr32_epcs(hw, 0x078004, 0x003c); + txgbe_wr32_epcs(hw, 0x078005, CL74_KRTR_TRAINNING_TIMEOUT); + txgbe_wr32_epcs(hw, 0x078006, 25); + txgbe_wr32_epcs(hw, 0x078000, 0x0008 | BIT(2)); + + kr_dbg(KR_MODE, "1.2 Wait 10G KR phy/pcs mode init ....\n"); + status = setphylinkmode(adapter, 10, bypassCtle); + if (status) + return status; + + /* 5. CM_ENABLE */ + rdata = rd32_ephy(hw, 0x1400); + field_set(&rdata, 21, 20, 0x3);//pll en + field_set(&rdata, 19, 12, 0x0);// tx disable + field_set(&rdata, 8, 8, 0x0);// pmd mode + field_set(&rdata, 1, 1, 0x1);// pmd en + txgbe_wr32_ephy(hw, 0x1400, rdata); + + /* 6, TX_ENABLE */ + rdata = rd32_ephy(hw, 0x1400); + field_set(&rdata, 19, 12, 0x1);// tx en + txgbe_wr32_ephy(hw, 0x1400, rdata); + + kr_dbg(KR_MODE, "1.3 Wait 10G PHY RXS....\n"); + status = E56phyRxsOscInitForTempTrackRange(adapter, 10); + if (status) + return status; + + /* Wait an 10g fsm_rx_sts */ + status = read_poll_timeout(rd32_ephy, rdata, + ((rdata & 0x3f) == 0xb), 1000, + 200000, false, hw, + E56PHY_CTRL_FSM_RX_STAT_0_ADDR); + kr_dbg(KR_MODE, "Wait 10g fsm_rx_sts = %x, Wait rx_sts %s.\n", rdata, + status ? 
"FAILED" : "SUCCESS"); + + rdata = txgbe_rd32_epcs(hw, 0x070000); + field_set(&rdata, 12, 12, 0x1); + txgbe_wr32_epcs(hw, 0x070000, rdata); + kr_dbg(KR_MODE, "Setup the backplane mode========end ==\n"); + } else { + if ((hw->phy.fiber_suppport_speed & TXGBE_LINK_SPEED_40GB_FULL) + == TXGBE_LINK_SPEED_40GB_FULL) + txgbe_set_link_to_amlite(hw, + TXGBE_LINK_SPEED_40GB_FULL); + else if ((hw->phy.fiber_suppport_speed & TXGBE_LINK_SPEED_25GB_FULL) + == TXGBE_LINK_SPEED_25GB_FULL) + txgbe_set_link_to_amlite(hw, + TXGBE_LINK_SPEED_25GB_FULL); + else if (hw->phy.fiber_suppport_speed == + TXGBE_LINK_SPEED_10GB_FULL) + txgbe_set_link_to_amlite(hw, + TXGBE_LINK_SPEED_10GB_FULL); + } + + return status; +} + +static void txgbe_e56_print_page_status(struct txgbe_adapter *adapter, + bkpan73ability *tBkpAn73Ability, + bkpan73ability *tLpBkpAn73Ability) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 rdata = 0; + + /* Read the local AN73 Base Page Ability Registers */ + kr_dbg(KR_MODE, "Read the local Base Page Ability Registers\n"); + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG1); + tBkpAn73Ability->nextPage = (rdata & BIT(15)) ? 1 : 0; + kr_dbg(KR_MODE, "\tread 70010 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG2); + kr_dbg(KR_MODE, "\tread 70011 data %0x\n", rdata); + tBkpAn73Ability->linkAbility = (rdata >> 5) & GENMASK(10, 0); + /* amber-lite only support 10GKR - 25GKR/CR - 25GKR-S/CR-S */ + kr_dbg(KR_MODE, "\t10GKR : %x\t25GKR/CR-S: %x\t25GKR/CR : %x\n", + tBkpAn73Ability->linkAbility & BIT(ABILITY_10GBASE_KR) ? 1 : 0, + tBkpAn73Ability->linkAbility & BIT(ABILITY_25GBASE_KRCR_S) ? 1 : 0, + tBkpAn73Ability->linkAbility & BIT(ABILITY_25GBASE_KRCR) ? 1 : 0); + kr_dbg(KR_MODE, "\t40GCR4 : %x\t40GKR4 : %x\n", + tBkpAn73Ability->linkAbility & BIT(ABILITY_40GBASE_CR4) ? 1 : 0, + tBkpAn73Ability->linkAbility & BIT(ABILITY_40GBASE_KR4) ? 1 : 0); + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG3); + kr_dbg(KR_MODE, "\tF1:FEC Req\tF0:FEC Sup\tF3:25GFEC\tF2:25GRS\n"); + kr_dbg(KR_MODE, "\tF1: %d\t\tF0: %d\t\tF3: %d\t\tF2: %d\n", + ((rdata >> 15) & 0x01), ((rdata >> 14) & 0x01), + ((rdata >> 13) & 0x01), ((rdata >> 12) & 0x01)); + tBkpAn73Ability->fecAbility = rdata; + kr_dbg(KR_MODE, "\tread 70012 data %0x\n", rdata); + + /* Read the link partner AN73 Base Page Ability Registers */ + kr_dbg(KR_MODE, "Read the link partner Base Page Ability Registers\n"); + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_LP_ABL1); + tLpBkpAn73Ability->nextPage = (rdata & BIT(15)) ? 1 : 0; + kr_dbg(KR_MODE, "\tread 70013 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_LP_ABL2); + tLpBkpAn73Ability->linkAbility = (rdata >> 5) & GENMASK(10, 0); + kr_dbg(KR_MODE, "\tread 70014 data %0x\n", rdata); + kr_dbg(KR_MODE, "\tKX : %x\tKX4 : %x\n", + tLpBkpAn73Ability->linkAbility & BIT(ABILITY_1000BASE_KX) ? 1 : 0, + tLpBkpAn73Ability->linkAbility & BIT(ABILITY_10GBASE_KX4) ? 1 : 0); + kr_dbg(KR_MODE, "\t10GKR : %x\t25GKR/CR-S: %x\t25GKR/CR : %x\n", + tLpBkpAn73Ability->linkAbility & BIT(ABILITY_10GBASE_KR) ? 1 : 0, + tLpBkpAn73Ability->linkAbility & BIT(ABILITY_25GBASE_KRCR_S) ? 1 : 0, + tLpBkpAn73Ability->linkAbility & BIT(ABILITY_25GBASE_KRCR) ? 1 : 0); + kr_dbg(KR_MODE, "\t40GCR4 : %x\t40GKR4 : %x\n", + tLpBkpAn73Ability->linkAbility & BIT(ABILITY_40GBASE_CR4) ? 1 : 0, + tLpBkpAn73Ability->linkAbility & BIT(ABILITY_40GBASE_KR4) ? 
1 : 0); + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_LP_ABL3); + kr_dbg(KR_MODE, "\tF1:FEC Req\tF0:FEC Sup\tF3:25GFEC\tF2:25GRS\n"); + kr_dbg(KR_MODE, "\tF1: %d\t\tF0: %d\t\tF3: %d\t\tF2: %d\n", + ((rdata >> 15) & 0x01), ((rdata >> 14) & 0x01), + ((rdata >> 13) & 0x01), ((rdata >> 12) & 0x01)); + tLpBkpAn73Ability->fecAbility = rdata; + adapter->fec_mode = 0; + if (rdata & TXGBE_25G_RS_FEC_REQ) + adapter->fec_mode |= TXGBE_25G_RS_FEC_REQ; + if (rdata & TXGBE_25G_BASE_FEC_REQ) + adapter->fec_mode |= TXGBE_25G_BASE_FEC_REQ; + if (rdata & TXGBE_10G_FEC_ABL) + adapter->fec_mode |= TXGBE_10G_FEC_ABL; + if (rdata & TXGBE_10G_FEC_REQ) + adapter->fec_mode |= TXGBE_10G_FEC_REQ; + kr_dbg(KR_MODE, "\tread 70015 data %0x\n", rdata); + + kr_dbg(KR_MODE, "\tread 70016 data %0x\n", txgbe_rd32_epcs(hw, 0x70016)); + kr_dbg(KR_MODE, "\tread 70017 data %0x\n", txgbe_rd32_epcs(hw, 0x70017)); + kr_dbg(KR_MODE, "\tread 70018 data %0x\n", txgbe_rd32_epcs(hw, 0x70018)); + kr_dbg(KR_MODE, "\tread 70019 data %0x\n", txgbe_rd32_epcs(hw, 0x70019)); + kr_dbg(KR_MODE, "\tread 7001a data %0x\n", txgbe_rd32_epcs(hw, 0x7001a)); + kr_dbg(KR_MODE, "\tread 7001b data %0x\n", txgbe_rd32_epcs(hw, 0x7001b)); + +} + +static int chk_bkp_ability(struct txgbe_adapter *adapter, + bkpan73ability tBkpAn73Ability, + bkpan73ability tLpBkpAn73Ability) +{ + unsigned int comLinkAbility; + + kr_dbg(KR_MODE, "CheckBkpAn73Ability():\n"); + /* Check the common link ability and take action based on the result*/ + comLinkAbility = tBkpAn73Ability.linkAbility & + tLpBkpAn73Ability.linkAbility; + kr_dbg(KR_MODE, "comAbility= 0x%x, Ability= 0x%x, lpAbility= 0x%x\n", + comLinkAbility, tBkpAn73Ability.linkAbility, + tLpBkpAn73Ability.linkAbility); + + if (comLinkAbility == 0) { + adapter->bp_link_mode = 0; + kr_dbg(KR_MODE, "Do not support any compatible speed mode!\n"); + return -EINVAL; + } else if (comLinkAbility & BIT(ABILITY_40GBASE_CR4)) { + kr_dbg(KR_MODE, "Link mode is [ABILITY_40GBASE_CR4].\n"); + adapter->bp_link_mode = 40; + } else if (comLinkAbility & BIT(ABILITY_25GBASE_KRCR_S)) { + kr_dbg(KR_MODE, "Link mode is [ABILITY_25GBASE_KRCR_S].\n"); + adapter->fec_mode = TXGBE_25G_RS_FEC_REQ; + adapter->bp_link_mode = 25; + } else if (comLinkAbility & BIT(ABILITY_25GBASE_KRCR)) { + kr_dbg(KR_MODE, "Link mode is [ABILITY_25GBASE_KRCR].\n"); + adapter->bp_link_mode = 25; + } else if (comLinkAbility & BIT(ABILITY_10GBASE_KR)) { + kr_dbg(KR_MODE, "Link mode is [ABILITY_10GBASE_KR].\n"); + adapter->bp_link_mode = 10; + } + + return 0; +} + +static int txgbe_e56_exchange_page(struct txgbe_adapter *adapter) +{ + bkpan73ability tBkpAn73Ability = {0}, tLpBkpAn73Ability = {0}; + struct txgbe_hw *hw = &adapter->hw; + u32 an_int, base_page = 0; + int count = 0; + + an_int = txgbe_rd32_epcs(hw, 0x78002); + if (!(an_int & TXGBE_E56_AN_PG_RCV)) + return -EINVAL; + + /* 500ms timeout */ + for (count = 0; count < 5000; count++) { + u32 fsm = txgbe_rd32_epcs(hw, 0x78010); + kr_dbg(KR_MODE, "-----count----- %d - fsm: %x\n", + count, fsm); + if (an_int & TXGBE_E56_AN_PG_RCV) { + u8 next_page = 0; + u32 rdata, addr; + + txgbe_e56_print_page_status(adapter, &tBkpAn73Ability, + &tLpBkpAn73Ability); + addr = base_page == 0 ? 
0x70013 : 0x70019; + rdata = txgbe_rd32_epcs(hw, addr); + if (rdata & BIT(14)) { + if (rdata & BIT(15)) { + /* always set null message */ + txgbe_wr32_epcs(hw, 0x70016, 0x2001); + kr_dbg(KR_MODE, "write 70016 0x%0x\n", + 0x2001); + rdata = txgbe_rd32_epcs(hw, 0x70010); + txgbe_wr32_epcs(hw, 0x70010, + rdata | BIT(15)); + kr_dbg(KR_MODE, "write 70010 0x%0x\n", + rdata); + next_page = 1; + } else { + next_page = 0; + } + base_page = 1; + } + /* clear an pacv int */ + rdata = txgbe_rd32_epcs(hw, 0x78002); + kr_dbg(KR_MODE, "read 78002 data %0x and clear pacv\n", rdata); + field_set(&rdata, 2, 2, 0x0); + txgbe_wr32_epcs(hw, 0x78002, rdata); + if (next_page == 0) { + if ((fsm & 0x8) == 0x8) { + adapter->fsm = 0x8; + goto check_ability; + } + } + } + usec_delay(100); + } + +check_ability: + return chk_bkp_ability(adapter, tBkpAn73Ability, tLpBkpAn73Ability); +} + +static int txgbe_e56_cl72_trainning(struct txgbe_adapter *adapter) +{ + u32 bylinkmode = adapter->bp_link_mode; + struct txgbe_hw *hw = &adapter->hw; + u8 bypassCtle = hw->bypassCtle; + int status = 0, pTempData = 0; + u32 lane_num = 0, lane_idx = 0; + u32 pmd_ctrl = 0; + u32 txffe = 0; + int ret = 0; + u32 rdata; + + u8 pll_en_cfg = 0; + u8 pmd_mode = 0; + + switch (bylinkmode) { + case 10: + bylinkmode = 10; + lane_num = 1; + pll_en_cfg = 3; + pmd_mode = 0; + break; + case 40: + bylinkmode = 40; + lane_num = 4; + pll_en_cfg = 0; /* pll_en_cfg : single link to 0 */ + pmd_mode = 1; /* pmd mode : 1 - single link */ + break; + case 25: + bylinkmode = 25; + lane_num = 1; + pll_en_cfg = 3; + pmd_mode = 0; + break; + default: + kr_dbg(KR_MODE, "%s %d :Invalid speed\n", __func__, __LINE__); + break; + } + + kr_dbg(KR_MODE, "2.3 Wait %dG KR phy mode init ....\n", bylinkmode); + status = setphylinkmode(adapter, bylinkmode, bypassCtle); + + /* 13. set phy an status to 1 - AN_CFG[0]: 4-7 lane0-lane3 */ + rdata = rd32_ephy(hw, 0x1434); + field_set(&rdata, 7, 4, GENMASK(lane_num - 1, 0)); + txgbe_wr32_ephy(hw, 0x1434, rdata); + + /* 14 and 15. kr training: set BASER_PMD_CONTROL[0, 7] for lane0-4 */ + rdata = rd32_ephy(hw, 0x1640); + field_set(&rdata, 7, 0, GENMASK(2 * lane_num - 1, 0)); + txgbe_wr32_ephy(hw, 0x1640, rdata); + + /* 16. enable CMS and its internal PLL */ + rdata = rd32_ephy(hw, 0x1400); + field_set(&rdata, 21, 20, pll_en_cfg); + field_set(&rdata, 19, 12, 0); /* tx/rx off */ + field_set(&rdata, 8, 8, pmd_mode); + field_set(&rdata, 1, 1, 0x1); /* pmd en */ + txgbe_wr32_ephy(hw, 0x1400, rdata); + + /* 17. tx enable PMD_CFG[0] */ + rdata = rd32_ephy(hw, 0x1400); + field_set(&rdata, 15, 12, GENMASK(lane_num - 1, 0)); /* tx en */ + txgbe_wr32_ephy(hw, 0x1400, rdata); + + /* 18 */ + /* 19. rxs calibration and adaotation sequeence */ + kr_dbg(KR_MODE, "2.4 Wait %dG RXS.... fsm: %x\n", + bylinkmode, txgbe_rd32_epcs(hw, 0x78010)); + status = E56phyRxsCalibAdaptSeq(adapter, bylinkmode, bypassCtle); + ret |= status; + /* 20 */ + kr_dbg(KR_MODE, "2.5 Wait %dG phy calibration.... fsm: %x\n", + bylinkmode, txgbe_rd32_epcs(hw, 0x78010)); + E56phySetRxsUfineLeMax(adapter, bylinkmode); + status = txgbe_e56_get_temp(hw, &pTempData); + if (bylinkmode == 40) + status = txgbe_temp_track_seq_40g(hw, TXGBE_LINK_SPEED_40GB_FULL); + else + status = E56phyRxsPostCdrLockTempTrackSeq(adapter, bylinkmode); + /* 21 */ + kr_dbg(KR_MODE, "2.6 Wait %dG phy kr training check.... 
fsm: %x\n", + bylinkmode, txgbe_rd32_epcs(hw, 0x78010)); + status = read_poll_timeout(rd32_ephy, rdata, + ((rdata & 0xe) & GENMASK(lane_num, 1)) == + (0xe & GENMASK(lane_num, 1)), 100, + 200000, false, hw, 0x163c); + pmd_ctrl = rd32_ephy(hw, 0x1644); + kr_dbg(KR_MODE, "KR TRAINING CHECK = %x, %s. pmd_ctrl:%lx-%lx-%lx-%lx\n", + rdata, status ? "FAILED" : "SUCCESS", + FIELD_GET_M(GENMASK(3, 0), pmd_ctrl), + FIELD_GET_M(GENMASK(7, 4), pmd_ctrl), + FIELD_GET_M(GENMASK(11, 8), pmd_ctrl), + FIELD_GET_M(GENMASK(15, 12), pmd_ctrl)); + ret |= status; + kr_dbg(KR_MODE, "before: %x-%x-%x-%x\n", + rd32_ephy(hw, 0x141c), rd32_ephy(hw, 0x1420), + rd32_ephy(hw, 0x1424), rd32_ephy(hw, 0x1428)); + + for (lane_idx = 0; lane_idx < lane_num; lane_idx++) { + txffe = rd32_ephy(hw, 0x828 + lane_idx * 0x100); + kr_dbg(KR_MODE, "after[%x]: %lx-%lx-%lx-%lx\n", lane_idx, + FIELD_GET_M(GENMASK(6, 0), txffe), + FIELD_GET_M(GENMASK(21, 16), txffe), + FIELD_GET_M(GENMASK(29, 24), txffe), + FIELD_GET_M(GENMASK(13, 8), txffe)); + } + + /* 22 */ + kr_dbg(KR_MODE, "2.7 Wait %dG phy Rx adc.... fsm:%x\n", + bylinkmode, txgbe_rd32_epcs(hw, 0x78010)); + status = E56phyRxsAdcAdaptSeq(adapter, bypassCtle); + + return ret; +} + +static int handle_e56_bkp_an73_flow(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int status = 0; + u32 rdata; + + kr_dbg(KR_MODE, "2.1 Wait page changed ....\n"); + status = txgbe_e56_exchange_page(adapter); + if (status) { + kr_dbg(KR_MODE, "Exchange page failed\n"); + return status; + } + + kr_dbg(KR_MODE, "2.2 Wait page changed ..done..\n"); + txgbe_wr32_epcs(hw, 0x100ab, 0); + if (AN_TRAINNING_MODE) { + rdata = txgbe_rd32_epcs(hw, 0x70000); + kr_dbg(KR_MODE, "read 0x70000 data %0x\n", rdata); + txgbe_wr32_epcs(hw, 0x70000, 0); + kr_dbg(KR_MODE, "write 0x70000 0x%0x\n", 0); + } + + rdata = txgbe_rd32_epcs(hw, 0x78002); + kr_dbg(KR_MODE, "read 78002 data %0x and clear page int\n", rdata); + field_set(&rdata, 2, 2, 0x0); + txgbe_wr32_epcs(hw, 0x78002, rdata); + + /* 10 RXS_DISABLE - TXS_DISABLE - CMS_DISABLE */ + /* dis phy tx/rx lane */ + rdata = rd32_ephy(hw, 0x1400); + field_set(&rdata, 19, 16, 0x0); + field_set(&rdata, 15, 12, 0x0); + field_set(&rdata, 1, 1, 0x0); + txgbe_wr32_ephy(hw, 0x1400, rdata); + kr_dbg(KR_MODE, "Ephy Write A: 0x%x, D: 0x%x\n", 0x1400, rdata); + /* wait rx/tx/cm powerdn_st according to pmd 50 2.0.5 */ + status = read_poll_timeout(rd32_ephy, rdata, + (rdata & GENMASK(3, 0)) == 0x9, 100, + 200000, false, hw, 0x14d4); + kr_dbg(KR_MODE, "wait ctrl_fsm_cm_st = %x, %s.\n", + rdata, status ? 
"FAILED" : "SUCCESS"); + + if (adapter->fec_mode & TXGBE_25G_RS_FEC_REQ) { + txgbe_wr32_epcs(hw, 0x180a3, 0x68c1); + txgbe_wr32_epcs(hw, 0x180a4, 0x3321); + txgbe_wr32_epcs(hw, 0x180a5, 0x973e); + txgbe_wr32_epcs(hw, 0x180a6, 0xccde); + + txgbe_wr32_epcs(hw, 0x38018, 1024); + rdata = txgbe_rd32_epcs(hw, 0x100c8); + field_set(&rdata, 2, 2, 1); + txgbe_wr32_epcs(hw, 0x100c8, rdata); + kr_dbg(KR_MODE, "Advertised FEC modes : %s\n", "RS-FEC"); + adapter->cur_fec_link = TXGBE_PHY_FEC_RS; + } else if (adapter->fec_mode & TXGBE_25G_BASE_FEC_REQ) { + /* FEC: FC-FEC/BASE-R */ + txgbe_wr32_epcs(hw, 0x100ab, BIT(0)); + kr_dbg(KR_MODE, "Epcs Write A: 0x%x, D: 0x%x\n", 0x100ab, 1); + kr_dbg(KR_MODE, "Advertised FEC modes : %s\n", "25GBASE-R"); + adapter->cur_fec_link = TXGBE_PHY_FEC_BASER; + } else if (adapter->fec_mode & (TXGBE_10G_FEC_REQ)) { + /* FEC: FC-FEC/BASE-R */ + txgbe_wr32_epcs(hw, 0x100ab, BIT(0)); + kr_dbg(KR_MODE, "Epcs Write A: 0x%x, D: 0x%x\n", 0x100ab, 1); + kr_dbg(KR_MODE, "Advertised FEC modes : %s\n", "BASE-R"); + adapter->cur_fec_link = TXGBE_PHY_FEC_BASER; + } else { + kr_dbg(KR_MODE, "Advertised FEC modes : %s\n", "NONE"); + adapter->cur_fec_link = TXGBE_PHY_FEC_OFF; + } + + status = txgbe_e56_cl72_trainning(adapter); + rdata = rd32_ephy(hw, E56PHY_RXS_IDLE_DETECT_1_ADDR); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX, 0x28); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN, 0xa); + txgbe_wr32_ephy(hw, E56PHY_RXS_IDLE_DETECT_1_ADDR, rdata); + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ADDR, E56PHY_INTR_0_IDLE_ENTRY1); + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ADDR, E56PHY_INTR_1_IDLE_EXIT1); + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ENABLE_ADDR, E56PHY_INTR_0_IDLE_ENTRY1); + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ENABLE_ADDR, E56PHY_INTR_1_IDLE_EXIT1); + + return status; +} + +void txgbe_e56_bp_watchdog_event(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u32 rlu = 0, an_int = 0, an_int1 = 0; + struct txgbe_hw *hw = &adapter->hw; + u32 value = 0, fsm = 0; + int ret = 0; + + if (!(hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core0 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core1 || + txgbe_is_backplane(hw))) + return; + + /* only continue if link is down */ + if (netif_carrier_ok(netdev)) + return; + + if (!adapter->backplane_an) + return; + + value = txgbe_rd32_epcs(hw, 0x78002); + an_int = value; + if (value & TXGBE_E56_AN_INT_CMPLT) { + adapter->an_done = true; + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + field_set(&value, 0, 0, 0); + txgbe_wr32_epcs(hw, 0x78002, value); + } + + if (value & TXGBE_E56_AN_INC_LINK) { + field_set(&value, 1, 1, 0); + txgbe_wr32_epcs(hw, 0x78002, value); + } + + if (value & TXGBE_E56_AN_TXDIS) { + field_set(&value, 3, 3, 0); + txgbe_wr32_epcs(hw, 0x78002, value); + mutex_lock(&adapter->e56_lock); + txgbe_e56_set_phylinkmode(adapter, 10, hw->bypassCtle); + mutex_unlock(&adapter->e56_lock); + goto an_status; + } + + if (value & TXGBE_E56_AN_PG_RCV) { + kr_dbg(KR_MODE, "Enter training\n"); + ret = handle_e56_bkp_an73_flow(adapter); + if (!AN_TRAINNING_MODE) { + fsm = txgbe_rd32_epcs(hw, 0x78010); + if (fsm & 0x8) + goto an_status; + if (ret) { + kr_dbg(KR_MODE, "Training FAILED, do reset\n"); + mutex_lock(&adapter->e56_lock); + txgbe_e56_set_phylinkmode(adapter, 10, hw->bypassCtle); + mutex_unlock(&adapter->e56_lock); + } else { + kr_dbg(KR_MODE, "ALL SUCCESSED\n"); + } + } else { + if (ret) { + 
kr_dbg(KR_MODE, "Training FAILED, do reset\n"); + mutex_lock(&adapter->e56_lock); + txgbe_e56_set_phylinkmode(adapter, 10, hw->bypassCtle); + mutex_unlock(&adapter->e56_lock); + } else { + adapter->an_done = true; + } + } + } + +an_status: + an_int1 = txgbe_rd32_epcs(hw, 0x78002); + if (an_int1 & TXGBE_E56_AN_INT_CMPLT) { + adapter->an_done = true; + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + } + rlu = txgbe_rd32_epcs(hw, 0x30001); + kr_dbg(KR_MODE, "RLU:%x MLU:%x INT:%x-%x CTL:%x fsm:%x pmd_cfg0:%x an_done:%d by:%d\n", + txgbe_rd32_epcs(hw, 0x30001), rd32(hw, 0x14404), + an_int, an_int1, + txgbe_rd32_epcs(hw, 0x70000), + txgbe_rd32_epcs(hw, 0x78010), + rd32_ephy(hw, 0x1400), + adapter->an_done, + hw->bypassCtle); +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_e56_bp.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56_bp.h new file mode 100644 index 0000000000000000000000000000000000000000..ec43df3da0ce45c9d22499510032576de16c4ba4 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56_bp.h @@ -0,0 +1,268 @@ +#ifndef _TXGBE_E56_BP_H_ +#define _TXGBE_E56_BP_H_ + +#define TXGBE_E56_AN_TXDIS BIT(3) +#define TXGBE_E56_AN_PG_RCV BIT(2) +#define TXGBE_E56_AN_INC_LINK BIT(1) +#define TXGBE_E56_AN_INT_CMPLT BIT(0) + +#define TXGBE_10G_FEC_REQ BIT(15) +#define TXGBE_10G_FEC_ABL BIT(14) +#define TXGBE_25G_BASE_FEC_REQ BIT(13) +#define TXGBE_25G_RS_FEC_REQ BIT(12) + +typedef union { + struct { + u32 tx0_cursor_factor : 7; + u32 rsvd0 : 1; + u32 tx1_cursor_factor : 7; + u32 rsvd1 : 1; + u32 tx2_cursor_factor : 7; + u32 rsvd2 : 1; + u32 tx3_cursor_factor : 7; + u32 rsvd3 : 1; + }; + u32 reg; +} E56G__PMD_TX_FFE_CFG_1; + +#define E56G__PMD_TX_FFE_CFG_1_NUM 1 +#define E56G__PMD_TX_FFE_CFG_1_ADDR (E56G__BASEADDR+0x141c) +#define E56G__PMD_TX_FFE_CFG_1_PTR ((E56G__PMD_TX_FFE_CFG_1 *)(E56G__PMD_TX_FFE_CFG_1_ADDR)) +#define E56G__PMD_TX_FFE_CFG_1_STRIDE 4 +#define E56G__PMD_TX_FFE_CFG_1_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_1_ACC_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_1_READ_MSB 30 +#define E56G__PMD_TX_FFE_CFG_1_READ_LSB 0 +#define E56G__PMD_TX_FFE_CFG_1_WRITE_MSB 30 +#define E56G__PMD_TX_FFE_CFG_1_WRITE_LSB 0 +#define E56G__PMD_TX_FFE_CFG_1_RESET_VALUE 0x3f3f3f3f + +typedef union { + struct { + u32 tx0_precursor1_factor : 6; + u32 rsvd0 : 2; + u32 tx1_precursor1_factor : 6; + u32 rsvd1 : 2; + u32 tx2_precursor1_factor : 6; + u32 rsvd2 : 2; + u32 tx3_precursor1_factor : 6; + u32 rsvd3 : 2; + }; + u32 reg; +} E56G__PMD_TX_FFE_CFG_2; + +#define E56G__PMD_TX_FFE_CFG_2_NUM 1 +#define E56G__PMD_TX_FFE_CFG_2_ADDR (E56G__BASEADDR+0x1420) +#define E56G__PMD_TX_FFE_CFG_2_PTR ((E56G__PMD_TX_FFE_CFG_2 *)(E56G__PMD_TX_FFE_CFG_2_ADDR)) +#define E56G__PMD_TX_FFE_CFG_2_STRIDE 4 +#define E56G__PMD_TX_FFE_CFG_2_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_2_ACC_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_2_READ_MSB 29 +#define E56G__PMD_TX_FFE_CFG_2_READ_LSB 0 +#define E56G__PMD_TX_FFE_CFG_2_WRITE_MSB 29 +#define E56G__PMD_TX_FFE_CFG_2_WRITE_LSB 0 +#define E56G__PMD_TX_FFE_CFG_2_RESET_VALUE 0x0 + +typedef union { + struct { + u32 tx0_precursor2_factor : 6; + u32 rsvd0 : 2; + u32 tx1_precursor2_factor : 6; + u32 rsvd1 : 2; + u32 tx2_precursor2_factor : 6; + u32 rsvd2 : 2; + u32 tx3_precursor2_factor : 6; + u32 rsvd3 : 2; + }; + u32 reg; +} E56G__PMD_TX_FFE_CFG_3; +#define E56G__PMD_TX_FFE_CFG_3_NUM 1 +#define E56G__PMD_TX_FFE_CFG_3_ADDR (E56G__BASEADDR+0x1424) +#define E56G__PMD_TX_FFE_CFG_3_PTR ((E56G__PMD_TX_FFE_CFG_3 *)(E56G__PMD_TX_FFE_CFG_3_ADDR)) +#define E56G__PMD_TX_FFE_CFG_3_STRIDE 4 +#define 
E56G__PMD_TX_FFE_CFG_3_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_3_ACC_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_3_READ_MSB 29 +#define E56G__PMD_TX_FFE_CFG_3_READ_LSB 0 +#define E56G__PMD_TX_FFE_CFG_3_WRITE_MSB 29 +#define E56G__PMD_TX_FFE_CFG_3_WRITE_LSB 0 +#define E56G__PMD_TX_FFE_CFG_3_RESET_VALUE 0x0 + +typedef union { + struct { + u32 tx0_postcursor_factor : 6; + u32 rsvd0 : 2; + u32 tx1_postcursor_factor : 6; + u32 rsvd1 : 2; + u32 tx2_postcursor_factor : 6; + u32 rsvd2 : 2; + u32 tx3_postcursor_factor : 6; + u32 rsvd3 : 2; + }; + u32 reg; +} E56G__PMD_TX_FFE_CFG_4; +#define E56G__PMD_TX_FFE_CFG_4_NUM 1 +#define E56G__PMD_TX_FFE_CFG_4_ADDR (E56G__BASEADDR+0x1428) +#define E56G__PMD_TX_FFE_CFG_4_PTR ((E56G__PMD_TX_FFE_CFG_4 *)(E56G__PMD_TX_FFE_CFG_4_ADDR)) +#define E56G__PMD_TX_FFE_CFG_4_STRIDE 4 +#define E56G__PMD_TX_FFE_CFG_4_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_4_ACC_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_4_READ_MSB 29 +#define E56G__PMD_TX_FFE_CFG_4_READ_LSB 0 +#define E56G__PMD_TX_FFE_CFG_4_WRITE_MSB 29 +#define E56G__PMD_TX_FFE_CFG_4_WRITE_LSB 0 +#define E56G__PMD_TX_FFE_CFG_4_RESET_VALUE 0x0 + +typedef union { + struct { + u32 ana_lcpll_lf_vco_swing_ctrl_i : 4; + u32 ana_lcpll_lf_lpf_setcode_calib_i : 5; + u32 rsvd0 : 3; + u32 ana_lcpll_lf_vco_coarse_bin_i : 5; + u32 rsvd1 : 3; + u32 ana_lcpll_lf_vco_fine_therm_i : 8; + u32 ana_lcpll_lf_clkout_fb_ctrl_i : 2; + u32 rsvd2 : 2; + }; + u32 reg; +} E56G__CMS_ANA_OVRDVAL_7; +#define E56G__CMS_ANA_OVRDVAL_7_NUM 1 +#define E56G__CMS_ANA_OVRDVAL_7_ADDR (E56G__BASEADDR+0xccc) +#define E56G__CMS_ANA_OVRDVAL_7_PTR ((E56G__CMS_ANA_OVRDVAL_7 *)(E56G__CMS_ANA_OVRDVAL_7_ADDR)) +#define E56G__CMS_ANA_OVRDVAL_7_STRIDE 4 +#define E56G__CMS_ANA_OVRDVAL_7_SIZE 32 +#define E56G__CMS_ANA_OVRDVAL_7_ACC_SIZE 32 +#define E56G__CMS_ANA_OVRDVAL_7_READ_MSB 29 +#define E56G__CMS_ANA_OVRDVAL_7_READ_LSB 0 +#define E56G__CMS_ANA_OVRDVAL_7_WRITE_MSB 29 +#define E56G__CMS_ANA_OVRDVAL_7_WRITE_LSB 0 +#define E56G__CMS_ANA_OVRDVAL_7_RESET_VALUE 0x0 + +typedef union { + struct { + u32 ovrd_en_ana_lcpll_hf_vco_amp_status_o : 1; + u32 ovrd_en_ana_lcpll_hf_clkout_fb_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_hf_clkdiv_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_hf_en_odiv_i : 1; + u32 ovrd_en_ana_lcpll_hf_test_in_i : 1; + u32 ovrd_en_ana_lcpll_hf_test_out_o : 1; + u32 ovrd_en_ana_lcpll_lf_en_bias_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_loop_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_cp_i : 1; + u32 ovrd_en_ana_lcpll_lf_icp_base_i : 1; + u32 ovrd_en_ana_lcpll_lf_icp_fine_i : 1; + u32 ovrd_en_ana_lcpll_lf_lpf_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_lpf_setcode_calib_i : 1; + u32 ovrd_en_ana_lcpll_lf_set_lpf_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_vco_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_sel_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_swing_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_coarse_bin_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_fine_therm_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_amp_status_o : 1; + u32 ovrd_en_ana_lcpll_lf_clkout_fb_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_clkdiv_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_odiv_i : 1; + u32 ovrd_en_ana_lcpll_lf_test_in_i : 1; + u32 ovrd_en_ana_lcpll_lf_test_out_o : 1; + u32 ovrd_en_ana_lcpll_hf_refclk_select_i : 1; + u32 ovrd_en_ana_lcpll_lf_refclk_select_i : 1; + u32 ovrd_en_ana_lcpll_hf_clk_ref_sel_i : 1; + u32 ovrd_en_ana_lcpll_lf_clk_ref_sel_i : 1; + u32 ovrd_en_ana_test_bias_i : 1; + u32 ovrd_en_ana_test_slicer_i : 1; + u32 ovrd_en_ana_test_sampler_i : 1; + }; + u32 reg; +} E56G__CMS_ANA_OVRDEN_1; +#define E56G__CMS_ANA_OVRDEN_1_NUM 1 +#define 
E56G__CMS_ANA_OVRDEN_1_ADDR (E56G__BASEADDR+0xca8) +#define E56G__CMS_ANA_OVRDEN_1_PTR ((E56G__CMS_ANA_OVRDEN_1 *)(E56G__CMS_ANA_OVRDEN_1_ADDR)) +#define E56G__CMS_ANA_OVRDEN_1_STRIDE 4 +#define E56G__CMS_ANA_OVRDEN_1_SIZE 32 +#define E56G__CMS_ANA_OVRDEN_1_ACC_SIZE 32 +#define E56G__CMS_ANA_OVRDEN_1_READ_MSB 31 +#define E56G__CMS_ANA_OVRDEN_1_READ_LSB 0 +#define E56G__CMS_ANA_OVRDEN_1_WRITE_MSB 31 +#define E56G__CMS_ANA_OVRDEN_1_WRITE_LSB 0 +#define E56G__CMS_ANA_OVRDEN_1_RESET_VALUE 0x0 + +typedef union { + struct { + u32 ana_lcpll_lf_test_in_i : 32; + }; + u32 reg; +} E56G__CMS_ANA_OVRDVAL_9; +#define E56G__CMS_ANA_OVRDVAL_9_NUM 1 +#define E56G__CMS_ANA_OVRDVAL_9_ADDR (E56G__BASEADDR+0xcd4) +#define E56G__CMS_ANA_OVRDVAL_9_PTR ((E56G__CMS_ANA_OVRDVAL_9 *)(E56G__CMS_ANA_OVRDVAL_9_ADDR)) +#define E56G__CMS_ANA_OVRDVAL_9_STRIDE 4 +#define E56G__CMS_ANA_OVRDVAL_9_SIZE 32 +#define E56G__CMS_ANA_OVRDVAL_9_ACC_SIZE 32 +#define E56G__CMS_ANA_OVRDVAL_9_READ_MSB 31 +#define E56G__CMS_ANA_OVRDVAL_9_READ_LSB 0 +#define E56G__CMS_ANA_OVRDVAL_9_WRITE_MSB 31 +#define E56G__CMS_ANA_OVRDVAL_9_WRITE_LSB 0 +#define E56G__CMS_ANA_OVRDVAL_9_RESET_VALUE 0x0 + +#define SFP2_RS0 5 +#define SFP2_RS1 4 +#define SFP2_TX_DISABLE 1 +#define SFP2_TX_FAULT 0 +#define SFP2_RX_LOS_BIT 3 +#ifdef PHYINIT_TIMEOUT +#undef PHYINIT_TIMEOUT +#define PHYINIT_TIMEOUT 2000 +#endif + +#define E56PHY_CMS_ANA_OVRDEN_0_ADDR (E56PHY_CMS_BASE_ADDR+0xA4) +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_REFCLK_BUF_DAISY_EN_I 0, 0 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_REFCLK_BUF_PAD_EN_I 1, 1 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_REFCLK_BUF_PAD_EN_I_LSB 1 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_VDDINOFF_DCORE_DIG_O 2, 2 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_BG_EN_I 11, 11 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_BG_EN_I_LSB 11 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_BG_TESTIN_I 12, 12 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_BG_TESTIN_I_LSB 12 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RESCAL_I 13, 13 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RESCAL_I_LSB 13 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_RESCAL_COMP_O 14, 14 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_RESCAL_COMP_O_LSB 14 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_RESCAL_CODE_I 15, 15 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_RESCAL_CODE_I_LSB 15 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_LDO_CORE_I 16, 16 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_LDO_CORE_I_LSB 16 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_LDO_I 17, 17 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_LDO_I_LSB 17 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_ANA_DEBUG_SEL_I 18, 18 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_ANA_DEBUG_SEL_I_LSB 18 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_EN_BIAS_I 19, 19 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_EN_BIAS_I_LSB 19 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_EN_LOOP_I 20, 20 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_EN_LOOP_I_LSB 20 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_EN_CP_I 21, 21 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_EN_CP_I_LSB 21 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_ICP_BASE_I 22, 22 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_ICP_BASE_I_LSB 22 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_ICP_FINE_I 23, 23 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_ICP_FINE_I_LSB 23 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_LPF_CTRL_I 24, 24 +#define 
E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_LPF_CTRL_I_LSB 24 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_LPF_SETCODE_CALIB_I 25, 25 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_LPF_SETCODE_CALIB_I_LSB 25 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_SET_LPF_I 26, 26 + +#define E56PHY_CMS_ANA_OVRDVAL_2_ANA_LCPLL_HF_LPF_SETCODE_CALIB_I 20, 16 +#define E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_LF_LPF_SETCODE_CALIB_I 12, 12 +#define E56PHY_CMS_ANA_OVRDVAL_7_ADDR (E56PHY_CMS_BASE_ADDR+0xCC) +#define E56PHY_CMS_ANA_OVRDVAL_5_ADDR (E56PHY_CMS_BASE_ADDR+0xC4) +#define E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_LF_TEST_IN_I 23, 23 +#define E56PHY_CMS_ANA_OVRDVAL_9_ADDR (E56PHY_CMS_BASE_ADDR+0xD4) +#define E56PHY_CMS_ANA_OVRDVAL_10_ADDR (E56PHY_CMS_BASE_ADDR+0xD8) +#define E56PHY_CMS_ANA_OVRDVAL_7_ANA_LCPLL_LF_LPF_SETCODE_CALIB_I 8, 4 + +int txgbe_e56_set_phylinkmode(struct txgbe_adapter *adapter, + unsigned char byLinkMode, unsigned int bypassCtle); +void txgbe_e56_bp_watchdog_event(struct txgbe_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 859da112586a427043e2f608793d4d81226bcc63..3138c8d19dc93d4bc0effda7fecfc20b4955754a 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -1,47 +1,5908 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */ +/* + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on txgbe_ethtool.c, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ +/* ethtool support for txgbe */ + +#include +#include #include -#include #include +#include +#include +#include +#include +#ifdef SIOCETHTOOL +#include -#include "../libwx/wx_ethtool.h" -#include "../libwx/wx_type.h" -#include "txgbe_type.h" -#include "txgbe_ethtool.h" +#include "txgbe.h" +#include "txgbe_hw.h" +#if defined(ETHTOOL_GMODULEINFO)||defined(HAVE_ETHTOOL_SET_PHYS_ID) +#include "txgbe_phy.h" +#include "txgbe_e56.h" +#endif +#ifdef HAVE_ETHTOOL_GET_TS_INFO +#include +#endif -static int txgbe_nway_reset(struct net_device *netdev) +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif + +#define TXGBE_ALL_RAR_ENTRIES 16 + +#ifdef ETHTOOL_OPS_COMPAT +#include "kcompat_ethtool.c" +#endif + +#include "txgbe_xsk.h" + +#define ETHTOOL_LINK_MODE_SPEED_MASK 0xfffe903f + +#ifdef ETHTOOL_GSTATS +struct txgbe_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define TXGBE_NETDEV_STAT(_net_stat) { \ + .stat_string = #_net_stat, \ + .sizeof_stat = sizeof_field(struct net_device_stats, _net_stat), \ + .stat_offset = offsetof(struct net_device_stats, _net_stat) \ +} +static const struct txgbe_stats txgbe_gstrings_net_stats[] = { + TXGBE_NETDEV_STAT(rx_packets), + TXGBE_NETDEV_STAT(tx_packets), + TXGBE_NETDEV_STAT(rx_bytes), + TXGBE_NETDEV_STAT(tx_bytes), + TXGBE_NETDEV_STAT(rx_errors), + TXGBE_NETDEV_STAT(tx_errors), + TXGBE_NETDEV_STAT(rx_dropped), + TXGBE_NETDEV_STAT(tx_dropped), + TXGBE_NETDEV_STAT(collisions), + TXGBE_NETDEV_STAT(rx_over_errors), + TXGBE_NETDEV_STAT(rx_crc_errors), + TXGBE_NETDEV_STAT(rx_frame_errors), + TXGBE_NETDEV_STAT(rx_fifo_errors), + TXGBE_NETDEV_STAT(rx_missed_errors), + TXGBE_NETDEV_STAT(tx_aborted_errors), + TXGBE_NETDEV_STAT(tx_carrier_errors), + TXGBE_NETDEV_STAT(tx_fifo_errors), + TXGBE_NETDEV_STAT(tx_heartbeat_errors), +}; + +#define TXGBE_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(struct txgbe_adapter, _stat), \ + .stat_offset = offsetof(struct txgbe_adapter, _stat) \ +} +static struct txgbe_stats txgbe_gstrings_stats[] = { + TXGBE_STAT("rx_pkts_nic", stats.gprc), + TXGBE_STAT("tx_pkts_nic", stats.gptc), + TXGBE_STAT("rx_bytes_nic", stats.gorc), + TXGBE_STAT("tx_bytes_nic", stats.gotc), + TXGBE_STAT("lsc_int", lsc_int), + TXGBE_STAT("tx_busy", tx_busy), + TXGBE_STAT("non_eop_descs", non_eop_descs), + TXGBE_STAT("rx_broadcast", stats.bprc), + TXGBE_STAT("tx_broadcast", stats.bptc), + TXGBE_STAT("rx_multicast", stats.mprc), + TXGBE_STAT("tx_multicast", stats.mptc), + TXGBE_STAT("rx_mac_good", stats.tpr), + TXGBE_STAT("rdb_pkts", stats.rdpc), + TXGBE_STAT("rdb_drop", stats.rddc), + TXGBE_STAT("tdm_pkts", stats.tdmpc), + TXGBE_STAT("tdm_drop", stats.tdmdc), + TXGBE_STAT("tdb_pkts", stats.tdbpc), + TXGBE_STAT("rx_parser_pkts", stats.psrpc), + TXGBE_STAT("rx_parser_drop", stats.psrdc), + TXGBE_STAT("lsec_untag_pkts", stats.untag), + TXGBE_STAT("rx_no_buffer_count", stats.rnbc[0]), + TXGBE_STAT("tx_timeout_count", tx_timeout_count), + TXGBE_STAT("tx_restart_queue", restart_queue), + TXGBE_STAT("rx_long_length_count", stats.roc), + TXGBE_STAT("rx_short_length_count", stats.ruc), + TXGBE_STAT("tx_flow_control_xon", stats.lxontxc), + TXGBE_STAT("rx_flow_control_xon", stats.lxonrxc), + TXGBE_STAT("tx_flow_control_xoff", stats.lxofftxc), + TXGBE_STAT("rx_flow_control_xoff", stats.lxoffrxc), + TXGBE_STAT("rx_csum_offload_good_count", hw_csum_rx_good), + TXGBE_STAT("rx_csum_offload_errors", hw_csum_rx_error), + 
TXGBE_STAT("alloc_rx_page_failed", alloc_rx_page_failed), + TXGBE_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), +#ifndef TXGBE_NO_LRO + TXGBE_STAT("lro_aggregated", lro_stats.coal), + TXGBE_STAT("lro_flushed", lro_stats.flushed), +#endif /* TXGBE_NO_LRO */ + TXGBE_STAT("rx_no_dma_resources", hw_rx_no_dma_resources), + TXGBE_STAT("hw_rsc_aggregated", rsc_total_count), + TXGBE_STAT("hw_rsc_flushed", rsc_total_flush), +#ifdef HAVE_TX_MQ + TXGBE_STAT("fdir_match", stats.fdirmatch), + TXGBE_STAT("fdir_miss", stats.fdirmiss), + TXGBE_STAT("fdir_overflow", fdir_overflow), +#endif /* HAVE_TX_MQ */ +#if IS_ENABLED(CONFIG_FCOE) + TXGBE_STAT("fcoe_bad_fccrc", stats.fccrc), + TXGBE_STAT("fcoe_last_errors", stats.fclast), + TXGBE_STAT("rx_fcoe_dropped", stats.fcoerpdc), + TXGBE_STAT("rx_fcoe_packets", stats.fcoeprc), + TXGBE_STAT("rx_fcoe_dwords", stats.fcoedwrc), + TXGBE_STAT("fcoe_noddp", stats.fcoe_noddp), + TXGBE_STAT("fcoe_noddp_ext_buff", stats.fcoe_noddp_ext_buff), + TXGBE_STAT("tx_fcoe_packets", stats.fcoeptc), + TXGBE_STAT("tx_fcoe_dwords", stats.fcoedwtc), +#endif /* CONFIG_FCOE */ + TXGBE_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + TXGBE_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + TXGBE_STAT("os2bmc_tx_by_host", stats.o2bspc), + TXGBE_STAT("os2bmc_rx_by_host", stats.b2ogprc), +#ifdef HAVE_PTP_1588_CLOCK + TXGBE_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + TXGBE_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), +#endif /* HAVE_PTP_1588_CLOCK */ +}; + +/* txgbe allocates num_tx_queues and num_rx_queues symmetrically so + * we set the num_rx_queues to evaluate to num_tx_queues. This is + * used because we do not have a good way to get the max number of + * rx queues with CONFIG_RPS disabled. + */ +#ifdef HAVE_TX_MQ +#ifdef HAVE_NETDEV_SELECT_QUEUE +#define TXGBE_NUM_RX_QUEUES netdev->num_tx_queues +#define TXGBE_NUM_TX_QUEUES netdev->num_tx_queues +#else +#define TXGBE_NUM_RX_QUEUES adapter->indices +#define TXGBE_NUM_TX_QUEUES adapter->indices +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#else /* HAVE_TX_MQ */ +#define TXGBE_NUM_RX_QUEUES 1 +#define TXGBE_NUM_TX_QUEUES ( \ + ((struct txgbe_adapter *)netdev_priv(netdev))->num_tx_queues) +#endif /* HAVE_TX_MQ */ + +#define TXGBE_QUEUE_STATS_LEN ( \ + (TXGBE_NUM_TX_QUEUES + TXGBE_NUM_RX_QUEUES) * \ + (sizeof(struct txgbe_queue_stats) / sizeof(u64))) +#define TXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(txgbe_gstrings_stats) +#define TXGBE_NETDEV_STATS_LEN ARRAY_SIZE(txgbe_gstrings_net_stats) +#define TXGBE_PB_STATS_LEN ( \ + (sizeof(((struct txgbe_adapter *)0)->stats.pxonrxc) + \ + sizeof(((struct txgbe_adapter *)0)->stats.pxontxc) + \ + sizeof(((struct txgbe_adapter *)0)->stats.pxoffrxc) + \ + sizeof(((struct txgbe_adapter *)0)->stats.pxofftxc)) \ + / sizeof(u64)) +#define TXGBE_STATS_LEN (TXGBE_GLOBAL_STATS_LEN + \ + TXGBE_NETDEV_STATS_LEN + \ + TXGBE_PB_STATS_LEN + \ + TXGBE_QUEUE_STATS_LEN) + +#endif /* ETHTOOL_GSTATS */ +#ifdef ETHTOOL_TEST +static const char txgbe_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; +#define TXGBE_TEST_LEN (sizeof(txgbe_gstrings_test) / ETH_GSTRING_LEN) +#endif /* ETHTOOL_TEST */ + + +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT +struct txgbe_priv_flags { + char flag_string[ETH_GSTRING_LEN]; + u64 flag; + bool read_only; +}; + +#define TXGBE_PRIV_FLAG(_name, _flag, _read_only) { \ + .flag_string = _name, \ + .flag = _flag, \ + .read_only = _read_only, \ +} + +static const struct txgbe_priv_flags 
txgbe_gstrings_priv_flags[] = { + TXGBE_PRIV_FLAG("lldp", TXGBE_ETH_PRIV_FLAG_LLDP, 0), +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC + TXGBE_PRIV_FLAG("legacy-rx", TXGBE_ETH_PRIV_FLAG_LEGACY_RX, 0), +#endif +}; + +#define TXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(txgbe_gstrings_priv_flags) + +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + +/* currently supported speeds for 10G */ +#define ADVERTISED_MASK_10G (SUPPORTED_10000baseT_Full | \ + SUPPORTED_10000baseKX4_Full | SUPPORTED_10000baseKR_Full) + +#define txgbe_isbackplane(type) \ + ((type == txgbe_media_type_backplane) ? true : false) + +#ifdef ETHTOOL_GLINKSETTINGS +static int txgbe_set_advertising_1g_10gtypes(struct txgbe_hw *hw, + struct ethtool_link_ksettings *cmd, u32 advertised_speed) { - struct txgbe *txgbe = netdev_to_txgbe(netdev); + switch (hw->phy.sfp_type) { + case txgbe_sfp_type_da_cu: + case txgbe_sfp_type_da_act_lmt_core0: + case txgbe_sfp_type_da_act_lmt_core1: + case txgbe_sfp_type_da_cu_core0: + case txgbe_sfp_type_da_cu_core1: + case txgbe_sfp_type_srlr_core0: + case txgbe_sfp_type_srlr_core1: + if (advertised_speed & TXGBE_LINK_SPEED_10GB_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseLR_Full); + } + if (advertised_speed & TXGBE_LINK_SPEED_1GB_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseX_Full); + } + break; + case txgbe_sfp_type_sr: + case txgbe_sfp_type_25g_sr_core0: + case txgbe_sfp_type_25g_sr_core1: + if (advertised_speed & TXGBE_LINK_SPEED_10GB_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseSR_Full); + } + if (advertised_speed & TXGBE_LINK_SPEED_1GB_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseX_Full); + } + break; + case txgbe_sfp_type_lr: + case txgbe_sfp_type_25g_lr_core0: + case txgbe_sfp_type_25g_lr_core1: + if (advertised_speed & TXGBE_LINK_SPEED_10GB_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseLR_Full); + } + if (advertised_speed & TXGBE_LINK_SPEED_1GB_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseX_Full); + } + break; + case txgbe_sfp_type_1g_cu_core0: + case txgbe_sfp_type_1g_cu_core1: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Full); + break; + case txgbe_sfp_type_1g_sx_core0: + case txgbe_sfp_type_1g_sx_core1: + case txgbe_sfp_type_1g_lx_core0: + case txgbe_sfp_type_1g_lx_core1: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseX_Full); + break; + default: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseT_Full); + break; + } + + return 0; +} + +static int txgbe_set_supported_1g_10gtypes(struct txgbe_hw *hw, + struct ethtool_link_ksettings *cmd) +{ + switch (hw->phy.sfp_type) { + case txgbe_sfp_type_da_cu: + case txgbe_sfp_type_da_act_lmt_core0: + case txgbe_sfp_type_da_act_lmt_core1: + case txgbe_sfp_type_da_cu_core0: + case txgbe_sfp_type_da_cu_core1: + case txgbe_sfp_type_srlr_core0: + case txgbe_sfp_type_srlr_core1: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseLR_Full); + break; + case txgbe_sfp_type_sr: + case txgbe_sfp_type_25g_sr_core0: + case txgbe_sfp_type_25g_sr_core1: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseSR_Full); + break; + case txgbe_sfp_type_lr: + case txgbe_sfp_type_25g_lr_core0: + case txgbe_sfp_type_25g_lr_core1: + ethtool_link_ksettings_add_link_mode(cmd, 
supported, + 10000baseLR_Full); + break; + case txgbe_sfp_type_1g_cu_core0: + case txgbe_sfp_type_1g_cu_core1: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseT_Full); + break; + case txgbe_sfp_type_1g_sx_core0: + case txgbe_sfp_type_1g_sx_core1: + case txgbe_sfp_type_1g_lx_core0: + case txgbe_sfp_type_1g_lx_core1: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseX_Full); + break; + default: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseT_Full); + break; + } - return phylink_ethtool_nway_reset(txgbe->phylink); + return 0; } static int txgbe_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { - struct txgbe *txgbe = netdev_to_txgbe(netdev); + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u32 supported_link; + u32 link_speed = 0; + bool autoneg = false; + bool link_up; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + TCALL(hw, mac.ops.get_link_capabilities, &supported_link, &autoneg); + + if((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4) + autoneg = adapter->backplane_an ? 1:0; + else if((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) + autoneg = adapter->autoneg?1:0; + + /* set the supported link speeds */ + if (hw->phy.media_type == txgbe_media_type_copper) { + if (supported_link & TXGBE_LINK_SPEED_10GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseT_Full); + if (supported_link & TXGBE_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseT_Full); + if (supported_link & TXGBE_LINK_SPEED_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 100baseT_Full); + + if (supported_link & TXGBE_LINK_SPEED_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Full); + } else if (hw->phy.media_type == txgbe_media_type_fiber_qsfp) { + if (supported_link & TXGBE_LINK_SPEED_40GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 40000baseSR4_Full); + } else if (hw->phy.media_type == txgbe_media_type_fiber) { + if (supported_link & TXGBE_LINK_SPEED_25GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 25000baseSR_Full); + + if ((supported_link & TXGBE_LINK_SPEED_10GB_FULL) || + (supported_link & TXGBE_LINK_SPEED_1GB_FULL)) + txgbe_set_supported_1g_10gtypes(hw, cmd); + if (hw->phy.multispeed_fiber && hw->mac.type == txgbe_mac_sp) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseX_Full); + } else { + switch (hw->phy.link_mode) { + case TXGBE_PHYSICAL_LAYER_10GBASE_KX4: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseKX4_Full); + break; + case TXGBE_PHYSICAL_LAYER_10GBASE_KR: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseKR_Full); + break; + case TXGBE_PHYSICAL_LAYER_1000BASE_KX: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseKX_Full); + break; + default: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseKX4_Full); + break; + } + } + + /* set the advertised speeds */ + if (hw->phy.autoneg_advertised) { + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_40GB_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 40000baseSR4_Full); + } + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_25GB_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 25000baseSR_Full); + } + if 
(hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_10GB_FULL) { + if (hw->phy.media_type == txgbe_media_type_copper) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseT_Full); + } else if (hw->phy.media_type == txgbe_media_type_fiber) { + txgbe_set_advertising_1g_10gtypes(hw, cmd, + hw->phy.autoneg_advertised); + } else { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseKX4_Full); + } + } + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_1GB_FULL) { + if (hw->phy.media_type == txgbe_media_type_copper) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Full); + else if (hw->phy.media_type == txgbe_media_type_fiber) + txgbe_set_advertising_1g_10gtypes(hw, cmd, + hw->phy.autoneg_advertised); + else + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseKX_Full); + } + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_100_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Full); + } + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_10_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); + } + } else { + if (supported_link & TXGBE_LINK_SPEED_40GB_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 40000baseSR4_Full); + } + if (supported_link & TXGBE_LINK_SPEED_25GB_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 25000baseSR_Full); + } + if (supported_link & TXGBE_LINK_SPEED_10GB_FULL) { + if (hw->phy.media_type == txgbe_media_type_copper) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseT_Full); + } else if (hw->phy.media_type == txgbe_media_type_fiber) { + txgbe_set_advertising_1g_10gtypes(hw, cmd, + TXGBE_LINK_SPEED_10GB_FULL); + } else { + switch (hw->phy.link_mode) { + case TXGBE_PHYSICAL_LAYER_10GBASE_KX4: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseKX4_Full); + break; + case TXGBE_PHYSICAL_LAYER_10GBASE_KR: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseKR_Full); + break; + default: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseKX4_Full); + break; + } + } + } + if (supported_link & TXGBE_LINK_SPEED_1GB_FULL) { + if (hw->phy.media_type == txgbe_media_type_copper) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Full); + else if (hw->phy.media_type == txgbe_media_type_fiber) + txgbe_set_advertising_1g_10gtypes(hw, cmd, + TXGBE_LINK_SPEED_1GB_FULL); + else + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseKX_Full); + } + if (supported_link & TXGBE_LINK_SPEED_100_FULL) { + if (hw->phy.media_type == txgbe_media_type_copper) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Full); + } + if (supported_link & TXGBE_LINK_SPEED_10_FULL) { + if (hw->phy.media_type == txgbe_media_type_copper) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); + } + } + + if (autoneg) { + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + cmd->base.autoneg = AUTONEG_ENABLE; + } else + cmd->base.autoneg = AUTONEG_DISABLE; + + /* Determine the remaining settings based on the PHY type. 
*/ + switch (adapter->hw.phy.type) { + case txgbe_phy_tn: + case txgbe_phy_aq: + case txgbe_phy_cu_unknown: + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + cmd->base.port = PORT_TP; + break; + case txgbe_phy_qt: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_FIBRE; + break; + case txgbe_phy_nl: + case txgbe_phy_sfp_passive_tyco: + case txgbe_phy_sfp_passive_unknown: + case txgbe_phy_sfp_active_unknown: + case txgbe_phy_sfp_ftl_active: + case txgbe_phy_sfp_ftl: + case txgbe_phy_sfp_avago: + case txgbe_phy_sfp_intel: + case txgbe_phy_sfp_unknown: + switch (adapter->hw.phy.sfp_type) { + /* SFP+ devices, further checking needed */ + case txgbe_sfp_type_da_cu: + case txgbe_sfp_type_da_cu_core0: + case txgbe_sfp_type_da_cu_core1: + case txgbe_qsfp_type_40g_cu_core0: + case txgbe_qsfp_type_40g_cu_core1: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_DA; + break; + case txgbe_sfp_type_sr: + case txgbe_sfp_type_lr: + case txgbe_sfp_type_srlr_core0: + case txgbe_sfp_type_srlr_core1: + case txgbe_sfp_type_1g_sx_core0: + case txgbe_sfp_type_1g_sx_core1: + case txgbe_sfp_type_1g_lx_core0: + case txgbe_sfp_type_1g_lx_core1: + case txgbe_sfp_type_da_act_lmt_core0: + case txgbe_sfp_type_da_act_lmt_core1: + case txgbe_sfp_type_25g_sr_core0: + case txgbe_sfp_type_25g_sr_core1: + case txgbe_sfp_type_25g_lr_core0: + case txgbe_sfp_type_25g_lr_core1: + case txgbe_sfp_type_25g_aoc_core0: + case txgbe_sfp_type_25g_aoc_core1: + case txgbe_qsfp_type_40g_sr_core0: + case txgbe_qsfp_type_40g_sr_core1: + case txgbe_qsfp_type_40g_lr_core0: + case txgbe_qsfp_type_40g_lr_core1: + case txgbe_qsfp_type_40g_active_core0: + case txgbe_qsfp_type_40g_active_core1: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_FIBRE; + break; + case txgbe_sfp_type_not_present: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_NONE; + break; + case txgbe_sfp_type_1g_cu_core0: + case txgbe_sfp_type_1g_cu_core1: + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + cmd->base.port = PORT_TP; + break; + case txgbe_sfp_type_unknown: + default: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_OTHER; + break; + } + break; + case txgbe_phy_xaui: + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + cmd->base.port = PORT_TP; + break; + case txgbe_phy_unknown: + case txgbe_phy_generic: + case txgbe_phy_sfp_unsupported: + default: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_OTHER; + break; + } + + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI && + (hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) { + /* only continue if link was up previously */ + if (!netif_carrier_ok(netdev)) { + cmd->base.speed = -1; + cmd->base.duplex = -1; + + return 0; + } + } + if (!in_interrupt()) { + TCALL(hw, mac.ops.check_link, 
&link_speed, &link_up, false); + } else { + /* + * this case is a special workaround for RHEL5 bonding + * that calls this routine from interrupt context + */ + link_speed = adapter->link_speed; + link_up = adapter->link_up; + } + + /* Indicate pause support */ + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + + switch (hw->fc.requested_mode) { + case txgbe_fc_full: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + break; + case txgbe_fc_rx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + case txgbe_fc_tx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + default: + ethtool_link_ksettings_del_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_del_link_mode(cmd, advertising, + Asym_Pause); + } + + if (link_up) { + switch (link_speed) { + case TXGBE_LINK_SPEED_40GB_FULL: + cmd->base.speed = SPEED_40000; + break; + case TXGBE_LINK_SPEED_25GB_FULL: + cmd->base.speed = SPEED_25000; + break; + case TXGBE_LINK_SPEED_10GB_FULL: + cmd->base.speed = SPEED_10000; + break; + case TXGBE_LINK_SPEED_1GB_FULL: + cmd->base.speed = SPEED_1000; + break; + case TXGBE_LINK_SPEED_100_FULL: + cmd->base.speed = SPEED_100; + break; + case TXGBE_LINK_SPEED_10_FULL: + cmd->base.speed = SPEED_10; + break; + default: + break; + } + cmd->base.duplex = DUPLEX_FULL; + } else { + cmd->base.speed = -1; + cmd->base.duplex = -1; + } + + if (!netif_carrier_ok(netdev)) { + cmd->base.speed = -1; + cmd->base.duplex = -1; + } + +#ifdef ETHTOOL_GFECPARAM + if (hw->mac.type == txgbe_mac_aml) { + ethtool_link_ksettings_add_link_mode(cmd, supported, FEC_NONE); + ethtool_link_ksettings_add_link_mode(cmd, supported, FEC_RS); + ethtool_link_ksettings_add_link_mode(cmd, supported, FEC_BASER); + if (adapter->fec_link_mode & TXGBE_PHY_FEC_OFF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, FEC_NONE); + if (adapter->fec_link_mode & TXGBE_PHY_FEC_RS) + ethtool_link_ksettings_add_link_mode(cmd, advertising, FEC_RS); + if (adapter->fec_link_mode & TXGBE_PHY_FEC_BASER) + ethtool_link_ksettings_add_link_mode(cmd, advertising, FEC_BASER); + } +#endif + + if(!adapter->autoneg) + ethtool_link_ksettings_del_link_mode(cmd, advertising, Autoneg); + else + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + cmd->base.autoneg = adapter->autoneg; + + return 0; +} +#else /* !ETHTOOL_GLINKSETTINGS */ +static __u32 txgbe_backplane_type(struct txgbe_hw *hw) +{ + __u32 mode = 0x00; + switch (hw->phy.link_mode) { + case TXGBE_PHYSICAL_LAYER_10GBASE_KX4: + mode = SUPPORTED_10000baseKX4_Full; + break; + case TXGBE_PHYSICAL_LAYER_10GBASE_KR: + mode = SUPPORTED_10000baseKR_Full; + break; + case TXGBE_PHYSICAL_LAYER_1000BASE_KX: + mode = SUPPORTED_1000baseKX_Full; + break; + default: + mode = (SUPPORTED_10000baseKX4_Full | + SUPPORTED_10000baseKR_Full | + SUPPORTED_1000baseKX_Full); + break; + } + return mode; +} + +int txgbe_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u32 supported_link = 0; + u32 link_speed = 0; + bool autoneg = false; + bool link_up = false; + + TCALL(hw, mac.ops.get_link_capabilities, &supported_link, &autoneg); + + if((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4) + autoneg = adapter->backplane_an ? 
1:0; + else if((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) + autoneg = adapter->autoneg?1:0; + + /* set the supported link speeds */ + if (supported_link & TXGBE_LINK_SPEED_10GB_FULL) + ecmd->supported |= (txgbe_isbackplane(hw->phy.media_type)) ? + txgbe_backplane_type(hw) : SUPPORTED_10000baseT_Full; + if (supported_link & TXGBE_LINK_SPEED_1GB_FULL) + ecmd->supported |= (txgbe_isbackplane(hw->phy.media_type)) ? + SUPPORTED_1000baseKX_Full : SUPPORTED_1000baseT_Full; + if (supported_link & TXGBE_LINK_SPEED_100_FULL) + ecmd->supported |= SUPPORTED_100baseT_Full; + if (supported_link & TXGBE_LINK_SPEED_10_FULL) + ecmd->supported |= SUPPORTED_10baseT_Full; + + /* default advertised speed if phy.autoneg_advertised isn't set */ + ecmd->advertising = ecmd->supported; + + /* set the advertised speeds */ + if (hw->phy.autoneg_advertised) { + ecmd->advertising = 0; + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_100_FULL) + ecmd->advertising |= ADVERTISED_100baseT_Full; + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_10GB_FULL) + ecmd->advertising |= (ecmd->supported & ADVERTISED_MASK_10G); + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_1GB_FULL) { + if (ecmd->supported & SUPPORTED_1000baseKX_Full) + ecmd->advertising |= ADVERTISED_1000baseKX_Full; + else + ecmd->advertising |= ADVERTISED_1000baseT_Full; + } + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_10_FULL) + ecmd->advertising |= ADVERTISED_10baseT_Full; + } else { + /* default modes in case phy.autoneg_advertised isn't set */ + if (supported_link & TXGBE_LINK_SPEED_10GB_FULL) + ecmd->advertising |= (txgbe_isbackplane(hw->phy.media_type)) ? + txgbe_backplane_type(hw) : SUPPORTED_10000baseT_Full; + if (supported_link & TXGBE_LINK_SPEED_1GB_FULL) { + if (ecmd->supported & SUPPORTED_1000baseKX_Full) + ecmd->advertising |= ADVERTISED_1000baseKX_Full; + else + ecmd->advertising |= ADVERTISED_1000baseT_Full; + } + if (supported_link & TXGBE_LINK_SPEED_100_FULL) + ecmd->advertising |= ADVERTISED_100baseT_Full; + if (hw->phy.multispeed_fiber && !autoneg) { + if (supported_link & TXGBE_LINK_SPEED_10GB_FULL) + ecmd->advertising = ADVERTISED_10000baseT_Full; + } + if (supported_link & TXGBE_LINK_SPEED_10_FULL) + ecmd->advertising |= ADVERTISED_10baseT_Full; + } + + if (autoneg) { + ecmd->supported |= SUPPORTED_Autoneg; + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->autoneg = AUTONEG_ENABLE; + } else + ecmd->autoneg = AUTONEG_DISABLE; + + ecmd->transceiver = XCVR_EXTERNAL; + + /* Determine the remaining settings based on the PHY type. 
*/ + switch (adapter->hw.phy.type) { + case txgbe_phy_tn: + case txgbe_phy_aq: + case txgbe_phy_cu_unknown: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + break; + case txgbe_phy_qt: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + break; + case txgbe_phy_nl: + case txgbe_phy_sfp_passive_tyco: + case txgbe_phy_sfp_passive_unknown: + case txgbe_phy_sfp_ftl_active: + case txgbe_phy_sfp_ftl: + case txgbe_phy_sfp_avago: + case txgbe_phy_sfp_intel: + case txgbe_phy_sfp_unknown: + switch (adapter->hw.phy.sfp_type) { + /* SFP+ devices, further checking needed */ + case txgbe_sfp_type_da_cu: + case txgbe_sfp_type_da_cu_core0: + case txgbe_sfp_type_da_cu_core1: + case txgbe_qsfp_type_40g_cu_core0: + case txgbe_qsfp_type_40g_cu_core1: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_DA; + break; + case txgbe_sfp_type_sr: + case txgbe_sfp_type_lr: + case txgbe_sfp_type_srlr_core0: + case txgbe_sfp_type_srlr_core1: + case txgbe_sfp_type_1g_sx_core0: + case txgbe_sfp_type_1g_sx_core1: + case txgbe_sfp_type_1g_lx_core0: + case txgbe_sfp_type_1g_lx_core1: + case txgbe_sfp_type_da_act_lmt_core0: + case txgbe_sfp_type_da_act_lmt_core1: + case txgbe_sfp_type_25g_sr_core0: + case txgbe_sfp_type_25g_sr_core1: + case txgbe_sfp_type_25g_lr_core0: + case txgbe_sfp_type_25g_lr_core1: + case txgbe_sfp_type_25g_aoc_core0: + case txgbe_sfp_type_25g_aoc_core1: + case txgbe_qsfp_type_40g_sr_core0: + case txgbe_qsfp_type_40g_sr_core1: + case txgbe_qsfp_type_40g_lr_core0: + case txgbe_qsfp_type_40g_lr_core1: + case txgbe_qsfp_type_40g_active_core0: + case txgbe_qsfp_type_40g_active_core1: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + break; + case txgbe_sfp_type_not_present: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_NONE; + break; + case txgbe_sfp_type_1g_cu_core0: + case txgbe_sfp_type_1g_cu_core1: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + break; + case txgbe_sfp_type_unknown: + default: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_OTHER; + break; + } + break; + case txgbe_phy_xaui: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + break; + case txgbe_phy_unknown: + case txgbe_phy_generic: + case txgbe_phy_sfp_unsupported: + default: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_OTHER; + break; + } + + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI && + (hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) { + /* only continue if link was up previously */ + if (!netif_carrier_ok(netdev)) { + ecmd->speed = -1; + ecmd->duplex = -1; - return phylink_ethtool_ksettings_get(txgbe->phylink, cmd); + return 0; + } + } + if (!in_interrupt()) { + TCALL(hw, mac.ops.check_link, &link_speed, &link_up, false); + } else { + /* + * this case is a special workaround for RHEL5 bonding + * that calls this routine from interrupt context + */ + link_speed = adapter->link_speed; + link_up = adapter->link_up; + } + + ecmd->supported |= SUPPORTED_Pause; + + switch (hw->fc.requested_mode) { + case txgbe_fc_full: + ecmd->advertising |= ADVERTISED_Pause; + break; + case txgbe_fc_rx_pause: + ecmd->advertising |= ADVERTISED_Pause | + 
ADVERTISED_Asym_Pause; + break; + case txgbe_fc_tx_pause: + ecmd->advertising |= ADVERTISED_Asym_Pause; + break; + default: + ecmd->advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + } + + if (link_up) { + switch (link_speed) { + case TXGBE_LINK_SPEED_25GB_FULL: + ecmd->speed = SPEED_25000; + break; + case TXGBE_LINK_SPEED_10GB_FULL: + ecmd->speed = SPEED_10000; + break; + case TXGBE_LINK_SPEED_1GB_FULL: + ecmd->speed = SPEED_1000; + break; + case TXGBE_LINK_SPEED_100_FULL: + ecmd->speed = SPEED_100; + break; + case TXGBE_LINK_SPEED_10_FULL: + ecmd->speed = SPEED_10; + break; + default: + break; + } + ecmd->duplex = DUPLEX_FULL; + } else { + ecmd->speed = -1; + ecmd->duplex = -1; + } + if(!adapter->autoneg) + ecmd->advertising &= ~ADVERTISED_Autoneg; + ecmd->autoneg = adapter->autoneg?AUTONEG_ENABLE:AUTONEG_DISABLE; + return 0; } +#endif /* !ETHTOOL_GLINKSETTINGS */ + +#ifdef ETHTOOL_GLINKSETTINGS static int txgbe_set_link_ksettings(struct net_device *netdev, - const struct ethtool_link_ksettings *cmd) + const struct ethtool_link_ksettings *cmd) { - struct txgbe *txgbe = netdev_to_txgbe(netdev); + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u32 advertised, old, link_support; + bool autoneg; + s32 err = 0; + struct ethtool_link_ksettings temp_ks; + u32 curr_autoneg = 2; + + if((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4) + adapter->backplane_an = cmd->base.autoneg ? 1 : 0; + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) + adapter->autoneg = cmd->base.autoneg ? 1 : 0; + if ((hw->phy.media_type == txgbe_media_type_copper) || (hw->phy.multispeed_fiber)) { + memcpy(&temp_ks, cmd, sizeof(struct ethtool_link_ksettings)); + /* To be compatible with test cases */ + if (hw->phy.media_type == txgbe_media_type_fiber) { + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 25000baseSR_Full)) { + ethtool_link_ksettings_add_link_mode(&temp_ks, supported, + 25000baseSR_Full); + } + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseT_Full)) { + ethtool_link_ksettings_add_link_mode(&temp_ks, supported, + 10000baseT_Full); +#ifndef HAVE_NO_ETHTOOL_10000SR + ethtool_link_ksettings_del_link_mode(&temp_ks, supported, + 10000baseSR_Full); +#endif +#ifndef HAVE_NO_ETHTOOL_10000LR + ethtool_link_ksettings_del_link_mode(&temp_ks, supported, + 10000baseLR_Full); +#endif + } + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseT_Full)) { + ethtool_link_ksettings_add_link_mode(&temp_ks, supported, + 1000baseT_Full); +#ifndef HAVE_NO_ETHTOOL_1000X + ethtool_link_ksettings_del_link_mode(&temp_ks, supported, + 1000baseX_Full); +#endif + } + } + + /* + * this function does not support duplex forcing, but can + * limit the advertising of the adapter to the specified speed + */ + if (!bitmap_subset(cmd->link_modes.advertising, temp_ks.link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS)) + return -EINVAL; + + old = hw->phy.autoneg_advertised; + advertised = 0; + if (!cmd->base.autoneg) { + if (cmd->base.speed == SPEED_25000) + advertised |= TXGBE_LINK_SPEED_25GB_FULL; + else if (cmd->base.speed == SPEED_10000) + advertised |= TXGBE_LINK_SPEED_10GB_FULL; + else if (cmd->base.speed == SPEED_1000) + advertised |= TXGBE_LINK_SPEED_1GB_FULL; + else + advertised |= old; + }else{ + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 25000baseSR_Full)) + advertised |= TXGBE_LINK_SPEED_25GB_FULL; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseSR_Full) || + 
ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseLR_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseT_Full)) + advertised |= TXGBE_LINK_SPEED_10GB_FULL; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 1000baseX_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, 1000baseT_Full)) + advertised |= TXGBE_LINK_SPEED_1GB_FULL; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 100baseT_Full)) + advertised |= TXGBE_LINK_SPEED_100_FULL; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 10baseT_Full)) + advertised |= TXGBE_LINK_SPEED_10_FULL; + } + + if (advertised == TXGBE_LINK_SPEED_1GB_FULL && + hw->phy.media_type != txgbe_media_type_copper) { + curr_autoneg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); + curr_autoneg = !!(curr_autoneg & (0x1 << 12)); + if (old == advertised && (curr_autoneg == !!(cmd->base.autoneg))) + return 0; + } + + err = TCALL(hw, mac.ops.get_link_capabilities, + &link_support, &autoneg); + if (err) + e_info(probe, "get link capabilities failed with code %d\n", err); + if (!(link_support & advertised)) { + e_info(probe, "unsupported advertised: %x", advertised); + return -EINVAL; + } + + /* this sets the link speed and restarts auto-neg */ + while (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + adapter->autoneg = cmd->base.autoneg ? 1 : 0; + hw->mac.autotry_restart = true; + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + txgbe_service_event_schedule(adapter); + err = TCALL(hw, mac.ops.setup_link, advertised, true); + if (err) { + e_info(probe, "setup link failed with code %d\n", err); + TCALL(hw, mac.ops.setup_link, old, true); + } + if ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) + TCALL(hw, mac.ops.flap_tx_laser); + + /* notify fw autoneg status */ + txgbe_hic_write_autoneg_status(hw, cmd->base.autoneg); + + clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); + } else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4 || + (hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) { + if (!cmd->base.autoneg) { + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseKR_Full) && + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseKX_Full) && + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseKX4_Full)) + return -EINVAL; + } - return phylink_ethtool_ksettings_set(txgbe->phylink, cmd); + advertised = 0; + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseKR_Full)) { + err = txgbe_set_link_to_kr(hw, 1); + advertised |= TXGBE_LINK_SPEED_10GB_FULL; + } else if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseKX4_Full)) { + err = txgbe_set_link_to_kx4(hw, 1); + advertised |= TXGBE_LINK_SPEED_10GB_FULL; + } else if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseKX_Full)) { + advertised |= TXGBE_LINK_SPEED_1GB_FULL; + err = txgbe_set_link_to_kx(hw, TXGBE_LINK_SPEED_1GB_FULL, 0); + txgbe_set_sgmii_an37_ability(hw); + } + if (err) + err = -EACCES; + + return err; + } else { + /* in this case we currently only support 10Gb/FULL */ + u32 speed = cmd->base.speed; + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + return -EINVAL; + } else if ((ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseT_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseKR_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseKX4_Full) || + 
ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseLR_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseSR_Full))) { + if ((cmd->base.autoneg == AUTONEG_ENABLE) || + (!ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseT_Full)) || + (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL)) + return -EINVAL; + } else if ((ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseT_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseKX_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseX_Full))) { + memcpy(&temp_ks, cmd, sizeof(struct ethtool_link_ksettings)); + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseT_Full)) { + ethtool_link_ksettings_add_link_mode(&temp_ks, supported, + 1000baseT_Full); +#ifndef HAVE_NO_ETHTOOL_1000X + ethtool_link_ksettings_del_link_mode(&temp_ks, supported, + 1000baseX_Full); +#endif + ethtool_link_ksettings_del_link_mode(&temp_ks, supported, + 1000baseKX_Full); + } + + if (!bitmap_subset(cmd->link_modes.advertising, + temp_ks.link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS)) + return -EINVAL; + + old = hw->phy.autoneg_advertised; + advertised = 0; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseX_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, 1000baseT_Full)) + advertised |= TXGBE_LINK_SPEED_1GB_FULL; + + +#if 0 + if (hw->mac.type == txgbe_mac_aml) { + curr_autoneg = txgbe_rd32_epcs(hw, SR_AN_CTRL); + curr_autoneg = !!(curr_autoneg & (0x1 << 12)); + } +#endif + if (advertised == TXGBE_LINK_SPEED_1GB_FULL) { + curr_autoneg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); + curr_autoneg = !!(curr_autoneg & (0x1 << 12)); + } + if (old == advertised && (curr_autoneg == !!cmd->base.autoneg)) + return -EINVAL; + /* this sets the link speed and restarts auto-neg */ + while (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + + adapter->autoneg = cmd->base.autoneg?1:0; + hw->mac.autotry_restart = true; + err = TCALL(hw, mac.ops.setup_link, advertised, true); + if (err) { + e_info(probe, "setup link failed with code %d\n", err); + TCALL(hw, mac.ops.setup_link, old, true); + } + if ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) + TCALL(hw, mac.ops.flap_tx_laser); + + /* notify fw autoneg status */ + txgbe_hic_write_autoneg_status(hw, cmd->base.autoneg); + + clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); + } + adapter->autoneg = cmd->base.autoneg?1:0; + } + if (err) + return -EINVAL; + + return err; } +#else /* !ETHTOOL_GLINKSETTINGS */ +static int txgbe_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u32 advertised, old; + u32 curr_autoneg = 2; + s32 err = 0; -static const struct ethtool_ops txgbe_ethtool_ops = { - .get_drvinfo = wx_get_drvinfo, - .nway_reset = txgbe_nway_reset, - .get_link = ethtool_op_get_link, - .get_link_ksettings = txgbe_get_link_ksettings, - .set_link_ksettings = txgbe_set_link_ksettings, -}; + if((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4) + adapter->backplane_an = ecmd->autoneg ? 1 : 0; + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) + adapter->autoneg = ecmd->autoneg? 
1 : 0; -void txgbe_set_ethtool_ops(struct net_device *netdev) + if ((hw->phy.media_type == txgbe_media_type_copper) || + (hw->phy.multispeed_fiber)) { + /* + * this function does not support duplex forcing, but can + * limit the advertising of the adapter to the specified speed + */ + if (ecmd->advertising & ~ecmd->supported) + return -EINVAL; + + /* only allow one speed at a time if no autoneg */ + if (!ecmd->autoneg && hw->phy.multispeed_fiber) { + if (ecmd->advertising == + (ADVERTISED_10000baseT_Full | ADVERTISED_1000baseT_Full)) + return -EINVAL; + } + + old = hw->phy.autoneg_advertised; + advertised = 0; + + if (ecmd->advertising & ADVERTISED_10000baseT_Full) + advertised |= TXGBE_LINK_SPEED_10GB_FULL; + + if (ecmd->advertising & ADVERTISED_1000baseT_Full) + advertised |= TXGBE_LINK_SPEED_1GB_FULL; + + if (ecmd->advertising & ADVERTISED_100baseT_Full) + advertised |= TXGBE_LINK_SPEED_100_FULL; + + if (ecmd->advertising & ADVERTISED_10baseT_Full) + advertised |= TXGBE_LINK_SPEED_10_FULL; + + if (advertised == TXGBE_LINK_SPEED_1GB_FULL && + hw->phy.media_type != txgbe_media_type_copper) { + curr_autoneg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); + curr_autoneg = !!(curr_autoneg & (0x1 << 12)); + if (old == advertised && (curr_autoneg == !!ecmd->autoneg)) + return err; + } + + if (advertised == TXGBE_LINK_SPEED_10GB_FULL && + ecmd->autoneg == AUTONEG_DISABLE) + return -EINVAL; + + /* this sets the link speed and restarts auto-neg */ + while (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + + adapter->autoneg = ecmd->autoneg ? 1 : 0; + hw->mac.autotry_restart = true; + err = TCALL(hw, mac.ops.setup_link, advertised, true); + if (err) { + e_info(probe, "setup link failed with code %d\n", err); + TCALL(hw, mac.ops.setup_link, old, true); + } + + if ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) + TCALL(hw, mac.ops.flap_tx_laser); + + /* notify fw autoneg status */ + txgbe_hic_write_autoneg_status(hw, ecmd->autoneg); + + clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); + } else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4 || + (hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) { + if (!ecmd->autoneg) { + if (ecmd->advertising == + (ADVERTISED_10000baseKR_Full | ADVERTISED_1000baseKX_Full | + ADVERTISED_10000baseKX4_Full)) + return -EINVAL; + } + + advertised = 0; + if (ecmd->advertising & ADVERTISED_10000baseKR_Full) { + err = txgbe_set_link_to_kr(hw, 1); + advertised |= TXGBE_LINK_SPEED_10GB_FULL; + } else if (ecmd->advertising & ADVERTISED_10000baseKX4_Full) { + err = txgbe_set_link_to_kx4(hw, 1); + advertised |= TXGBE_LINK_SPEED_10GB_FULL; + } else if (ecmd->advertising & ADVERTISED_1000baseKX_Full) { + advertised |= TXGBE_LINK_SPEED_1GB_FULL; + err = txgbe_set_link_to_kx(hw, TXGBE_LINK_SPEED_1GB_FULL, 0); + txgbe_set_sgmii_an37_ability(hw); + } + if (err) + return -EACCES; + return err; + } else { + /* in this case we currently only support 10Gb/FULL and 1Gb/FULL*/ + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + return -EINVAL; + } else if (ecmd->advertising & ADVERTISED_1000baseT_Full) { + if (ecmd->advertising & ~ecmd->supported) + return -EINVAL; + + old = hw->phy.autoneg_advertised; + advertised = 0; + + if (ecmd->advertising & ADVERTISED_1000baseT_Full) + advertised |= TXGBE_LINK_SPEED_1GB_FULL; + + if (advertised == TXGBE_LINK_SPEED_1GB_FULL) { + curr_autoneg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); + curr_autoneg = !!(curr_autoneg & (0x1 << 12)); + } + if (old == advertised 
&& (curr_autoneg == !!ecmd->autoneg)) + return err; + /* this sets the link speed and restarts auto-neg */ + while (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + + adapter->autoneg = ecmd->autoneg ? 1 : 0; + hw->mac.autotry_restart = true; + err = TCALL(hw, mac.ops.setup_link, advertised, true); + if (err) { + e_info(probe, "setup link failed with code %d\n", err); + TCALL(hw, mac.ops.setup_link, old, true); + } + if ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) + TCALL(hw, mac.ops.flap_tx_laser); + + /* notify fw autoneg status */ + txgbe_hic_write_autoneg_status(hw, ecmd->autoneg); + + clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); + } + adapter->autoneg = ecmd->autoneg ? 1 : 0; + } + + if (err) + return -EINVAL; + + return err; +} +#endif /* !ETHTOOL_GLINKSETTINGS */ +#ifdef ETHTOOL_GFECPARAM +static int txgbe_get_fec_param(struct net_device *netdev, + struct ethtool_fecparam *fecparam) { - netdev->ethtool_ops = &txgbe_ethtool_ops; + int err = 0; + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u32 supported_link = 0; + bool autoneg = false; + u32 speed = 0; + bool link_up; + + TCALL(hw, mac.ops.get_link_capabilities, &supported_link, &autoneg); + + if (hw->mac.type != txgbe_mac_aml) { + err = -EAGAIN; + goto done; + } + TCALL(hw, mac.ops.check_link, &speed, &link_up, false); + fecparam->fec = 0; + if (speed == TXGBE_LINK_SPEED_10GB_FULL) { + fecparam->fec |= ETHTOOL_FEC_OFF; + fecparam->active_fec = ETHTOOL_FEC_OFF; + goto done; + } + if (adapter->fec_link_mode == TXGBE_PHY_FEC_AUTO) + fecparam->fec |= ETHTOOL_FEC_AUTO; + else if (adapter->fec_link_mode & TXGBE_PHY_FEC_BASER) + fecparam->fec |= ETHTOOL_FEC_BASER; + else if (adapter->fec_link_mode & TXGBE_PHY_FEC_RS) + fecparam->fec |= ETHTOOL_FEC_RS; + else + fecparam->fec |= ETHTOOL_FEC_OFF; + + if (!link_up) { + fecparam->active_fec = ETHTOOL_FEC_OFF; + goto done; + } + switch (adapter->cur_fec_link) { + case TXGBE_PHY_FEC_BASER: + fecparam->active_fec = ETHTOOL_FEC_BASER; + break; + case TXGBE_PHY_FEC_RS: + fecparam->active_fec = ETHTOOL_FEC_RS; + break; + case TXGBE_PHY_FEC_OFF: + fecparam->active_fec = ETHTOOL_FEC_OFF; + break; + default: + fecparam->active_fec = ETHTOOL_FEC_OFF; + break; + } +done: + return err; +} + +static int txgbe_set_fec_param(struct net_device *netdev, + struct ethtool_fecparam *fecparam) +{ + int err = 0; + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u8 cur_fec_mode = adapter->fec_link_mode; + bool autoneg = false; + u32 supported_link = 0; + + TCALL(hw, mac.ops.get_link_capabilities, &supported_link, &autoneg); + + if (hw->mac.type != txgbe_mac_aml) { + err = -EAGAIN; + goto done; + } + + switch (fecparam->fec) { + case ETHTOOL_FEC_AUTO: + adapter->fec_link_mode = TXGBE_PHY_FEC_AUTO; + break; + case ETHTOOL_FEC_BASER: + adapter->fec_link_mode = TXGBE_PHY_FEC_BASER; + break; + case ETHTOOL_FEC_OFF: + case ETHTOOL_FEC_NONE: + adapter->fec_link_mode = TXGBE_PHY_FEC_OFF; + break; + case ETHTOOL_FEC_RS: + adapter->fec_link_mode = TXGBE_PHY_FEC_RS; + break; + default: + e_warn(drv, "Unsupported FEC mode: %d", + fecparam->fec); + err = -EINVAL; + goto done; + } + if (cur_fec_mode != adapter->fec_link_mode) { + /* reset link */ + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + txgbe_service_event_schedule(adapter); + } +done: + return err; +} +#endif /* ETHTOOL_GFECPARAM */ +static void txgbe_get_pauseparam(struct net_device *netdev, + struct 
ethtool_pauseparam *pause) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + + if (txgbe_device_supports_autoneg_fc(hw) && + !hw->fc.disable_fc_autoneg) + pause->autoneg = 1; + else + pause->autoneg = 0; + + if (hw->fc.current_mode == txgbe_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == txgbe_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == txgbe_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int txgbe_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_fc_info fc = hw->fc; + + + /* some devices do not support autoneg of flow control */ + if ((pause->autoneg == AUTONEG_ENABLE) && + !txgbe_device_supports_autoneg_fc(hw)) + return -EINVAL; + + fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); + + if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) + fc.requested_mode = txgbe_fc_full; + else if (pause->rx_pause) + fc.requested_mode = txgbe_fc_rx_pause; + else if (pause->tx_pause) + fc.requested_mode = txgbe_fc_tx_pause; + else + fc.requested_mode = txgbe_fc_none; + + /* if the thing changed then we'll update and use new autoneg */ + if (memcmp(&fc, &hw->fc, sizeof(struct txgbe_fc_info))) { + hw->fc = fc; + if (netif_running(netdev)) + txgbe_reinit_locked(adapter); + else + txgbe_reset(adapter); + } + + return 0; +} + +static u32 txgbe_get_msglevel(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void txgbe_set_msglevel(struct net_device *netdev, u32 data) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int txgbe_get_regs_len(struct net_device __always_unused *netdev) +{ +#define TXGBE_REGS_LEN 4096 + return TXGBE_REGS_LEN * sizeof(u32); +} + +#define TXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_) + + +static void txgbe_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u32 i; + u32 id = 0; + + memset(p, 0, TXGBE_REGS_LEN * sizeof(u32)); + regs_buff[TXGBE_REGS_LEN - 1] = 0x55555555; + + regs->version = hw->revision_id << 16 | + hw->device_id; + + /* Global Registers */ + /* chip control */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_MIS_PWR);//0 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_MIS_CTL);//1 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_MIS_PF_SM);//2 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_MIS_RST);//3 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_MIS_ST);//4 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_MIS_SWSM);//5 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_MIS_RST_ST);//6 + /* pvt sensor */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TS_CTL);//7 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TS_EN);//8 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TS_ST);//9 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TS_ALARM_THRE);//10 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TS_DALARM_THRE);//11 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TS_INT_EN);//12 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TS_ALARM_ST);//13 + /* Fmgr Register */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_CMD);//14 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_DATA);//15 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_STATUS);//16 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_USR_CMD);//17 + regs_buff[id++] 
= TXGBE_R32_Q(hw, TXGBE_SPI_CMDCFG0);//18 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_CMDCFG1);//19 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_ILDR_STATUS);//20 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_ILDR_SWPTR);//21 + + /* Port Registers */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_PORT_CTL);//22 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_PORT_ST);//23 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_EX_VTYPE);//24 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_VXLAN);//25 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_VXLAN_GPE);//26 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_GENEVE);//27 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_TEREDO);//28 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_TCP_TIME);//29 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_LED_CTL);//30 + /* GPIO */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_GPIO_DR);//31 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_GPIO_DDR);//32 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_GPIO_CTL);//33 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_GPIO_INTEN);//34 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_GPIO_INTMASK);//35 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_GPIO_INTSTATUS);//36 + /* I2C */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CON);//37 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_TAR);//38 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_DATA_CMD);//39 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_SS_SCL_HCNT);//40 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_SS_SCL_LCNT);//41 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_FS_SCL_HCNT);//42 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_FS_SCL_LCNT);//43 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_HS_SCL_HCNT);//44 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_INTR_STAT);//45 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_INTR_MASK);//46 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_RAW_INTR_STAT);//47 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_RX_TL);//48 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_TX_TL);//49 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_INTR);//50 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_RX_UNDER);//51 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_RX_OVER);//52 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_TX_OVER);//53 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_RD_REQ);//54 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_TX_ABRT);//55 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_RX_DONE);//56 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_ACTIVITY);//57 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_STOP_DET);//58 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_START_DET);//59 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_GEN_CALL);//60 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_ENABLE);//61 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_STATUS);//62 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_TXFLR);//63 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_RXFLR);//64 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_SDA_HOLD);//65 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_TX_ABRT_SOURCE);//66 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_SDA_SETUP);//67 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_ENABLE_STATUS);//68 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_FS_SPKLEN);//69 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_HS_SPKLEN);//70 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_SCL_STUCK_TIMEOUT);//71 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_SDA_STUCK_TIMEOUT);//72 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_SCL_STUCK_DET);//73 + regs_buff[id++] = TXGBE_R32_Q(hw, 
TXGBE_I2C_DEVICE_ID);//74 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_COMP_PARAM_1);//75 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_COMP_VERSION);//76 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_COMP_TYPE);//77 + /* TX TPH */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_TPH_TDESC);//78 + /* RX TPH */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_TPH_RDESC);//79 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_TPH_RHDR);//80 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_TPH_RPL);//81 + + /* TDMA */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_CTL);//82 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_VF_TE(0));//83 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_VF_TE(1));//84 + for (i = 0; i < 8; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_PB_THRE(i));//85-92 + } + for (i = 0; i < 4; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_LLQ(i));//93-96 + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_ETYPE_LB_L);//97 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_ETYPE_LB_H);//98 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_ETYPE_AS_L);//99 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_ETYPE_AS_H);//100 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_MAC_AS_L);//101 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_MAC_AS_H);//102 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_VLAN_AS_L);//103 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_VLAN_AS_H);//104 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_TCP_FLG_L);//105 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_TCP_FLG_H);//106 + for (i = 0; i < 64; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_VLAN_INS(i));//107-234 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_ETAG_INS(i)); + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_PBWARB_CTL);//235 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_MMW);//236 + for (i = 0; i < 8; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_PBWARB_CFG(i));//237-244 + } + for (i = 0; i < 128; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_VM_CREDIT(i));//245-372 + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_FC_EOF);//373 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_FC_SOF);//374 + + /* RDMA */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDM_ARB_CTL);//375 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDM_VF_RE(0));//376 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDM_VF_RE(1));//377 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDM_RSC_CTL);//378 + for (i = 0; i < 8; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDM_ARB_CFG(i));//379-386 + } + for (i = 0; i < 4; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDM_PF_QDE(i));//387-394 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDM_PF_HIDE(i)); + } + + /* RDB */ + /*flow control */ + for (i = 0; i < 4; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RFCV(i));//395-398 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RFCL(i));//399-414 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RFCH(i)); + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RFCRT);//415 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RFCC);//416 + /* receive packet buffer */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_PB_CTL);//417 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_PB_WRAP);//418 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_UP2TC);//419 + for (i = 0; i < 8; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_PB_SZ(i));//420-435 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_MPCNT(i)); + } + /* lli interrupt */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_LLI_THRE);//436 + /* ring assignment */ + for (i = 0; i < 64; i++) { + 
regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_PL_CFG(i));//437-500 + } + for (i = 0; i < 32; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RSSTBL(i));//501-532 + } + for (i = 0; i < 10; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RSSRK(i));//533-542 + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RSS_TC);//543 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RA_CTL);//544 + for (i = 0; i < 128; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_5T_SA(i));//545-1184 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_5T_DA(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_5T_SDP(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_5T_CTL0(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_5T_CTL1(i)); + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_SYN_CLS);//1185 + for (i = 0; i < 8; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_ETYPE_CLS(i));//1186-1193 + } + /* fcoe redirection table */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FCRE_CTL);//1194 + for (i = 0; i < 8; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FCRE_TBL(i));//1195-1202 + } + /*flow director */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_CTL);//1203 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_HKEY);//1204 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_SKEY);//1205 + for (i = 0; i < 16; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_FLEX_CFG(i));//1206-1221 + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_FREE);//1222 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_LEN);//1223 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_USE_ST);//1224 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_FAIL_ST);//1225 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_MATCH);//1226 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_MISS);//1227 + for (i = 0; i < 3; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_IP6(i));//1228-1230 + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_SA);//1231 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_DA);//1232 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_PORT);//1233 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_FLEX);//1234 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_HASH);//1235 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_CMD);//1236 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_DA4_MSK);//1237 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_SA4_MSK);//1238 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_TCP_MSK);//1239 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_UDP_MSK);//1240 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_SCTP_MSK);//1241 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_IP6_MSK);//1242 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_OTHER_MSK);//1243 + + /* PSR */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_CTL);//1244 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VLAN_CTL);//1245 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VM_CTL);//1246 + for (i = 0; i < 64; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VM_L2CTL(i));//1247-1310 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_ETYPE_SWC(i));//1311-1318 + } + for (i = 0; i < 128; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MC_TBL(i));//1319-1702 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_UC_TBL(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VLAN_TBL(i)); + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MAC_SWC_AD_L);//1703 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MAC_SWC_AD_H);//1704 + regs_buff[id++] = TXGBE_R32_Q(hw, 
TXGBE_PSR_MAC_SWC_VM_L);//1705 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MAC_SWC_VM_H);//1706 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MAC_SWC_IDX);//1707 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VLAN_SWC);//1708 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VLAN_SWC_VM_L);//1709 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VLAN_SWC_VM_H);//1710 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VLAN_SWC_IDX);//1711 + for (i = 0; i < 4; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MR_CTL(i));//1712-1731 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MR_VLAN_L(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MR_VLAN_H(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MR_VM_L(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MR_VM_H(i)); + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_1588_CTL);//1732 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_1588_STMPL);//1733 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_1588_STMPH);//1734 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_1588_ATTRL);//1735 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_1588_ATTRH);//1736 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_1588_MSGTYPE);//1737 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_WKUP_CTL);//1738 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_WKUP_IPV);//1739 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_LAN_FLEX_CTL);//1740 + for (i = 0; i < 4; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_WKUP_IP4TBL(i));//1741-1748 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_WKUP_IP6TBL(i)); + } + for (i = 0; i < 16; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_LAN_FLEX_DW_L(i));//1749-1796 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_LAN_FLEX_DW_H(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_LAN_FLEX_MSK(i)); + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_LAN_FLEX_CTL);//1797 + + /* TDB */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_TFCS);//1798 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_PB_SZ(0));//1799 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_UP2TC);//1800 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_PBRARB_CTL);//1801 + for (i = 0; i < 8; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_PBRARB_CFG(i));//1802-1809 + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_MNG_TC);//1810 + + /* tsec */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_CTL);//1811 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_ST);//1812 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_BUF_AF);//1813 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_BUF_AE);//1814 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_MIN_IFG);//1815 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_CTL);//1816 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_STMPL);//1817 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_STMPH);//1818 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_SYSTIML);//1819 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_SYSTIMH);//1820 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_INC);//1821 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_ADJL);//1822 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_ADJH);//1823 + + /* RSEC */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RSC_CTL);//1824 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RSC_ST);//1825 + + /* BAR register */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_MISC_IC);//1826 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_MISC_ICS);//1827 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_MISC_IEN);//1828 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_GPIE);//1829 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_IC(0));//1830 + 
regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_IC(1));//1831 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_ICS(0));//1832 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_ICS(1));//1833 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_IMS(0));//1834 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_IMS(1));//1835 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_IMC(0));//1836 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_IMC(1));//1837 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_ISB_ADDR_L);//1838 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_ISB_ADDR_H);//1839 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_ITRSEL);//1840 + for (i = 0; i < 64; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_ITR(i));//1841-1968 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_IVAR(i)); + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_MISC_IVAR);//1969 + for (i = 0; i < 128; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_RR_BAL(i));//1970-3249 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_RR_BAH(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_RR_WP(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_RR_RP(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_RR_CFG(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_TR_BAL(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_TR_BAH(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_TR_WP(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_TR_RP(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_TR_CFG(i)); + } +} + +static int txgbe_get_eeprom_len(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + return adapter->hw.eeprom.word_size * 2; +} + +static int txgbe_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word, eeprom_len; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_len = last_word - first_word + 1; + + eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ret_val = TCALL(hw, eeprom.ops.read_buffer, first_word, eeprom_len, + eeprom_buff); + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < eeprom_len; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int txgbe_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EINVAL; + + max_len = hw->eeprom.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = eeprom_buff; + + if (eeprom->offset & 1) { + /* + * need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = TCALL(hw, eeprom.ops.read, first_word, + &eeprom_buff[0]); + if (ret_val) + goto err; + + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 
0)) { + /* + * need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = TCALL(hw, eeprom.ops.read, last_word, + &eeprom_buff[last_word - first_word]); + if (ret_val) + goto err; + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = TCALL(hw, eeprom.ops.write_buffer, first_word, + last_word - first_word + 1, + eeprom_buff); + + /* Update the checksum */ + if (ret_val == 0) + TCALL(hw, eeprom.ops.update_checksum); + +err: + kfree(eeprom_buff); + return ret_val; +} + +static void txgbe_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + strscpy(drvinfo->driver, txgbe_driver_name, + sizeof(drvinfo->driver)); + strscpy(drvinfo->version, txgbe_driver_version, + sizeof(drvinfo->version)); + strscpy(drvinfo->fw_version, adapter->fw_version, + sizeof(drvinfo->fw_version)); + strscpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + if (adapter->num_tx_queues <= TXGBE_NUM_RX_QUEUES) { + drvinfo->n_stats = TXGBE_STATS_LEN - + (TXGBE_NUM_RX_QUEUES - adapter->num_tx_queues)* + (sizeof(struct txgbe_queue_stats) / sizeof(u64))*2; + }else{ + drvinfo->n_stats = TXGBE_STATS_LEN; + } + drvinfo->testinfo_len = TXGBE_TEST_LEN; + drvinfo->regdump_len = txgbe_get_regs_len(netdev); +} + +static void txgbe_get_ringparam(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *ringp, + struct netlink_ext_ack *extack) +#else + struct ethtool_ringparam *ring) +#endif +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = TXGBE_MAX_RXD; + ring->tx_max_pending = TXGBE_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int txgbe_set_ringparam(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *ringp, + struct netlink_ext_ack *extack) +#else + struct ethtool_ringparam *ring) +#endif +{ + struct txgbe_ring *tx_ring = NULL, *rx_ring = NULL; + struct txgbe_adapter *adapter = netdev_priv(netdev); + u32 new_rx_count, new_tx_count; + int i, j, err = 0; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_tx_count = clamp_t(u32, ring->tx_pending, + TXGBE_MIN_TXD, TXGBE_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, TXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); + + new_rx_count = clamp_t(u32, ring->rx_pending, + TXGBE_MIN_RXD, TXGBE_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, TXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) { + /* nothing to do */ + return 0; + } + +#ifdef HAVE_AF_XDP_ZC_SUPPORT + /* If there is a AF_XDP UMEM attached to any of Rx rings, + * disallow changing the number of descriptors -- regardless + * if the netdev is running or not. 
+ */ + if (txgbe_xsk_any_rx_ring_enabled(adapter)) + return -EBUSY; +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ + + while (test_and_set_bit(__TXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_xdp_queues; i++) + adapter->xdp_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->xdp_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto done; + } + + i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues, + adapter->num_rx_queues); + + /* + * Setup new Tx resources and free the old Tx resources in that order. + * We can then assign the new resources to the rings via a memcpy. + * The advantage to this approach is that we are guaranteed to still + * have resources even in the case of an allocation failure. + */ + if (new_tx_count != adapter->tx_ring_count) { + netdev_info(netdev, + "Changing Tx descriptor count from %d to %d.\n", + adapter->tx_ring[0]->count, new_tx_count); + tx_ring = kcalloc(i, sizeof(struct txgbe_ring), GFP_KERNEL); + if (!tx_ring) { + err = -ENOMEM; + goto done; + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&tx_ring[i], adapter->tx_ring[i], + sizeof(struct txgbe_ring)); + + tx_ring[i].count = new_tx_count; + /* the desc and bi pointers will be reallocated + * in the setup call + */ + tx_ring[i].desc = NULL; + tx_ring[i].tx_buffer_info = NULL; + err = txgbe_setup_tx_resources(&tx_ring[i]); + if (err) { + while (i) { + i--; + txgbe_free_tx_resources(&tx_ring[i]); + } + + kfree(tx_ring); + tx_ring = NULL; + err = -ENOMEM; + + goto done; + } + } + + for (j = 0; j < adapter->num_xdp_queues; j++, i++) { + memcpy(&tx_ring[i], adapter->xdp_ring[j], + sizeof(struct txgbe_ring)); + + tx_ring[i].count = new_tx_count; + /* the desc and bi pointers will be reallocated + * in the setup call + */ + tx_ring[i].desc = NULL; + tx_ring[i].tx_buffer_info = NULL; + err = txgbe_setup_tx_resources(&tx_ring[i]); + if (err) { + while (i) { + i--; + txgbe_free_tx_resources(&tx_ring[i]); + } + + kfree(tx_ring); + tx_ring = NULL; + err = -ENOMEM; + + goto done; + } + } + } + + /* Repeat the process for the Rx rings if needed */ + if (new_rx_count != adapter->rx_ring_count) { + netdev_info(netdev, + "Changing Rx descriptor count from %d to %d\n", + adapter->rx_ring[0]->count, new_rx_count); + rx_ring = kcalloc(i, sizeof(struct txgbe_ring), GFP_KERNEL); + if (!rx_ring) { + err = -ENOMEM; + goto free_tx; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + u16 unused; + + memcpy(&rx_ring[i], adapter->rx_ring[i], + sizeof(struct txgbe_ring)); +#ifdef HAVE_XDP_BUFF_RXQ + xdp_rxq_info_unreg(&rx_ring[i].xdp_rxq); +#endif + rx_ring[i].count = new_rx_count; + /* the desc and bi pointers will be reallocated + * in the setup call + */ + rx_ring[i].desc = NULL; + rx_ring[i].rx_buffer_info = NULL; + err = txgbe_setup_rx_resources(&rx_ring[i]); + if (err) + goto rx_unwind; + + unused = txgbe_desc_unused(&rx_ring[i]); + err = txgbe_alloc_rx_buffers(&rx_ring[i], unused); +rx_unwind: + if (err) { + err = -ENOMEM; + + do { + txgbe_free_rx_resources(&rx_ring[i]); + } while (i--); + kfree(rx_ring); + rx_ring = NULL; + + goto free_tx; + } + } + } + + /* Bring interface down, copy in the new ring info, + * then restore the interface + */ + txgbe_down(adapter); + + if (tx_ring) { + 
for (i = 0; i < adapter->num_tx_queues; i++) { + txgbe_free_tx_resources(adapter->tx_ring[i]); + memcpy(adapter->tx_ring[i], &tx_ring[i], + sizeof(struct txgbe_ring)); + } + + for (j = 0; j < adapter->num_xdp_queues; j++, i++) { + txgbe_free_tx_resources(adapter->xdp_ring[j]); + memcpy(adapter->xdp_ring[j], &tx_ring[i], + sizeof(struct txgbe_ring)); + } + + kfree(tx_ring); + tx_ring = NULL; + } + + if (rx_ring) { + for (i = 0; i < adapter->num_rx_queues; i++) { + txgbe_free_rx_resources(adapter->rx_ring[i]); + /* this is to fake out the allocation routine + * into thinking it has to realloc everything + * but the recycling logic will let us re-use + * the buffers allocated above + */ + rx_ring[i].next_to_use = 0; + rx_ring[i].next_to_clean = 0; + rx_ring[i].next_to_alloc = 0; + /* do a struct copy */ + memcpy(adapter->rx_ring[i], &rx_ring[i], + sizeof(struct txgbe_ring)); + } + kfree(rx_ring); + rx_ring = NULL; + } + + adapter->tx_ring_count = new_tx_count; + adapter->xdp_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + + txgbe_up(adapter); + +free_tx: +/* error cleanup if the Rx allocations failed after getting Tx */ + if (tx_ring) { + for (i = 0; i < adapter->num_tx_queues; i++) { + txgbe_free_tx_resources(adapter->tx_ring[i]); + memcpy(adapter->tx_ring[i], &tx_ring[i], + sizeof(struct txgbe_ring)); + } + + for (j = 0; j < adapter->num_xdp_queues; j++, i++) { + txgbe_free_tx_resources(adapter->xdp_ring[j]); + memcpy(adapter->xdp_ring[j], &tx_ring[i], + sizeof(struct txgbe_ring)); + } + + kfree(tx_ring); + tx_ring = NULL; + } + +done: + clear_bit(__TXGBE_RESETTING, &adapter->state); + + return err; } + +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT +static int txgbe_get_stats_count(struct net_device *netdev) +{ + if (adapter->num_tx_queues <= TXGBE_NUM_RX_QUEUES) { + return TXGBE_STATS_LEN - (TXGBE_NUM_RX_QUEUES - adapter->num_tx_queues)* + (sizeof(struct txgbe_queue_stats) / sizeof(u64))*2; + }else{ + return TXGBE_STATS_LEN; + } + +} + +#else /* HAVE_ETHTOOL_GET_SSET_COUNT */ +static int txgbe_get_sset_count(struct net_device *netdev, int sset) +{ +#ifdef HAVE_TX_MQ +#ifndef HAVE_NETDEV_SELECT_QUEUE + struct txgbe_adapter *adapter = netdev_priv(netdev); +#endif +#endif + struct txgbe_adapter *adapter = netdev_priv(netdev); + + switch (sset) { + case ETH_SS_TEST: + return TXGBE_TEST_LEN; + case ETH_SS_STATS: + if (adapter->num_tx_queues <= TXGBE_NUM_RX_QUEUES) { + return TXGBE_STATS_LEN - (TXGBE_NUM_RX_QUEUES - adapter->num_tx_queues)* + (sizeof(struct txgbe_queue_stats) / sizeof(u64))*2; + }else{ + return TXGBE_STATS_LEN; + } + case ETH_SS_PRIV_FLAGS: + return TXGBE_PRIV_FLAGS_STR_LEN; + default: + return -EOPNOTSUPP; + } +} + +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT +/** + * txgbe_get_priv_flags - report device private flags + * @dev: network interface device structure + * + * The get string set count and the string set should be matched for each + * flag returned. Add new strings for each flag to the txgbe_gstrings_priv_flags + * array. + * + * Returns a u32 bitmap of flags. 
+ **/ +static u32 txgbe_get_priv_flags(struct net_device *dev) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + u32 i, ret_flags = 0; + + for (i = 0; i < TXGBE_PRIV_FLAGS_STR_LEN; i++) { + const struct txgbe_priv_flags *priv_flags; + + priv_flags = &txgbe_gstrings_priv_flags[i]; + + if (priv_flags->flag & adapter->eth_priv_flags) + ret_flags |= BIT(i); + } + return ret_flags; +} + +/** + * txgbe_set_priv_flags - set private flags + * @dev: network interface device structure + * @flags: bit flags to be set + **/ +static int txgbe_set_priv_flags(struct net_device *dev, u32 flags) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + u32 orig_flags, new_flags, changed_flags; + bool reset_needed = 0; + u32 i; + s32 status = 0; + + orig_flags = adapter->eth_priv_flags; + new_flags = orig_flags; + + for (i = 0; i < TXGBE_PRIV_FLAGS_STR_LEN; i++) { + const struct txgbe_priv_flags *priv_flags; + + priv_flags = &txgbe_gstrings_priv_flags[i]; + + if (flags & BIT(i)) + new_flags |= priv_flags->flag; + else + new_flags &= ~(priv_flags->flag); + + /* If this is a read-only flag, it can't be changed */ + if (priv_flags->read_only && + ((orig_flags ^ new_flags) & BIT(i))) + return -EOPNOTSUPP; + } + + changed_flags = orig_flags ^ new_flags; + + if(!changed_flags) return 0; + + if (changed_flags & TXGBE_ETH_PRIV_FLAG_LLDP) + reset_needed = 1; + + if (changed_flags & TXGBE_ETH_PRIV_FLAG_LLDP) { + status = txgbe_hic_write_lldp(&adapter->hw, (u32)(new_flags & TXGBE_ETH_PRIV_FLAG_LLDP)); + if (!status) + adapter->eth_priv_flags = new_flags; + } + +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC + if (changed_flags & TXGBE_ETH_PRIV_FLAG_LEGACY_RX) { + adapter->eth_priv_flags = new_flags; + + if (adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LEGACY_RX) + adapter->flags2 |= TXGBE_FLAG2_RX_LEGACY; + else + adapter->flags2 &= ~TXGBE_FLAG2_RX_LEGACY; + + /* reset interface to repopulate queues */ + if (netif_running(dev)) + txgbe_reinit_locked(adapter); + } +#endif + + return status; +} + +#endif /*HAVE_ETHTOOL_GET_SSET_COUNT*/ + +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ +static void txgbe_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *net_stats = &netdev->stats; +#else + struct net_device_stats *net_stats = &adapter->net_stats; +#endif +#ifdef HAVE_NDO_GET_STATS64 + unsigned int start; +#endif + struct txgbe_ring *ring; + int i, j; + char *p; + + txgbe_update_stats(adapter); + + for (i = 0; i < TXGBE_NETDEV_STATS_LEN; i++) { + p = (char *)net_stats + txgbe_gstrings_net_stats[i].stat_offset; + data[i] = (txgbe_gstrings_net_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < TXGBE_GLOBAL_STATS_LEN; j++, i++) { + p = (char *)adapter + txgbe_gstrings_stats[j].stat_offset; + data[i] = (txgbe_gstrings_stats[j].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { + ring = adapter->tx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; +#ifdef BP_EXTENDED_STATS + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; +#endif + continue; + } + +#ifdef HAVE_NDO_GET_STATS64 + do { + start = u64_stats_fetch_begin(&ring->syncp); +#endif + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; +#ifdef HAVE_NDO_GET_STATS64 + } while (u64_stats_fetch_retry(&ring->syncp, start)); +#endif + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i+1] = ring->stats.misses; + data[i+2] = ring->stats.cleaned; + i += 3; +#endif + } + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; +#ifdef BP_EXTENDED_STATS + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; +#endif + continue; + } + +#ifdef HAVE_NDO_GET_STATS64 + do { + start = u64_stats_fetch_begin(&ring->syncp); +#endif + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; +#ifdef HAVE_NDO_GET_STATS64 + } while (u64_stats_fetch_retry(&ring->syncp, start)); +#endif + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i+1] = ring->stats.misses; + data[i+2] = ring->stats.cleaned; + i += 3; +#endif + } + for (j = 0; j < TXGBE_MAX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxontxc[j]; + data[i++] = adapter->stats.pxofftxc[j]; + } + for (j = 0; j < TXGBE_MAX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxonrxc[j]; + data[i++] = adapter->stats.pxoffrxc[j]; + } +} + +static void txgbe_get_priv_flag_strings(struct net_device *netdev, u8 *data) +{ + char *p = (char *)data; + unsigned int i; + + for (i = 0; i < TXGBE_PRIV_FLAGS_STR_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + txgbe_gstrings_priv_flags[i].flag_string); + p += ETH_GSTRING_LEN; + } +} + +static void txgbe_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + char *p = (char *)data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *txgbe_gstrings_test, + TXGBE_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < TXGBE_NETDEV_STATS_LEN; i++) { + memcpy(p, txgbe_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < TXGBE_GLOBAL_STATS_LEN; i++) { + memcpy(p, txgbe_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_tx_queues; i++) { + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; +#ifdef BP_EXTENDED_STATS + sprintf(p, "tx_queue_%u_bp_napi_yield", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bp_misses", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bp_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* BP_EXTENDED_STATS */ + } + for (i = 0; i < adapter->num_rx_queues; i++) { + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; +#ifdef BP_EXTENDED_STATS + sprintf(p, "rx_queue_%u_bp_poll_yield", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bp_misses", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bp_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* BP_EXTENDED_STATS */ + } + for (i = 0; i < TXGBE_MAX_PACKET_BUFFERS; i++) { + sprintf(p, "tx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; + 
} + for (i = 0; i < TXGBE_MAX_PACKET_BUFFERS; i++) { + sprintf(p, "rx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != TXGBE_STATS_LEN * ETH_GSTRING_LEN); */ + break; + case ETH_SS_PRIV_FLAGS: + txgbe_get_priv_flag_strings(netdev, data); + break; + } +} + +static int txgbe_link_test(struct txgbe_adapter *adapter, u64 *data) +{ + struct txgbe_hw *hw = &adapter->hw; + bool link_up = false; + u32 link_speed = 0; + + if (TXGBE_REMOVED(hw->hw_addr)) { + *data = 1; + return 1; + } + *data = 0; + TCALL(hw, mac.ops.check_link, &link_speed, &link_up, true); + if (link_up) + return *data; + else + *data = 1; + return *data; +} + +/* ethtool register test data */ +struct txgbe_reg_test { + u32 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. + */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* default sapphire register test */ +static struct txgbe_reg_test reg_test_sapphire[] = { + { TXGBE_RDB_RFCL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { TXGBE_RDB_RFCH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { TXGBE_PSR_VLAN_CTL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, + { TXGBE_PX_RR_BAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { TXGBE_PX_RR_BAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { TXGBE_PX_RR_CFG(0), 4, WRITE_NO_TEST, 0, TXGBE_PX_RR_CFG_RR_EN }, + { TXGBE_RDB_RFCH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { TXGBE_RDB_RFCV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { TXGBE_PX_TR_BAL(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { TXGBE_PX_TR_BAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { TXGBE_RDB_PB_CTL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, + { TXGBE_PSR_MC_TBL(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { .reg = 0 } +}; + + +static bool reg_pattern_test(struct txgbe_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 pat, val, before; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + + if (TXGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = rd32(&adapter->hw, reg); + wr32(&adapter->hw, reg, test_pattern[pat] & write); + val = rd32(&adapter->hw, reg); + if (val != (test_pattern[pat] & write & mask)) { + e_err(drv, + "pattern test reg %04X failed: got 0x%08X " + "expected 0x%08X\n", + reg, val, test_pattern[pat] & write & mask); + *data = reg; + wr32(&adapter->hw, reg, before); + return true; + } + wr32(&adapter->hw, reg, before); + } + return false; +} + +static bool reg_set_and_check(struct txgbe_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 val, before; + + if (TXGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + before = rd32(&adapter->hw, reg); + wr32(&adapter->hw, reg, write & mask); + val = rd32(&adapter->hw, reg); + if ((write & mask) != (val & mask)) { + e_err(drv, + "set/check reg %04X test failed: got 
0x%08X expected" + "0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; + wr32(&adapter->hw, reg, before); + return true; + } + wr32(&adapter->hw, reg, before); + return false; +} + +static bool txgbe_reg_test(struct txgbe_adapter *adapter, u64 *data) +{ + struct txgbe_reg_test *test; + struct txgbe_hw *hw = &adapter->hw; + u32 i; + + if (TXGBE_REMOVED(hw->hw_addr)) { + e_err(drv, "Adapter removed - register test blocked\n"); + *data = 1; + return true; + } + + test = reg_test_sapphire; + + /* + * Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. + */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + bool b = false; + + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + wr32(hw, test->reg + (i * 0x40), + test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + (test->reg + 4) + (i * 8), + test->mask, + test->write); + break; + } + if (b) + return true; + } + test++; + } + + *data = 0; + return false; +} + +static bool txgbe_eeprom_test(struct txgbe_adapter *adapter, u64 *data) +{ + struct txgbe_hw *hw = &adapter->hw; + + if (TCALL(hw, eeprom.ops.validate_checksum, NULL)) { + *data = 1; + return true; + } else { + *data = 0; + return false; + } +} + +static irqreturn_t txgbe_test_intr(int __always_unused irq, void *data) +{ + struct net_device *netdev = (struct net_device *) data; + struct txgbe_adapter *adapter = netdev_priv(netdev); + u64 icr; + + /* get misc interrupt, as cannot get ring interrupt status */ + icr = txgbe_misc_isb(adapter, TXGBE_ISB_VEC1); + icr <<= 32; + icr |= txgbe_misc_isb(adapter, TXGBE_ISB_VEC0); + + adapter->test_icr = icr; + + return IRQ_HANDLED; +} + +static int txgbe_intr_test(struct txgbe_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u64 mask; + u32 i = 0, shared_int = true; + u32 irq = adapter->pdev->irq; + + if (TXGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return -1; + } + *data = 0; + + txgbe_setup_isb_resources(adapter); + txgbe_configure_isb(adapter); + /* Hook up test interrupt handler just for this test */ + if (adapter->msix_entries) { + /* NOTE: we don't test MSI-X interrupts here, yet */ + return 0; + } else if (adapter->flags & TXGBE_FLAG_MSI_ENABLED) { + shared_int = false; + if (request_irq(irq, &txgbe_test_intr, 0, netdev->name, + netdev)) { + *data = 1; + return -1; + } + } else if (!request_irq(irq, &txgbe_test_intr, IRQF_PROBE_SHARED, + netdev->name, netdev)) { + shared_int = false; + } else if (request_irq(irq, &txgbe_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", + (shared_int ? 
"shared" : "unshared")); + + /* Disable all the interrupts */ + txgbe_irq_disable(adapter); + TXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Test each interrupt */ + for (; i < 1; i++) { + /* Interrupt to test */ + mask = 1ULL << i; + + if (!shared_int) { + /* + * Disable the interrupts to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + txgbe_intr_disable(&adapter->hw, ~mask); + txgbe_intr_trigger(&adapter->hw, ~mask); + TXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* + * Enable the interrupt to be reported in the cause + * register and then force the same interrupt and see + * if one gets posted. If an interrupt was not posted + * to the bus, the test failed. + */ + adapter->test_icr = 0; + txgbe_intr_enable(&adapter->hw, mask); + txgbe_intr_trigger(&adapter->hw, mask); + TXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + } + + /* Disable all the interrupts */ + txgbe_intr_disable(&adapter->hw, TXGBE_INTR_ALL); + TXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + txgbe_free_isb_resources(adapter); + + return *data; +} + +static void txgbe_free_desc_rings(struct txgbe_adapter *adapter) +{ + struct txgbe_ring *tx_ring = &adapter->test_tx_ring; + struct txgbe_ring *rx_ring = &adapter->test_rx_ring; + struct txgbe_hw *hw = &adapter->hw; + + /* shut down the DMA engines now so they can be reinitialized later */ + + /* first Rx */ + TCALL(hw, mac.ops.disable_rx); + txgbe_disable_rx_queue(adapter, rx_ring); + + /* now Tx */ + wr32(hw, TXGBE_PX_TR_CFG(tx_ring->reg_idx), 0); + + wr32m(hw, TXGBE_TDM_CTL, TXGBE_TDM_CTL_TE, 0); + + txgbe_reset(adapter); + + txgbe_free_tx_resources(&adapter->test_tx_ring); + txgbe_free_rx_resources(&adapter->test_rx_ring); +} + +static void txgbe_loopback_configure_tx_ring(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + u64 tdba = ring->dma; + int wait_loop = 10; + u32 txdctl = TXGBE_PX_TR_CFG_ENABLE; + u8 reg_idx = ring->reg_idx; +#ifdef HAVE_AF_XDP_ZC_SUPPORT + ring->xsk_pool = NULL; + if (ring_is_xdp(ring)) + ring->xsk_pool = txgbe_xsk_umem(adapter, ring); +#endif + /* disable queue to avoid issues while updating state */ + wr32(hw, TXGBE_PX_TR_CFG(reg_idx), TXGBE_PX_TR_CFG_SWFLSH); + TXGBE_WRITE_FLUSH(hw); + + wr32(hw, TXGBE_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32)); + wr32(hw, TXGBE_PX_TR_BAH(reg_idx), tdba >> 32); + + /* reset head and tail pointers */ + wr32(hw, TXGBE_PX_TR_RP(reg_idx), 0); + wr32(hw, TXGBE_PX_TR_WP(reg_idx), 0); + ring->tail = adapter->io_addr + TXGBE_PX_TR_WP(reg_idx); + + /* reset ntu and ntc to place SW in sync with hardwdare */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + + txdctl |= TXGBE_RING_SIZE(ring) << TXGBE_PX_TR_CFG_TR_SIZE_SHIFT; + + /* + * set WTHRESH to encourage burst writeback, it should not be set + * higher than 1 when: + * - ITR is 0 as it could cause false TX hangs + * - ITR is set to > 100k int/sec and BQL is enabled + * + * In order to avoid issues WTHRESH + PTHRESH should always be equal + * to or less than the number of on chip descriptors, which is + * currently 40. 
+ */ + + txdctl |= 0x20 << TXGBE_PX_TR_CFG_WTHRESH_SHIFT; + + /* reinitialize flowdirector state */ + if (adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE) { + ring->atr_sample_rate = adapter->atr_sample_rate; + ring->atr_count = 0; + set_bit(__TXGBE_TX_FDIR_INIT_DONE, &ring->state); + } else { + ring->atr_sample_rate = 0; + } + + /* initialize XPS */ + if (!test_and_set_bit(__TXGBE_TX_XPS_INIT_DONE, &ring->state)) { + struct txgbe_q_vector *q_vector = ring->q_vector; + + if (q_vector) + netif_set_xps_queue(adapter->netdev, + &q_vector->affinity_mask, + ring->queue_index); + } + + clear_bit(__TXGBE_HANG_CHECK_ARMED, &ring->state); + + /* enable queue */ + wr32(hw, TXGBE_PX_TR_CFG(reg_idx), txdctl); + + /* poll to verify queue is enabled */ + do { + msleep(20); + txdctl = rd32(hw, TXGBE_PX_TR_CFG(reg_idx)); + } while (--wait_loop && !(txdctl & TXGBE_PX_TR_CFG_ENABLE)); + if (!wait_loop) + e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); +} + + +static int txgbe_setup_desc_rings(struct txgbe_adapter *adapter) +{ + struct txgbe_ring *tx_ring = &adapter->test_tx_ring; + struct txgbe_ring *rx_ring = &adapter->test_rx_ring; + struct txgbe_hw *hw = &adapter->hw; + int ret_val; + int err; + + TCALL(hw, mac.ops.setup_rxpba, 0, 0, PBA_STRATEGY_EQUAL); + + /* Setup Tx descriptor ring and Tx buffers */ + tx_ring->count = TXGBE_DEFAULT_TXD; + tx_ring->queue_index = 0; + tx_ring->dev = pci_dev_to_dev(adapter->pdev); + tx_ring->netdev = adapter->netdev; + tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; + + err = txgbe_setup_tx_resources(tx_ring); + if (err) + return 1; + + wr32m(&adapter->hw, TXGBE_TDM_CTL, + TXGBE_TDM_CTL_TE, TXGBE_TDM_CTL_TE); + + txgbe_loopback_configure_tx_ring(adapter, tx_ring); + + /* enable mac transmitter */ + + if (hw->mac.type == txgbe_mac_aml40) { + wr32(hw, TXGBE_MAC_TX_CFG, (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_AML_SPEED_40G); + } else if (hw->mac.type == txgbe_mac_aml) { + if ((rd32(hw, TXGBE_CFG_PORT_ST) & TXGBE_CFG_PORT_ST_AML_LINK_10G) == + TXGBE_CFG_PORT_ST_AML_LINK_10G) + wr32(hw, TXGBE_MAC_TX_CFG, (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_AML_SPEED_10G); + else + wr32(hw, TXGBE_MAC_TX_CFG, (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_AML_SPEED_25G); + } else { + if(txgbe_check_reset_blocked(hw) && (hw->phy.autoneg_advertised == TXGBE_LINK_SPEED_1GB_FULL || + adapter->link_speed == TXGBE_LINK_SPEED_1GB_FULL)) + wr32m(hw, TXGBE_MAC_TX_CFG, + TXGBE_MAC_TX_CFG_TE | TXGBE_MAC_TX_CFG_SPEED_MASK, + TXGBE_MAC_TX_CFG_TE | TXGBE_MAC_TX_CFG_SPEED_1G); + else + wr32m(hw, TXGBE_MAC_TX_CFG, + TXGBE_MAC_TX_CFG_TE | TXGBE_MAC_TX_CFG_SPEED_MASK, + TXGBE_MAC_TX_CFG_TE | TXGBE_MAC_TX_CFG_SPEED_10G); + } + + /* Setup Rx Descriptor ring and Rx buffers */ + rx_ring->count = TXGBE_DEFAULT_RXD; + rx_ring->queue_index = 0; + rx_ring->dev = pci_dev_to_dev(adapter->pdev); + rx_ring->netdev = adapter->netdev; + rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; +#ifdef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + rx_ring->rx_buf_len = TXGBE_RXBUFFER_2K; +#endif + + err = txgbe_setup_rx_resources(rx_ring); + if (err) { + ret_val = 4; + goto err_nomem; + } + + TCALL(hw, mac.ops.disable_rx); + + txgbe_configure_rx_ring(adapter, rx_ring); + + TCALL(hw, mac.ops.enable_rx); + + return 0; + +err_nomem: + txgbe_free_desc_rings(adapter); + return ret_val; +} + +static int txgbe_setup_config(struct txgbe_adapter 
*adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 reg_data; + + /* Setup traffic loopback */ + reg_data = rd32(hw, TXGBE_PSR_CTL); + reg_data |= TXGBE_PSR_CTL_BAM | TXGBE_PSR_CTL_UPE | + TXGBE_PSR_CTL_MPE | TXGBE_PSR_CTL_TPE; + wr32(hw, TXGBE_PSR_CTL, reg_data); + + wr32(hw, TXGBE_RSC_CTL, + (rd32(hw, TXGBE_RSC_CTL) | + TXGBE_RSC_CTL_SAVE_MAC_ERR) & ~TXGBE_RSC_CTL_SECRX_DIS); + + wr32(hw, TXGBE_RSC_LSEC_CTL, 0x4); + + wr32(hw, TXGBE_PSR_VLAN_CTL, + rd32(hw, TXGBE_PSR_VLAN_CTL) & + ~TXGBE_PSR_VLAN_CTL_VFE); + + wr32m(&adapter->hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_LM, ~TXGBE_MAC_RX_CFG_LM); + wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_FORCE_LKUP, ~TXGBE_CFG_PORT_CTL_FORCE_LKUP); + + /* enable mac transmitter */ + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + wr32m(hw, TXGBE_TSC_CTL, + TXGBE_TSC_CTL_TX_DIS | TXGBE_TSC_MACTX_AFIFO_RD_WTRMRK, 0xd0000); + + wr32m(hw, TXGBE_RSC_CTL, + TXGBE_RSC_CTL_RX_DIS, 0); + } + + TXGBE_WRITE_FLUSH(hw); + usleep_range(10000, 20000); + + return 0; +} + +static int txgbe_setup_mac_loopback_test(struct txgbe_adapter *adapter) +{ + wr32m(&adapter->hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_LM | TXGBE_MAC_RX_CFG_RE, + TXGBE_MAC_RX_CFG_LM | TXGBE_MAC_RX_CFG_RE); + + wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_FORCE_LKUP, TXGBE_CFG_PORT_CTL_FORCE_LKUP); + + return 0; +} + +static void txgbe_mac_loopback_cleanup(struct txgbe_adapter *adapter) +{ + wr32m(&adapter->hw, TXGBE_TSC_CTL, + TXGBE_TSC_MACTX_AFIFO_RD_WTRMRK, 0x20000); + wr32m(&adapter->hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_LM, ~TXGBE_MAC_RX_CFG_LM); + wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_FORCE_LKUP, ~TXGBE_CFG_PORT_CTL_FORCE_LKUP); +} + +static int txgbe_setup_phy_loopback_test(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 value; + /* setup phy loopback */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_MISC_CTL0); + value |= TXGBE_PHY_MISC_CTL0_TX2RX_LB_EN_0 | + TXGBE_PHY_MISC_CTL0_TX2RX_LB_EN_3_1; + + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, value); + + value = txgbe_rd32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1); + txgbe_wr32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1, + value | TXGBE_SR_PMA_MMD_CTL1_LB_EN); + return 0; +} + +static void txgbe_phy_loopback_cleanup(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 value; + + value = txgbe_rd32_epcs(hw, TXGBE_PHY_MISC_CTL0); + value &= ~(TXGBE_PHY_MISC_CTL0_TX2RX_LB_EN_0 | + TXGBE_PHY_MISC_CTL0_TX2RX_LB_EN_3_1); + + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, value); + value = txgbe_rd32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1); + txgbe_wr32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1, + value & ~TXGBE_SR_PMA_MMD_CTL1_LB_EN); +} + + +static void txgbe_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size >>= 1; + memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size + 10], 0xBE, 1); + memset(&skb->data[frame_size + 12], 0xAF, 1); +} + +static bool txgbe_check_lbtest_frame(struct txgbe_rx_buffer *rx_buffer, + unsigned int frame_size) +{ + unsigned char *data; + bool match = true; + + frame_size >>= 1; + +#ifdef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + data = rx_buffer->skb->data; +#else + data = kmap(rx_buffer->page) + rx_buffer->page_offset; +#endif + + if (data[3] != 0xFF || + data[frame_size + 10] != 0xBE || + data[frame_size + 12] != 0xAF) + match = false; + +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + kunmap(rx_buffer->page); + +#endif + return match; +} 
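/*
 * Stand-alone illustration (not driver code) of the loopback test pattern
 * built by txgbe_create_lbtest_frame() and spot-checked by
 * txgbe_check_lbtest_frame() above: the frame starts as all 0xFF, part of the
 * second half is overwritten with 0xAA, and two marker bytes (0xBE, 0xAF) are
 * placed at half + 10 and half + 12. txgbe_run_loopback_test() uses a
 * 1024-byte test skb, so that size is used here as well.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Fill buf with the same pattern the driver writes into the test skb. */
static void build_lbtest_frame(uint8_t *buf, unsigned int frame_size)
{
	memset(buf, 0xFF, frame_size);
	frame_size >>= 1;
	memset(&buf[frame_size], 0xAA, frame_size / 2 - 1);
	buf[frame_size + 10] = 0xBE;
	buf[frame_size + 12] = 0xAF;
}

/* Apply the same spot checks the driver performs on the received copy. */
static bool check_lbtest_frame(const uint8_t *buf, unsigned int frame_size)
{
	frame_size >>= 1;
	return buf[3] == 0xFF &&
	       buf[frame_size + 10] == 0xBE &&
	       buf[frame_size + 12] == 0xAF;
}

int main(void)
{
	uint8_t frame[1024];

	build_lbtest_frame(frame, sizeof(frame));
	printf("loopback frame %s\n",
	       check_lbtest_frame(frame, sizeof(frame)) ? "matches" : "corrupted");
	return 0;
}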
+ +static u16 txgbe_clean_test_rings(struct txgbe_ring *rx_ring, + struct txgbe_ring *tx_ring, + unsigned int size) +{ + union txgbe_rx_desc *rx_desc; + struct txgbe_rx_buffer *rx_buffer; + struct txgbe_tx_buffer *tx_buffer; +#ifdef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + const int bufsz = rx_ring->rx_buf_len; +#else + const int bufsz = txgbe_rx_bufsz(rx_ring); +#endif + u16 rx_ntc, tx_ntc, count = 0; + + /* initialize next to clean and descriptor values */ + rx_ntc = rx_ring->next_to_clean; + tx_ntc = tx_ring->next_to_clean; + rx_desc = TXGBE_RX_DESC(rx_ring, rx_ntc); + + while (txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_DD)) { + /* unmap buffer on Tx side */ + tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; + txgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); + + /* check Rx buffer */ + rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; + + /* sync Rx buffer for CPU read */ + dma_sync_single_for_cpu(rx_ring->dev, +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + rx_buffer->page_dma, +#else + rx_buffer->dma, +#endif + bufsz, + DMA_FROM_DEVICE); + + /* verify contents of skb */ + if (txgbe_check_lbtest_frame(rx_buffer, size)) + count++; + + /* sync Rx buffer for device write */ + dma_sync_single_for_device(rx_ring->dev, +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + rx_buffer->page_dma, +#else + rx_buffer->dma, +#endif + bufsz, + DMA_FROM_DEVICE); + + /* increment Rx/Tx next to clean counters */ + rx_ntc++; + if (rx_ntc == rx_ring->count) + rx_ntc = 0; + tx_ntc++; + if (tx_ntc == tx_ring->count) + tx_ntc = 0; + + /* fetch next descriptor */ + rx_desc = TXGBE_RX_DESC(rx_ring, rx_ntc); + } + + /* re-map buffers to ring, store next to clean values */ + txgbe_alloc_rx_buffers(rx_ring, count); + rx_ring->next_to_clean = rx_ntc; + tx_ring->next_to_clean = tx_ntc; + + return count; +} + +static int txgbe_run_loopback_test(struct txgbe_adapter *adapter) +{ + struct txgbe_ring *tx_ring = &adapter->test_tx_ring; + struct txgbe_ring *rx_ring = &adapter->test_rx_ring; + int i, j, lc, good_cnt, ret_val = 0; + unsigned int size = 1024; + netdev_tx_t tx_ret_val; + struct sk_buff *skb; + u32 flags_orig = adapter->flags; + + + /* DCB can modify the frames on Tx */ + adapter->flags &= ~TXGBE_FLAG_DCB_ENABLED; + + /* allocate test skb */ + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return 11; + + /* place data into test skb */ + txgbe_create_lbtest_frame(skb, size); + skb_put(skb, size); + + /* + * Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + for (j = 0; j <= lc; j++) { + /* reset count of good packets */ + good_cnt = 0; + + /* place 64 packets on the transmit queue*/ + for (i = 0; i < 64; i++) { + skb_get(skb); + tx_ret_val = txgbe_xmit_frame_ring(skb, + adapter, + tx_ring); + if (tx_ret_val == NETDEV_TX_OK) + good_cnt++; + } + + if (good_cnt != 64) { + ret_val = 12; + break; + } + + /* allow 200 milliseconds for packets to go from Tx to Rx */ + msleep(200); + + good_cnt = txgbe_clean_test_rings(rx_ring, tx_ring, size); + if(j == 0) + continue; + else if (good_cnt != 64) { + ret_val = 13; + break; + } + } + + /* free the original skb */ + kfree_skb(skb); + adapter->flags = flags_orig; + + return ret_val; +} + +static int txgbe_loopback_test(struct txgbe_adapter *adapter, u64 *data) +{ + struct txgbe_hw *hw = &adapter->hw; + /* Let firmware know the driver 
has taken over */ + wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_DRV_LOAD, TXGBE_CFG_PORT_CTL_DRV_LOAD); + *data = txgbe_setup_config(adapter); + if (*data) + goto err_loopback; + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + *data = txgbe_setup_mac_loopback_test(adapter); + else + *data = txgbe_setup_phy_loopback_test(adapter); + if (*data) + goto err_loopback; + + *data = txgbe_setup_desc_rings(adapter); + if (*data) + goto out; + *data = txgbe_run_loopback_test(adapter); + if (*data) + e_info(hw, "phy loopback testing failed\n"); + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + txgbe_mac_loopback_cleanup(adapter); + else + txgbe_phy_loopback_cleanup(adapter); + +err_loopback: + txgbe_free_desc_rings(adapter); +out: + /* Let firmware take over control of h/w */ + wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_DRV_LOAD, 0); + + return *data; +} + +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT +static int txgbe_diag_test_count(struct net_device __always_unused *netdev) +{ + return TXGBE_TEST_LEN; +} + +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ +static void txgbe_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + struct txgbe_hw *hw = &adapter->hw; + + if (TXGBE_REMOVED(hw->hw_addr)) { + e_err(hw, "Adapter removed - test blocked\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + data[4] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } + set_bit(__TXGBE_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) { + int i; + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[i].clear_to_send) { + e_warn(drv, "Please take active VFS " + "offline and restart the " + "adapter before running NIC " + "diagnostics\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + data[4] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(__TXGBE_TESTING, + &adapter->state); + goto skip_ol_tests; + } + } + } + + /* Offline tests */ + e_info(hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result */ + if (txgbe_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + txgbe_close(netdev); + else + txgbe_reset(adapter); + + e_info(hw, "register testing starting\n"); + if (txgbe_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + txgbe_reset(adapter); + e_info(hw, "eeprom testing starting\n"); + if (txgbe_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + txgbe_reset(adapter); + e_info(hw, "interrupt testing starting\n"); + if (txgbe_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || + ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP)){ + e_info(hw, "skip MAC loopback diagnostic when veto set\n"); + data[3] = 0; + goto skip_loopback; + } + /* If SRIOV or VMDq is enabled then skip MAC + * loopback diagnostic. 
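+ * The skipped test is reported as a pass (data[3] = 0) rather than a failure.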
*/ + if (adapter->flags & (TXGBE_FLAG_SRIOV_ENABLED | + TXGBE_FLAG_VMDQ_ENABLED)) { + e_info(hw, "skip MAC loopback diagnostic in VT mode\n"); + data[3] = 0; + goto skip_loopback; + } + + txgbe_reset(adapter); + e_info(hw, "loopback testing starting\n"); + if (txgbe_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + +skip_loopback: + txgbe_reset(adapter); + + /* clear testing bit and return adapter to previous state */ + clear_bit(__TXGBE_TESTING, &adapter->state); + if (if_running) + txgbe_open(netdev); + } else { + e_info(hw, "online testing starting\n"); + + /* Online tests */ + if (txgbe_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Offline tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__TXGBE_TESTING, &adapter->state); + } + +skip_ol_tests: + msleep_interruptible(4 * 1000); +} + +static void txgbe_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + + if (!device_can_wakeup(pci_dev_to_dev(adapter->pdev))) + return; + + if((hw->subsystem_device_id & TXGBE_WOL_MASK) != TXGBE_WOL_SUP) + return; + + if (adapter->wol & TXGBE_PSR_WKUP_CTL_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & TXGBE_PSR_WKUP_CTL_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & TXGBE_PSR_WKUP_CTL_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & TXGBE_PSR_WKUP_CTL_MAG) + wol->wolopts |= WAKE_MAGIC; + + +} + +static int txgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if((hw->subsystem_device_id & TXGBE_WOL_MASK) != TXGBE_WOL_SUP) + return -EOPNOTSUPP; + + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= TXGBE_PSR_WKUP_CTL_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= TXGBE_PSR_WKUP_CTL_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= TXGBE_PSR_WKUP_CTL_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= TXGBE_PSR_WKUP_CTL_MAG; + + hw->wol_enabled = !!(adapter->wol); + wr32(hw, TXGBE_PSR_WKUP_CTL, adapter->wol); + + device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol); + + return 0; + + return -EOPNOTSUPP; +} + +static int txgbe_nway_reset(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + txgbe_reinit_locked(adapter); + + return 0; +} + +#ifdef HAVE_ETHTOOL_SET_PHYS_ID +static int txgbe_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u16 value = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + if (hw->mac.type == txgbe_mac_aml || (hw->mac.type == txgbe_mac_aml40)) + txgbe_hic_notify_led_active(hw, 1); + adapter->led_reg = rd32(hw, TXGBE_CFG_LED_CTL); + return 2; + + case ETHTOOL_ID_ON: + if ((hw->oem_ssid != 0x0085 && hw->oem_svid == 0x1bd4) || + (hw->oem_ssid != 0x0085 && hw->oem_svid == 0x1ff9)) { + if (adapter->link_up) { + switch (adapter->link_speed) { + case TXGBE_LINK_SPEED_10GB_FULL: + TCALL(hw, mac.ops.led_on, TXGBE_LED_LINK_10G); + break; + case TXGBE_LINK_SPEED_1GB_FULL: + TCALL(hw, mac.ops.led_on, 
TXGBE_LED_LINK_1G); + break; + case TXGBE_LINK_SPEED_100_FULL: + TCALL(hw, mac.ops.led_on, TXGBE_LED_LINK_100M); + break; + default: + break; + } + } else + TCALL(hw, mac.ops.led_on, TXGBE_LED_LINK_10G); + } else + TCALL(hw, mac.ops.led_on, TXGBE_LED_LINK_UP); + break; + + case ETHTOOL_ID_OFF: + if ((hw->oem_ssid != 0x0085 && hw->oem_svid == 0x1bd4) || + (hw->oem_ssid != 0x0085 && hw->oem_svid == 0x1ff9)) { + if (adapter->link_up) { + switch (adapter->link_speed) { + case TXGBE_LINK_SPEED_10GB_FULL: + TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_10G); + break; + case TXGBE_LINK_SPEED_1GB_FULL: + TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_1G); + break; + case TXGBE_LINK_SPEED_100_FULL: + TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_100M); + break; + default: + break; + } + } else + TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_10G); + } else + TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_UP); + break; + + case ETHTOOL_ID_INACTIVE: + /* Restore LED settings */ + if (hw->mac.type == txgbe_mac_aml || (hw->mac.type == txgbe_mac_aml40)) + txgbe_hic_notify_led_active(hw, 0); + wr32(&adapter->hw, TXGBE_CFG_LED_CTL, + adapter->led_reg); + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) { + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xF021, &value); + txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xF021, (value & 0xFFFC) | 0x0); + } + break; + } + + return 0; +} +#else +static int txgbe_phys_id(struct net_device *netdev, u32 data) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u32 led_reg = rd32(hw, TXGBE_CFG_LED_CTL); + u32 i; + + if (!data || data > 300) + data = 300; + + for (i = 0; i < (data * 1000); i += 400) { + if ((hw->oem_ssid != 0x0085 && hw->oem_svid == 0x1bd4) || + (hw->oem_ssid != 0x0085 && hw->oem_svid == 0x1ff9)){ + if (adapter->link_up) { + switch (adapter->link_speed) { + case TXGBE_LINK_SPEED_10GB_FULL: + TCALL(hw, mac.ops.led_on, TXGBE_LED_LINK_10G); + break; + case TXGBE_LINK_SPEED_1GB_FULL: + TCALL(hw, mac.ops.led_on, TXGBE_LED_LINK_1G); + break; + case TXGBE_LINK_SPEED_100_FULL: + TCALL(hw, mac.ops.led_on, TXGBE_LED_LINK_100M); + break; + default: + break; + } + } else + TCALL(hw, mac.ops.led_on, TXGBE_LED_LINK_10G); + } else + TCALL(hw, mac.ops.led_on, TXGBE_LED_LINK_UP); + msleep_interruptible(200); + if ((hw->oem_ssid != 0x0085 && hw->oem_svid == 0x1bd4) || + (hw->oem_ssid != 0x0085 && hw->oem_svid == 0x1ff9)) { + if (adapter->link_up) { + switch (adapter->link_speed) { + case TXGBE_LINK_SPEED_10GB_FULL: + TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_10G); + break; + case TXGBE_LINK_SPEED_1GB_FULL: + TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_1G); + break; + case TXGBE_LINK_SPEED_100_FULL: + TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_100M); + break; + default: + break; + } + } else + TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_10G); + } else + TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_UP); + msleep_interruptible(200); + } + + /* Restore LED settings */ + wr32(hw, TXGBE_CFG_LED_CTL, led_reg); + + return 0; +} +#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ + +static int txgbe_get_coalesce(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else + struct ethtool_coalesce *ec) +#endif +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; + /* only valid if in constant ITR mode */ + if (adapter->rx_itr_setting <= 1) + ec->rx_coalesce_usecs = 
adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + if (adapter->rx_itr_setting == 1) + ec->use_adaptive_rx_coalesce = 1; + + /* if in mixed tx/rx queues per vector mode, report only rx settings */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + return 0; + + /* only valid if in constant ITR mode */ + if (adapter->tx_itr_setting <= 1) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + + return 0; +} + +/* + * this function must be called before setting the new value of + * rx_itr_setting + */ +static bool txgbe_update_rsc(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + /* nothing to do if LRO or RSC are not enabled */ + if (!(adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) || + !(netdev->features & NETIF_F_LRO)) + return false; + + /* check the feature flag value and enable RSC if necessary */ + if (adapter->rx_itr_setting == 1 || + adapter->rx_itr_setting > TXGBE_MIN_RSC_ITR) { + if (!(adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED)) { + adapter->flags2 |= TXGBE_FLAG2_RSC_ENABLED; + e_info(probe, "rx-usecs value high enough " + "to re-enable RSC\n"); + return true; + } + /* if interrupt rate is too high then disable RSC */ + } else if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) { + adapter->flags2 &= ~TXGBE_FLAG2_RSC_ENABLED; +#ifdef TXGBE_NO_LRO + e_info(probe, "rx-usecs set too low, disabling RSC\n"); +#else + e_info(probe, "rx-usecs set too low, " + "falling back to software LRO\n"); +#endif + return true; + } + return false; +} + +static int txgbe_set_coalesce(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else + struct ethtool_coalesce *ec) +#endif +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_q_vector *q_vector; + int i; + u16 tx_itr_param, rx_itr_param; + u16 tx_itr_prev; + bool need_reset = false; +#if 0 + if(ec->tx_max_coalesced_frames_irq == adapter->tx_work_limit && + ((adapter->rx_itr_setting <= 1) ? (ec->rx_coalesce_usecs == adapter->rx_itr_setting) : + (ec->rx_coalesce_usecs == adapter->rx_itr_setting >> 2))) { + e_info(probe, "no coalesce parameters changed, aborting\n"); + return -EINVAL; + } +#endif + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) { + /* reject Tx specific changes in case of mixed RxTx vectors */ + if (ec->tx_coalesce_usecs) + return -EINVAL; + tx_itr_prev = adapter->rx_itr_setting; + } else { + tx_itr_prev = adapter->tx_itr_setting; + } + + if (ec->tx_max_coalesced_frames_irq) { + if (ec->tx_max_coalesced_frames_irq <= TXGBE_MAX_TX_WORK) + adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; + else + return -EINVAL; + } else + return -EINVAL; + + if ((ec->rx_coalesce_usecs > (TXGBE_MAX_EITR >> 2)) || + (ec->tx_coalesce_usecs > (TXGBE_MAX_EITR >> 2))) + return -EINVAL; + + if (ec->use_adaptive_tx_coalesce) + return -EINVAL; + + if (ec->use_adaptive_rx_coalesce) { + adapter->rx_itr_setting = 1; + return 0; + } else { + /* restore to default rxusecs value when adaptive itr turn off */ + /* user shall turn off adaptive itr and set user-defined rx usecs value + * in two cmds separately. 
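+ * Until an explicit value is supplied, the setting falls back to the TXGBE_20K_ITR default below.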
+ */ + if (adapter->rx_itr_setting == 1) { + adapter->rx_itr_setting = TXGBE_20K_ITR; + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + } + } + + if (ec->rx_coalesce_usecs > 1) + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + + if (adapter->rx_itr_setting == 1) + rx_itr_param = TXGBE_20K_ITR; + else + rx_itr_param = adapter->rx_itr_setting; + + if (ec->tx_coalesce_usecs > 1) + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + + if (adapter->tx_itr_setting == 1) + tx_itr_param = TXGBE_12K_ITR; + else + tx_itr_param = adapter->tx_itr_setting; + + /* mixed Rx/Tx */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + adapter->tx_itr_setting = adapter->rx_itr_setting; + + /* detect ITR changes that require update of TXDCTL.WTHRESH */ + if ((adapter->tx_itr_setting != 1) && + (adapter->tx_itr_setting < TXGBE_100K_ITR)) { + if ((tx_itr_prev == 1) || + (tx_itr_prev >= TXGBE_100K_ITR)) + need_reset = true; + } else { + if ((tx_itr_prev != 1) && + (tx_itr_prev < TXGBE_100K_ITR)) + need_reset = true; + } + + /* check the old value and enable RSC if necessary */ + need_reset |= txgbe_update_rsc(adapter); + + if (adapter->hw.mac.dmac_config.watchdog_timer && + (!adapter->rx_itr_setting && !adapter->tx_itr_setting)) { + e_info(probe, + "Disabling DMA coalescing because interrupt throttling " + "is disabled\n"); + adapter->hw.mac.dmac_config.watchdog_timer = 0; + TCALL(hw, mac.ops.dmac_config); + } + + for (i = 0; i < adapter->num_q_vectors; i++) { + q_vector = adapter->q_vector[i]; + q_vector->tx.work_limit = adapter->tx_work_limit; + q_vector->rx.work_limit = adapter->rx_work_limit; + if (q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->itr = tx_itr_param; + else + /* rx only or mixed */ + q_vector->itr = rx_itr_param; + txgbe_write_eitr(q_vector); + } + + /* + * do reset here at the end to make sure EITR==0 case is handled + * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings + * also locks in RSC enable/disable which requires reset + */ + if (need_reset) + txgbe_do_reset(netdev); + + return 0; +} + +#ifndef HAVE_NDO_SET_FEATURES +static u32 txgbe_get_rx_csum(struct net_device *netdev) +{ + return !!(netdev->features & NETIF_F_RXCSUM); +} + +static int txgbe_set_rx_csum(struct net_device *netdev, u32 data) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + bool need_reset = false; + + if (data) + netdev->features |= NETIF_F_RXCSUM; + else + netdev->features &= ~NETIF_F_RXCSUM; + + /* LRO and RSC both depend on RX checksum to function */ + if (!data && (netdev->features & NETIF_F_LRO)) { + netdev->features &= ~NETIF_F_LRO; + + if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) { + adapter->flags2 &= ~TXGBE_FLAG2_RSC_ENABLED; + need_reset = true; + } + } + +#ifdef HAVE_VXLAN_RX_OFFLOAD + if (adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE && data) { + netdev->hw_enc_features |= NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM; + if (!need_reset) + adapter->flags2 |= TXGBE_FLAG2_VXLAN_REREG_NEEDED; + } else { + netdev->hw_enc_features &= ~(NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM); + txgbe_clear_vxlan_port(adapter); + } +#endif /* HAVE_VXLAN_RX_OFFLOAD */ + + if (need_reset) + txgbe_do_reset(netdev); + + return 0; +} + +static int txgbe_set_tx_csum(struct net_device *netdev, u32 data) +{ +#ifdef NETIF_F_IPV6_CSUM + u32 feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; +#else + 
u32 feature_list = NETIF_F_IP_CSUM; +#endif + + +#ifdef HAVE_ENCAP_TSO_OFFLOAD + if (data) + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; + else + netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL; + feature_list |= NETIF_F_GSO_UDP_TUNNEL; +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + feature_list |= NETIF_F_SCTP_CSUM; + + + if (data) + netdev->features |= feature_list; + else + netdev->features &= ~feature_list; + + return 0; +} + +#ifdef NETIF_F_TSO +static int txgbe_set_tso(struct net_device *netdev, u32 data) +{ +#ifdef NETIF_F_TSO6 + u32 feature_list = NETIF_F_TSO | NETIF_F_TSO6; +#else + u32 feature_list = NETIF_F_TSO; +#endif + + if (data) + netdev->features |= feature_list; + else + netdev->features &= ~feature_list; + +#ifndef HAVE_NETDEV_VLAN_FEATURES + if (!data) { + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct net_device *v_netdev; + int i; + + /* disable TSO on all VLANs if they're present */ + if (!adapter->vlgrp) + goto tso_out; + + for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { + v_netdev = vlan_group_get_device(adapter->vlgrp, i); + if (!v_netdev) + continue; + + v_netdev->features &= ~feature_list; + vlan_group_set_device(adapter->vlgrp, i, v_netdev); + } + } + +tso_out: + +#endif /* HAVE_NETDEV_VLAN_FEATURES */ + return 0; +} + +#endif /* NETIF_F_TSO */ +#ifdef ETHTOOL_GFLAGS +static int txgbe_set_flags(struct net_device *netdev, u32 data) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN; + u32 changed = netdev->features ^ data; + bool need_reset = false; + int rc; + +#ifndef HAVE_VLAN_RX_REGISTER + if ((adapter->flags & TXGBE_FLAG_DCB_ENABLED) && + !(data & ETH_FLAG_RXVLAN)) + return -EINVAL; + +#endif +#ifdef TXGBE_NO_LRO + if (adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) +#endif + supported_flags |= ETH_FLAG_LRO; + +#ifdef ETHTOOL_GRXRINGS + + supported_flags |= ETH_FLAG_NTUPLE; + + +#endif +#ifdef NETIF_F_RXHASH + supported_flags |= ETH_FLAG_RXHASH; + +#endif + rc = ethtool_op_set_flags(netdev, data, supported_flags); + if (rc) + return rc; + +#ifndef HAVE_VLAN_RX_REGISTER + if (changed & ETH_FLAG_RXVLAN) + txgbe_vlan_mode(netdev, netdev->features); + +#endif + +#ifdef HAVE_VXLAN_CHECKS + if (adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE && + netdev->features & NETIF_F_RXCSUM) { + vxlan_get_rx_port(netdev); + else + txgbe_clear_vxlan_port(adapter); + } +#endif /* HAVE_VXLAN_RX_OFFLOAD */ + + /* if state changes we need to update adapter->flags and reset */ + if (!(netdev->features & NETIF_F_LRO)) { + if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) + need_reset = true; + adapter->flags2 &= ~TXGBE_FLAG2_RSC_ENABLED; + } else if ((adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) && + !(adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED)) { + if (adapter->rx_itr_setting == 1 || + adapter->rx_itr_setting > TXGBE_MIN_RSC_ITR) { + adapter->flags2 |= TXGBE_FLAG2_RSC_ENABLED; + need_reset = true; + } else if (changed & ETH_FLAG_LRO) { +#ifdef TXGBE_NO_LRO + e_info(probe, "rx-usecs set too low, " + "disabling RSC\n"); +#else + e_info(probe, "rx-usecs set too low, " + "falling back to software LRO\n"); +#endif + } + } + +#ifdef ETHTOOL_GRXRINGS + /* + * Check if Flow Director n-tuple support was enabled or disabled. If + * the state changed, we need to reset. 
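+ * Enabling n-tuple switches the port from ATR hashing to perfect filters;
+ * disabling it re-enables ATR only when VMDq, DCB, RSS and the ATR sample rate allow it.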
+ */ + switch (netdev->features & NETIF_F_NTUPLE) { + case NETIF_F_NTUPLE: + /* turn off ATR, enable perfect filters and reset */ + if (!(adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + need_reset = true; + + adapter->flags &= ~TXGBE_FLAG_FDIR_HASH_CAPABLE; + adapter->flags |= TXGBE_FLAG_FDIR_PERFECT_CAPABLE; + break; + default: + /* turn off perfect filters, enable ATR and reset */ + if (adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE) + need_reset = true; + + adapter->flags &= ~TXGBE_FLAG_FDIR_PERFECT_CAPABLE; + + /* We cannot enable ATR if VMDq is enabled */ + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) + break; + + /* We cannot enable ATR if we have 2 or more traffic classes */ + if (netdev_get_num_tc(netdev) > 1) + break; + + /* We cannot enable ATR if RSS is disabled */ + if (adapter->ring_feature[RING_F_RSS].limit <= 1) + break; + + /* A sample rate of 0 indicates ATR disabled */ + if (!adapter->atr_sample_rate) + break; + + adapter->flags |= TXGBE_FLAG_FDIR_HASH_CAPABLE; + break; + } + +#endif /* ETHTOOL_GRXRINGS */ + if (need_reset) + txgbe_do_reset(netdev); + + return 0; +} + +#endif /* ETHTOOL_GFLAGS */ +#endif /* HAVE_NDO_SET_FEATURES */ +#ifdef ETHTOOL_GRXRINGS +static int txgbe_match_etype_entry(struct txgbe_adapter *adapter, u16 sw_idx) +{ + struct txgbe_etype_filter_info *ef_info = &adapter->etype_filter_info; + int i; + + for (i = 0; i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS; i++) { + if (ef_info->etype_filters[i].rule_idx == sw_idx) + break; + } + + return i; +} + +static int txgbe_get_etype_rule(struct txgbe_adapter *adapter, + struct ethtool_rx_flow_spec *fsp, int ef_idx) +{ + struct txgbe_etype_filter_info *ef_info = &adapter->etype_filter_info; + u8 mask[6] = {0, 0, 0, 0, 0, 0}; + u8 mac[6] = {0, 0, 0, 0, 0, 0}; + + fsp->flow_type = ETHER_FLOW; + ether_addr_copy(fsp->h_u.ether_spec.h_dest, mac); + ether_addr_copy(fsp->m_u.ether_spec.h_dest, mask); + ether_addr_copy(fsp->h_u.ether_spec.h_source, mac); + ether_addr_copy(fsp->m_u.ether_spec.h_source, mask); + fsp->h_u.ether_spec.h_proto = htons(ef_info->etype_filters[ef_idx].ethertype); + fsp->m_u.ether_spec.h_proto = 0xFFFF; + fsp->ring_cookie = ef_info->etype_filters[ef_idx].action; + + return 0; +} + +static int txgbe_get_ethtool_fdir_entry(struct txgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + union txgbe_atr_input *mask = &adapter->fdir_mask; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct hlist_node *node; + struct txgbe_fdir_filter *rule = NULL; + + if (adapter->etype_filter_info.count > 0) { + int ef_idx; + + ef_idx = txgbe_match_etype_entry(adapter, fsp->location); + if (ef_idx < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS) + return txgbe_get_etype_rule(adapter, fsp, ef_idx); + } + + /* report total rule count */ + cmd->data = (1024 << adapter->fdir_pballoc) - 2; + + hlist_for_each_entry_safe(rule, node, + &adapter->fdir_filter_list, fdir_node) { + if (fsp->location <= rule->sw_idx) + break; + } + + if (!rule || fsp->location != rule->sw_idx) + return -EINVAL; + + /* fill out the flow spec entry */ + + /* set flow type field */ + switch (rule->filter.formatted.flow_type) { + case TXGBE_ATR_FLOW_TYPE_TCPV4: + fsp->flow_type = TCP_V4_FLOW; + break; + case TXGBE_ATR_FLOW_TYPE_UDPV4: + fsp->flow_type = UDP_V4_FLOW; + break; + case TXGBE_ATR_FLOW_TYPE_SCTPV4: + fsp->flow_type = SCTP_V4_FLOW; + break; + case TXGBE_ATR_FLOW_TYPE_IPV4: + fsp->flow_type = IP_USER_FLOW; + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + fsp->h_u.usr_ip4_spec.proto = 0; + fsp->m_u.usr_ip4_spec.proto 
= 0; + break; + default: + return -EINVAL; + } + + fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port; + fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port; + fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port; + fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port; + fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0]; + fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0]; + fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes; + fsp->m_ext.vlan_etype = mask->formatted.flex_bytes; + fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool); + fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool); + fsp->flow_type |= FLOW_EXT; + + /* record action */ + if (rule->action == TXGBE_RDB_FDIR_DROP_QUEUE) + fsp->ring_cookie = RX_CLS_FLOW_DISC; + else + fsp->ring_cookie = rule->action; + + return 0; +} + +static int txgbe_get_ethtool_fdir_all(struct txgbe_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct txgbe_etype_filter_info *ef_info = &adapter->etype_filter_info; + struct hlist_node *node; + struct txgbe_fdir_filter *rule; + int cnt = 0, i; + + /* report total rule count */ + cmd->data = (1024 << adapter->fdir_pballoc) - 2; + + hlist_for_each_entry_safe(rule, node, + &adapter->fdir_filter_list, fdir_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + for (i = 0; i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS; i++) { + if (ef_info->ethertype_mask & (1 << i)) { + rule_locs[cnt] = ef_info->etype_filters[i].rule_idx; + cnt++; + } + } + + cmd->rule_cnt = cnt; + + return 0; +} + +static int txgbe_get_rss_hash_opts(struct txgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on txgbe */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V4_FLOW: + if (adapter->flags2 & TXGBE_FLAG2_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V6_FLOW: + if (adapter->flags2 & TXGBE_FLAG2_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int txgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, +#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS + void *rule_locs) +#else + u32 *rule_locs) +#endif +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->fdir_filter_count + + adapter->etype_filter_info.count; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = txgbe_get_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = txgbe_get_ethtool_fdir_all(adapter, cmd, + (u32 *)rule_locs); + break; + case ETHTOOL_GRXFH: + ret = txgbe_get_rss_hash_opts(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +static int 
+txgbe_ethertype_filter_lookup(struct txgbe_etype_filter_info *ef_info, + u16 ethertype) +{ + int i; + + for (i = 0; i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS; i++) { + if (ef_info->etype_filters[i].ethertype == ethertype && + (ef_info->ethertype_mask & (1 << i))) + return i; + } + return -1; +} + +static int +txgbe_ethertype_filter_insert(struct txgbe_etype_filter_info *ef_info, + struct txgbe_ethertype_filter *etype_filter) +{ + int i; + + for (i = 0; i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS; i++) { + if (ef_info->ethertype_mask & (1 << i)) { + continue; + } + ef_info->ethertype_mask |= 1 << i; + ef_info->etype_filters[i].ethertype = etype_filter->ethertype; + ef_info->etype_filters[i].etqf = etype_filter->etqf; + ef_info->etype_filters[i].etqs = etype_filter->etqs; + ef_info->etype_filters[i].rule_idx = etype_filter->rule_idx; + ef_info->etype_filters[i].action = etype_filter->action; + break; + } + + return (i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS ? i : -1); +} + +static int txgbe_add_ethertype_filter(struct txgbe_adapter *adapter, + struct ethtool_rx_flow_spec *fsp) +{ + struct txgbe_etype_filter_info *ef_info = &adapter->etype_filter_info; + struct txgbe_ethertype_filter etype_filter; + struct txgbe_hw *hw = &adapter->hw; + u16 ethertype; + u32 etqf = 0; + u32 etqs = 0; + u8 queue, vf; + u32 ring; + int ret; + + ethertype = ntohs(fsp->h_u.ether_spec.h_proto); + if (!ethertype) { + e_err(drv, "protocol number is missing for ethertype filter\n"); + return -EINVAL; + } + if (ethertype == ETH_P_IP || ethertype == ETH_P_IPV6) { + e_err(drv, "unsupported ether_type(0x%04x) in ethertype filter\n", + ethertype); + return -EINVAL; + } + + ret = txgbe_ethertype_filter_lookup(ef_info, ethertype); + if (ret >= 0) { + e_err(drv, "ethertype (0x%04x) filter exists.", ethertype); + return -EEXIST; + } + + /* ring_cookie is a masked into a set of queues and txgbe pools */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { + e_err(drv, "drop option is unsupported."); + return -EINVAL; + } + + ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); + vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); + if (!vf && ring >= adapter->num_rx_queues) + return -EINVAL; + else if (vf && ((vf > adapter->num_vfs) || + ring >= adapter->num_rx_queues_per_pool)) + return -EINVAL; + + /* Map the ring onto the absolute queue index */ + if (!vf) + queue = adapter->rx_ring[ring]->reg_idx; + else + queue = ((vf - 1) * adapter->num_rx_queues_per_pool) + ring; + + etqs |= queue << TXGBE_RDB_ETYPE_CLS_RX_QUEUE_SHIFT; + etqs |= TXGBE_RDB_ETYPE_CLS_QUEUE_EN; + etqf = TXGBE_PSR_ETYPE_SWC_FILTER_EN | ethertype; + if (adapter->num_vfs) { + u8 pool; + + if (!vf) + pool = adapter->num_vfs; + else + pool = vf - 1; + + etqf |= TXGBE_PSR_ETYPE_SWC_POOL_ENABLE; + etqf |= pool << TXGBE_PSR_ETYPE_SWC_POOL_SHIFT; + } + + etype_filter.ethertype = ethertype; + etype_filter.etqf = etqf; + etype_filter.etqs = etqs; + etype_filter.rule_idx = fsp->location; + etype_filter.action = fsp->ring_cookie; + ret = txgbe_ethertype_filter_insert(ef_info, &etype_filter); + if (ret < 0) { + e_err(drv, "ethertype filters are full."); + return -ENOSPC; + } + + wr32(hw, TXGBE_PSR_ETYPE_SWC(ret), etqf); + wr32(hw, TXGBE_RDB_ETYPE_CLS(ret), etqs); + TXGBE_WRITE_FLUSH(hw); + + ef_info->count++; + + return 0; +} + +static int txgbe_del_ethertype_filter(struct txgbe_adapter *adapter, u16 sw_idx) +{ + struct txgbe_etype_filter_info *ef_info = &adapter->etype_filter_info; + struct txgbe_hw *hw = &adapter->hw; + u16 ethertype; + int idx; + + idx = txgbe_match_etype_entry(adapter, 
sw_idx); + if (idx == TXGBE_MAX_PSR_ETYPE_SWC_FILTERS) + return -EINVAL; + + ethertype = ef_info->etype_filters[idx].ethertype; + if (!ethertype) { + e_err(drv, "ethertype filter doesn't exist."); + return -ENOENT; + } + + ef_info->ethertype_mask &= ~(1 << idx); + ef_info->etype_filters[idx].ethertype = 0; + ef_info->etype_filters[idx].etqf = 0; + ef_info->etype_filters[idx].etqs = 0; + ef_info->etype_filters[idx].etqs = FALSE; + ef_info->etype_filters[idx].rule_idx = 0; + + wr32(hw, TXGBE_PSR_ETYPE_SWC(idx), 0); + wr32(hw, TXGBE_RDB_ETYPE_CLS(idx), 0); + TXGBE_WRITE_FLUSH(hw); + + ef_info->count--; + + return 0; + +} + +static int txgbe_update_ethtool_fdir_entry(struct txgbe_adapter *adapter, + struct txgbe_fdir_filter *input, + u16 sw_idx) +{ + struct txgbe_hw *hw = &adapter->hw; + struct hlist_node *node, *parent; + struct txgbe_fdir_filter *rule; + bool deleted = false; + s32 err; + + parent = NULL; + rule = NULL; + + hlist_for_each_entry_safe(rule, node, + &adapter->fdir_filter_list, fdir_node) { + /* hash found, or no matching entry */ + if (rule->sw_idx >= sw_idx) + break; + parent = node; + } + + /* if there is an old rule occupying our place remove it */ + if (rule && (rule->sw_idx == sw_idx)) { + /* hardware filters are only configured when interface is up, + * and we should not issue filter commands while the interface + * is down + */ + if (netif_running(adapter->netdev) && + (!input || (rule->filter.formatted.bkt_hash != + input->filter.formatted.bkt_hash))) { + err = txgbe_fdir_erase_perfect_filter(hw, + &rule->filter, + sw_idx); + if (err) + return -EINVAL; + } + + hlist_del(&rule->fdir_node); + kfree(rule); + adapter->fdir_filter_count--; + deleted = true; + } + + /* If we weren't given an input, then this was a request to delete a + * filter. We should return -EINVAL if the filter wasn't found, but + * return 0 if the rule was successfully deleted. + */ + if (!input) + return deleted ? 
0 : -EINVAL; + + /* initialize node and set software index */ + INIT_HLIST_NODE(&input->fdir_node); + + /* add filter to the list */ + if (parent) + hlist_add_behind(&input->fdir_node, parent); + else + hlist_add_head(&input->fdir_node, + &adapter->fdir_filter_list); + + /* update counts */ + adapter->fdir_filter_count++; + + return 0; +} + +static int txgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, + u8 *flow_type) +{ + switch (fsp->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + *flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case UDP_V4_FLOW: + *flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4; + break; + case SCTP_V4_FLOW: + *flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4; + break; + case IP_USER_FLOW: + switch (fsp->h_u.usr_ip4_spec.proto) { + case IPPROTO_TCP: + *flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case IPPROTO_UDP: + *flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4; + break; + case IPPROTO_SCTP: + *flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4; + break; + case 0: + if (!fsp->m_u.usr_ip4_spec.proto) { + *flow_type = TXGBE_ATR_FLOW_TYPE_IPV4; + break; + } + fallthrough; + default: + return 0; + } + break; + default: + return 0; + } + + return 1; +} + + static bool txgbe_match_ethtool_fdir_entry(struct txgbe_adapter *adapter, + struct txgbe_fdir_filter *input) +{ + struct hlist_node *node2; + struct txgbe_fdir_filter *rule = NULL; + + hlist_for_each_entry_safe(rule, node2, + &adapter->fdir_filter_list, fdir_node) { + if (rule->filter.formatted.bkt_hash == + input->filter.formatted.bkt_hash && + rule->action == input->action) { + e_info(drv, "FDIR entry already exist\n"); + return true; + } + } + return false; +} + +static int txgbe_add_ethtool_fdir_entry(struct txgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_fdir_filter *input; + union txgbe_atr_input mask; + u8 queue; + int err; + u16 ptype = 0; + + if ((fsp->flow_type & ~FLOW_EXT) == ETHER_FLOW) + return txgbe_add_ethertype_filter(adapter, fsp); + + if (!(adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + return -EOPNOTSUPP; + + /* ring_cookie is a masked into a set of queues and txgbe pools or + * we use drop index + */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { + queue = TXGBE_RDB_FDIR_DROP_QUEUE; + } else { + u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); + + if (!vf && ring >= adapter->num_rx_queues) + return -EINVAL; + else if (vf && + ((vf > adapter->num_vfs) || + ring >= adapter->num_rx_queues_per_pool)) + return -EINVAL; + + /* Map the ring onto the absolute queue index */ + if (!vf) + queue = adapter->rx_ring[ring]->reg_idx; + else + queue = ((vf - 1) * + adapter->num_rx_queues_per_pool) + ring; + } + + + /* Don't allow indexes to exist outside of available space */ + if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) { + e_err(drv, "Location out of range\n"); + return -EINVAL; + } + + input = kzalloc(sizeof(*input), GFP_ATOMIC); + if (!input) + return -ENOMEM; + + memset(&mask, 0, sizeof(union txgbe_atr_input)); + + /* set SW index */ + input->sw_idx = fsp->location; + + /* record flow type */ + if (!txgbe_flowspec_to_flow_type(fsp, + &input->filter.formatted.flow_type)) { + e_err(drv, "Unrecognized flow type\n"); + goto err_out; + } + + mask.formatted.flow_type = TXGBE_ATR_L4TYPE_IPV6_MASK | + TXGBE_ATR_L4TYPE_MASK; + + if (input->filter.formatted.flow_type == TXGBE_ATR_FLOW_TYPE_IPV4) + 
mask.formatted.flow_type &= TXGBE_ATR_L4TYPE_IPV6_MASK; + + /* Copy input into formatted structures */ + input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; + mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; + input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; + mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; + input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; + mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc; + input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; + mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst; + + if (fsp->flow_type & FLOW_EXT) { + input->filter.formatted.vm_pool = + (unsigned char)ntohl(fsp->h_ext.data[1]); + mask.formatted.vm_pool = + (unsigned char)ntohl(fsp->m_ext.data[1]); + input->filter.formatted.flex_bytes = + fsp->h_ext.vlan_etype; + mask.formatted.flex_bytes = fsp->m_ext.vlan_etype; +#ifdef FIXED + /* need fix */ + input->filter.formatted.tunnel_type = + (unsigned char)ntohl(fsp->h_ext.data[0]); + mask.formatted.tunnel_type = + (unsigned char)ntohl(fsp->m_ext.data[0]); +#endif + } + + switch (input->filter.formatted.flow_type) { + case TXGBE_ATR_FLOW_TYPE_TCPV4: + ptype = TXGBE_PTYPE_L2_IPV4_TCP; + break; + case TXGBE_ATR_FLOW_TYPE_UDPV4: + ptype = TXGBE_PTYPE_L2_IPV4_UDP; + break; + case TXGBE_ATR_FLOW_TYPE_SCTPV4: + ptype = TXGBE_PTYPE_L2_IPV4_SCTP; + break; + case TXGBE_ATR_FLOW_TYPE_IPV4: + ptype = TXGBE_PTYPE_L2_IPV4; + break; + case TXGBE_ATR_FLOW_TYPE_TCPV6: + ptype = TXGBE_PTYPE_L2_IPV6_TCP; + break; + case TXGBE_ATR_FLOW_TYPE_UDPV6: + ptype = TXGBE_PTYPE_L2_IPV6_UDP; + break; + case TXGBE_ATR_FLOW_TYPE_SCTPV6: + ptype = TXGBE_PTYPE_L2_IPV6_SCTP; + break; + case TXGBE_ATR_FLOW_TYPE_IPV6: + ptype = TXGBE_PTYPE_L2_IPV6; + break; + default: + break; + } + + input->filter.formatted.vlan_id = htons(ptype); + if (mask.formatted.flow_type & TXGBE_ATR_L4TYPE_MASK) + mask.formatted.vlan_id = 0xFFFF; + else + mask.formatted.vlan_id = htons(0xFFF8); + + /* determine if we need to drop or route the packet */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) + input->action = TXGBE_RDB_FDIR_DROP_QUEUE; + else + input->action = fsp->ring_cookie; + + spin_lock(&adapter->fdir_perfect_lock); + + if (hlist_empty(&adapter->fdir_filter_list)) { + /* save mask and program input mask into HW */ + memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); + err = txgbe_fdir_set_input_mask(hw, &mask, + adapter->cloud_mode); + if (err) { + e_err(drv, "Error writing mask\n"); + goto err_out_w_lock; + } + } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { + e_err(drv, "Hardware only supports one mask per port. 
To change" + "the mask you must first delete all the rules.\n"); + goto err_out_w_lock; + } + + /* apply mask and compute/store hash */ + txgbe_atr_compute_perfect_hash(&input->filter, &mask); + + /* check if new entry does not exist on filter list */ + if (txgbe_match_ethtool_fdir_entry(adapter, input)) + goto err_out_w_lock; + + /* only program filters to hardware if the net device is running, as + * we store the filters in the Rx buffer which is not allocated when + * the device is down + */ + if (netif_running(adapter->netdev)) { + err = txgbe_fdir_write_perfect_filter(hw, + &input->filter, input->sw_idx, + queue, + adapter->cloud_mode); + if (err) + goto err_out_w_lock; + } + + txgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); + + spin_unlock(&adapter->fdir_perfect_lock); + + return err; +err_out_w_lock: + spin_unlock(&adapter->fdir_perfect_lock); +err_out: + kfree(input); + return -EINVAL; +} + +static int txgbe_del_ethtool_fdir_entry(struct txgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int err; + + if (adapter->etype_filter_info.count > 0) { + err = txgbe_del_ethertype_filter(adapter, fsp->location); + if (!err) + return 0; + } + + spin_lock(&adapter->fdir_perfect_lock); + err = txgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location); + spin_unlock(&adapter->fdir_perfect_lock); + + return err; +} + +#ifdef ETHTOOL_SRXNTUPLE +/* + * We need to keep this around for kernels 2.6.33 - 2.6.39 in order to avoid + * a null pointer dereference as it was assumend if the NETIF_F_NTUPLE flag + * was defined that this function was present. + */ +static int txgbe_set_rx_ntuple(struct net_device __always_unused *dev, + struct ethtool_rx_ntuple __always_unused *cmd) +{ + return -EOPNOTSUPP; +} + +#endif +#define UDP_RSS_FLAGS (TXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \ + TXGBE_FLAG2_RSS_FIELD_IPV6_UDP) +static int txgbe_set_rss_hash_opt(struct txgbe_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags2 = adapter->flags2; + + /* + * RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags2 &= ~TXGBE_FLAG2_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags2 |= TXGBE_FLAG2_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags2 &= ~TXGBE_FLAG2_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags2 |= TXGBE_FLAG2_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + 
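+	/* At this point nfc->data has been validated: TCP flows must hash on
+	 * both IP addresses and L4 ports, UDP flows may opt in to L4 hashing,
+	 * and the remaining flow types hash on IP addresses only.
+	 */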
+ /* if we changed something we need to update flags */ + if (flags2 != adapter->flags2) { + struct txgbe_hw *hw = &adapter->hw; + u32 mrqc; + + mrqc = rd32(hw, TXGBE_RDB_RA_CTL); + + if ((flags2 & UDP_RSS_FLAGS) && + !(adapter->flags2 & UDP_RSS_FLAGS)) + e_warn(drv, "enabling UDP RSS: fragmented packets" + " may arrive out of order to the stack above\n"); + + adapter->flags2 = flags2; + + /* Perform hash on these packet types */ + mrqc |= TXGBE_RDB_RA_CTL_RSS_IPV4 + | TXGBE_RDB_RA_CTL_RSS_IPV4_TCP + | TXGBE_RDB_RA_CTL_RSS_IPV6 + | TXGBE_RDB_RA_CTL_RSS_IPV6_TCP; + + mrqc &= ~(TXGBE_RDB_RA_CTL_RSS_IPV4_UDP | + TXGBE_RDB_RA_CTL_RSS_IPV6_UDP); + + if (flags2 & TXGBE_FLAG2_RSS_FIELD_IPV4_UDP) + mrqc |= TXGBE_RDB_RA_CTL_RSS_IPV4_UDP; + + if (flags2 & TXGBE_FLAG2_RSS_FIELD_IPV6_UDP) + mrqc |= TXGBE_RDB_RA_CTL_RSS_IPV6_UDP; + + wr32(hw, TXGBE_RDB_RA_CTL, mrqc); + } + + return 0; +} + +static int txgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = txgbe_add_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = txgbe_del_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_SRXFH: + ret = txgbe_set_rss_hash_opt(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) +static int txgbe_rss_indir_tbl_max(struct txgbe_adapter *adapter) +{ + return 64; +} + + +static u32 txgbe_get_rxfh_key_size(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + return sizeof(adapter->rss_key); +} + +static u32 txgbe_rss_indir_size(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + return txgbe_rss_indir_tbl_entries(adapter); +} + +static void txgbe_get_reta(struct txgbe_adapter *adapter, u32 *indir) +{ + int i, reta_size = txgbe_rss_indir_tbl_entries(adapter); + + for (i = 0; i < reta_size; i++) + indir[i] = adapter->rss_indir_tbl[i]; +} + +#ifdef HAVE_ETHTOOL_RXFH_RXFHPARAMS +static int txgbe_get_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh) +#else +#ifdef HAVE_RXFH_HASHFUNC +static int txgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +#else /* HAVE_RXFH_HASHFUNC */ +static int txgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#endif /* HAVE_RXFH_HASHFUNC */ +#endif /* HAVE_ETHTOOL_RXFH_RXFHPARAMS */ +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); +#ifdef HAVE_ETHTOOL_RXFH_RXFHPARAMS + u8 *key = rxfh->key; + u32 *indir = rxfh->indir; +#else +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; +#endif +#endif + + if (indir) + txgbe_get_reta(adapter, indir); + if (key) + memcpy(key, adapter->rss_key, txgbe_get_rxfh_key_size(netdev)); + + return 0; +} + +#ifdef HAVE_ETHTOOL_RXFH_RXFHPARAMS +static int txgbe_set_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +#else +#ifdef HAVE_RXFH_HASHFUNC +static int txgbe_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +#else +#ifdef HAVE_RXFH_NONCONST +static int txgbe_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#else /* HAVE_RXFH_NONCONST */ +static int txgbe_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key) +#endif /* HAVE_RXFH_NONCONST */ +#endif /* HAVE_RXFH_HASHFUNC */ +#endif /* HAVE_ETHTOOL_RXFH_RXFHPARAMS */ +{ + struct txgbe_adapter *adapter = 
netdev_priv(netdev); + int i; + u32 reta_entries = txgbe_rss_indir_tbl_entries(adapter); + struct txgbe_hw *hw = &adapter->hw; +#ifdef HAVE_ETHTOOL_RXFH_RXFHPARAMS + u8 hfunc = rxfh->hfunc; + u8 *key = rxfh->key; + u32 *indir = rxfh->indir; +#endif + +#if (defined(HAVE_RXFH_HASHFUNC) || defined(HAVE_ETHTOOL_RXFH_RXFHPARAMS)) + if (hfunc) + return -EINVAL; +#endif + + /* Fill out the redirection table */ + if (indir) { + int max_queues = min_t(int, adapter->num_rx_queues, + txgbe_rss_indir_tbl_max(adapter)); + + /*Allow at least 2 queues w/ SR-IOV.*/ + if ((adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) && + (max_queues < 2)) + max_queues = 2; + + /* Verify user input. */ + for (i = 0; i < reta_entries; i++) + if (indir[i] >= max_queues) + return -EINVAL; + + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) { + for (i = 0; i < reta_entries; i++) + adapter->rss_indir_tbl[i] = indir[i]; + txgbe_store_vfreta(adapter); + } else { + for (i = 0; i < reta_entries; i++) + adapter->rss_indir_tbl[i] = indir[i]; + txgbe_store_reta(adapter); + } + } + + if (key) { + memcpy(adapter->rss_key, key, txgbe_get_rxfh_key_size(netdev)); + + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) { + unsigned int pf_pool = adapter->num_vfs; + for (i = 0; i < 10; i++) + wr32(hw, TXGBE_RDB_VMRSSRK(i, pf_pool), adapter->rss_key[i]); + } else { + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + wr32(hw, TXGBE_RDB_RSSRK(i), adapter->rss_key[i]); + } + } + + return 0; +} +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ + +#ifdef HAVE_ETHTOOL_GET_TS_INFO +#ifdef HAVE_KERNEL_ETHTOOL_TS_INFO +static int txgbe_get_ts_info(struct net_device *dev, + struct kernel_ethtool_ts_info *info) +#else +static int txgbe_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +#endif +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + + /* we always support timestamping disabled */ + info->rx_filters = 1 << HWTSTAMP_FILTER_NONE; + +#ifdef HAVE_PTP_1588_CLOCK + + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters |= + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); + +#endif /* HAVE_PTP_1588_CLOCK */ + return 0; +} +#endif /* HAVE_ETHTOOL_GET_TS_INFO */ + +#endif /* ETHTOOL_GRXRINGS */ +#ifdef ETHTOOL_SCHANNELS +static unsigned int txgbe_max_channels(struct txgbe_adapter *adapter) +{ + unsigned int max_combined; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (!(adapter->flags & TXGBE_FLAG_MSIX_ENABLED)) { + /* We only support one q_vector without MSI-X */ + max_combined = 1; + } else if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) { + /* SR-IOV currently only allows one queue on the PF */ + max_combined = adapter->ring_feature[RING_F_RSS].mask + 1; + } else if (tcs > 1) { + /* For DCB report channels per traffic class */ + if 
(tcs > 4) { + /* 8 TC w/ 8 queues per TC */ + max_combined = 8; + } else { + /* 4 TC w/ 16 queues per TC */ + max_combined = 16; + } + } else if (adapter->atr_sample_rate) { + /* support up to 64 queues with ATR */ + max_combined = TXGBE_MAX_FDIR_INDICES; + if (adapter->xdp_prog) + max_combined = TXGBE_MAX_XDP_RSS_INDICES; + } else { + /* support up to max allowed queues with RSS */ + max_combined = txgbe_max_rss_indices(adapter); + } + + return max_combined; +} + +static void txgbe_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + + /* report maximum channels */ + ch->max_combined = txgbe_max_channels(adapter); + + /* report info for other vector */ + if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + /* record RSS queues */ + ch->combined_count = adapter->ring_feature[RING_F_RSS].indices; + + /* nothing else to report if RSS is disabled */ + if (ch->combined_count == 1) + return; + + /* we do not support ATR queueing if SR-IOV is enabled */ + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) + return; + + /* same thing goes for being DCB enabled */ + if (netdev_get_num_tc(dev) > 1) + return; + + /* if ATR is disabled we can exit */ + if (!adapter->atr_sample_rate) + return; + + /* report flow director queues as maximum channels */ + ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices; +} + +static int txgbe_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + unsigned int count = ch->combined_count; + u8 max_rss_indices = txgbe_max_rss_indices(adapter); + + /* verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* verify other_count has not changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* verify the number of channels does not exceed hardware limits */ + if (count > txgbe_max_channels(adapter)) + return -EINVAL; + + if (count < adapter->active_vlan_limited + 1) { + e_dev_info("vlan rate limit active, can't set less than active " + "limited vlan + 1:%d", (adapter->active_vlan_limited + 1)); + return -EINVAL; + } + /* update feature limits from largest to smallest supported values */ + adapter->ring_feature[RING_F_FDIR].limit = count; + + /* cap RSS limit */ + if (count > max_rss_indices) + count = max_rss_indices; + adapter->ring_feature[RING_F_RSS].limit = count; + +#if IS_ENABLED(CONFIG_FCOE) + /* cap FCoE limit at 8 */ + if (count > TXGBE_RDB_FCRE_TBL_SIZE) + count = TXGBE_RDB_FCRE_TBL_SIZE; + adapter->ring_feature[RING_F_FCOE].limit = count; +#endif /* CONFIG_FCOE */ + + /* use setup TC to update any traffic class queue mapping */ + return txgbe_setup_tc(dev, netdev_get_num_tc(dev)); +} +#endif /* ETHTOOL_SCHANNELS */ + +#ifdef ETHTOOL_GMODULEINFO +static int txgbe_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + struct txgbe_hw *hw = &adapter->hw; + u32 status; + u8 sff8472_rev, addr_mode; + u8 identifier = 0; + u8 sff8636_rev = 0; + bool page_swap = false; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + u32 value; + + if (hw->mac.type == txgbe_mac_aml40) { + value = rd32(hw, TXGBE_GPIO_EXT); + if (value & TXGBE_SFP1_MOD_PRST_LS) { + return -EIO; + } + + if (!netif_carrier_ok(dev)) { + e_err(drv, "\"ethtool -m\" is supported only when link is up for 40G.\n"); + return -EIO; + } + } + + 
if (hw->mac.type == txgbe_mac_aml) { + value = rd32(hw, TXGBE_GPIO_EXT); + if (value & TXGBE_SFP1_MOD_ABS_LS) { + return -EIO; + } + } + + if (hw->mac.type != txgbe_mac_sp) { + if (0 != TCALL(hw, mac.ops.acquire_swfw_sync, swfw_mask)) + return -EBUSY; + + if (!test_bit(__TXGBE_DOWN, &adapter->state)) + cancel_work_sync(&adapter->sfp_sta_task); + + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_IDENTIFIER, + &identifier); + if (status != 0) + goto ERROR_IO; + + switch (identifier) { + case TXGBE_SFF_IDENTIFIER_SFP: + /* Check whether we support SFF-8472 or not */ + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_SFF_8472_COMP, + &sff8472_rev); + if (status != 0) + goto ERROR_IO; + + /* addressing mode is not supported */ + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_SFF_8472_SWAP, + &addr_mode); + if (status != 0) + goto ERROR_IO; + + if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) { + e_err(drv, "Address change required to access page 0xA2, " + "but not supported. Please report the module type to the " + "driver maintainers.\n"); + page_swap = true; + } + + if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap || + !(addr_mode & TXGBE_SFF_DDM_IMPLEMENTED)) { + /* We have a SFP, but it does not support SFF-8472 */ + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + /* We have a SFP which supports a revision of SFF-8472. */ + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + break; + case TXGBE_SFF_IDENTIFIER_QSFP: + case TXGBE_SFF_IDENTIFIER_QSFP_PLUS: + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_SFF_REVISION_ADDR, + &sff8636_rev); + if (status != 0) + goto ERROR_IO; + + /* Check revision compliance */ + if (sff8636_rev > 0x02) { + /* Module is SFF-8636 compliant */ + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = TXGBE_MODULE_QSFP_MAX_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = TXGBE_MODULE_QSFP_MAX_LEN; + } + break; + default: + e_err(drv, "SFF Module Type not recognized.\n"); + return -EINVAL; + } + + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); + } else { + modinfo->type = adapter->eeprom_type; + modinfo->eeprom_len = adapter->eeprom_len; + } + + return 0; + +ERROR_IO: + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); + return -EIO; +} + +#define SFF_A2_ALRM_FLG 0x170 +#define SFF_A2_WARN_FLG 0x174 +#define SFF_A2_TEMP 0x160 +#define SFF_A2_RX_PWR 0x169 + +static int txgbe_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + struct txgbe_hw *hw = &adapter->hw; + int i = 0; + bool is_sfp = false; + u32 value; + u8 identifier = 0; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + u8 databyte; + s32 status = 0; + + if (hw->mac.type == txgbe_mac_aml40) { + value = rd32(hw, TXGBE_GPIO_EXT); + if (value & TXGBE_SFP1_MOD_PRST_LS) { + return -EIO; + } + } + + if (hw->mac.type == txgbe_mac_aml) { + value = rd32(hw, TXGBE_GPIO_EXT); + if (value & TXGBE_SFP1_MOD_ABS_LS) { + return -EIO; + } + } + + if (hw->mac.type != txgbe_mac_sp) { + if (0 != TCALL(hw, mac.ops.acquire_swfw_sync, swfw_mask)) + return -EBUSY; + + if (!test_bit(__TXGBE_DOWN, &adapter->state)) + cancel_work_sync(&adapter->sfp_sta_task); + + if (ee->len == 0) + goto ERROR_INVAL; + + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_IDENTIFIER, + &identifier); + if (status != 0) + goto ERROR_IO; + + if (identifier == TXGBE_SFF_IDENTIFIER_SFP) + is_sfp = true; + + 
memset(data, 0, ee->len); + for (i = 0; i < ee->len; i++) { + u32 offset = i + ee->offset; + u32 page = 0; + + /* I2C reads can take long time */ + if (test_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + goto ERROR_BUSY; + + if (is_sfp) { + if (offset < ETH_MODULE_SFF_8079_LEN) + status = TCALL(hw, phy.ops.read_i2c_eeprom, offset, + &databyte); + else + status = TCALL(hw, phy.ops.read_i2c_sff8472, offset, + &databyte); + + if (status != 0) + goto ERROR_IO; + } else { + while (offset >= ETH_MODULE_SFF_8436_LEN) { + offset -= ETH_MODULE_SFF_8436_LEN / 2; + page++; + } + + if (page == 0 || !(data[0x2] & 0x4)) { + status = TCALL(hw, phy.ops.read_i2c_sff8636, page, offset, + &databyte); + + if (status != 0) + goto ERROR_IO; + } + } + data[i] = databyte; + } + } else { + if (ee->len == 0) + goto ERROR_INVAL; + + if (0 != TCALL(hw, mac.ops.acquire_swfw_sync, swfw_mask)) + return -EBUSY; + + /*when down, can't know sfp change, get eeprom from i2c*/ + if (test_bit(__TXGBE_DOWN, &adapter->state)) { + for (i = ee->offset; i < ee->offset + ee->len; i++) { + /* I2C reads can take long time */ + if (test_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + goto ERROR_BUSY; + + if (i < ETH_MODULE_SFF_8079_LEN) + status = TCALL(hw, phy.ops.read_i2c_eeprom, i, + &databyte); + else + status = TCALL(hw, phy.ops.read_i2c_sff8472, i, + &databyte); + + if (status != 0) + goto ERROR_IO; + + data[i - ee->offset] = databyte; + } + } else { + if (adapter->eeprom_type == ETH_MODULE_SFF_8472) { + + cancel_work_sync(&adapter->sfp_sta_task); + + /*alarm flag*/ + for (i = SFF_A2_ALRM_FLG; i <= SFF_A2_ALRM_FLG + 1; i++){ + status = TCALL(hw, phy.ops.read_i2c_sff8472, i, + &databyte); + + if (status != 0) + goto ERROR_IO; + + adapter->i2c_eeprom[i] = databyte; + } + /*warm flag*/ + for (i = SFF_A2_WARN_FLG; i <= SFF_A2_WARN_FLG + 1; i++){ + status = TCALL(hw, phy.ops.read_i2c_sff8472, i, + &databyte); + + if (status != 0) + goto ERROR_IO; + + adapter->i2c_eeprom[i] = databyte; + } + /*dom monitor value*/ + for (i = SFF_A2_TEMP; i <= SFF_A2_RX_PWR + 1; i++){ + status = TCALL(hw, phy.ops.read_i2c_sff8472, i, + &databyte); + + if (status != 0) + goto ERROR_IO; + + adapter->i2c_eeprom[i] = databyte; + } + } + for (i = ee->offset; i < ee->offset + ee->len; i++) + data[i - ee->offset] = adapter->i2c_eeprom[i]; + } + } + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); + return 0; +ERROR_BUSY: + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); + return -EBUSY; +ERROR_IO: + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); + return -EIO; +ERROR_INVAL: + return -EINVAL; +} +#endif /* ETHTOOL_GMODULEINFO */ + +#ifdef ETHTOOL_GEEE +#ifdef HAVE_ETHTOOL_KEEE +static int txgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata) +#else +static int txgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +#endif +{ + return 0; +} +#endif /* ETHTOOL_GEEE */ + +#ifdef ETHTOOL_SEEE +#ifdef HAVE_ETHTOOL_KEEE +static int txgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata) +#else +static int txgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) +#endif +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; +#ifdef HAVE_ETHTOOL_KEEE + struct ethtool_keee eee_data; +#else + struct ethtool_eee eee_data; +#endif + s32 ret_val; + + if (!(hw->mac.ops.setup_eee && + (adapter->flags2 & TXGBE_FLAG2_EEE_CAPABLE))) + return -EOPNOTSUPP; + +#ifdef HAVE_ETHTOOL_KEEE + memset(&eee_data, 0, sizeof(struct ethtool_keee)); +#else + memset(&eee_data, 0, sizeof(struct 
ethtool_eee)); +#endif + + ret_val = txgbe_get_eee(netdev, &eee_data); + if (ret_val) + return ret_val; + + if (eee_data.eee_enabled && !edata->eee_enabled) { + if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) { + e_dev_err("Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) { + e_dev_err("Setting EEE Tx LPI timer is not " + "supported\n"); + return -EINVAL; + } + + if (eee_data.advertised != edata->advertised) { + e_dev_err("Setting EEE advertised speeds is not " + "supported\n"); + return -EINVAL; + } + + } + + if (eee_data.eee_enabled != edata->eee_enabled) { + + if (edata->eee_enabled) + adapter->flags2 |= TXGBE_FLAG2_EEE_ENABLED; + else + adapter->flags2 &= ~TXGBE_FLAG2_EEE_ENABLED; + + /* reset link */ + if (netif_running(netdev)) + txgbe_reinit_locked(adapter); + else + txgbe_reset(adapter); + } + + return 0; +} +#endif /* ETHTOOL_SEEE */ + +static int txgbe_set_flash(struct net_device *netdev, struct ethtool_flash *ef) +{ + int ret; + const struct firmware *fw; + struct txgbe_adapter *adapter = netdev_priv(netdev); + + ret = request_firmware(&fw, ef->data, &netdev->dev); + if (ret < 0) + return ret; + + if (ef->region == 0) { + ret = txgbe_upgrade_flash(&adapter->hw, ef->region, + fw->data, fw->size); + } else { + if (txgbe_mng_present(&adapter->hw)) { + ret = txgbe_upgrade_flash_hostif(&adapter->hw, ef->region, + fw->data, fw->size); + } else + ret = -EOPNOTSUPP; + } + + release_firmware(fw); + if (!ret) + dev_info(&netdev->dev, + "loaded firmware %s, reboot to make firmware work\n", ef->data); + return ret; +} + + +static struct ethtool_ops txgbe_ethtool_ops = { +#ifdef ETHTOOL_GLINKSETTINGS + .get_link_ksettings = txgbe_get_link_ksettings, + .set_link_ksettings = txgbe_set_link_ksettings, +#else + .get_settings = txgbe_get_settings, + .set_settings = txgbe_set_settings, +#endif +#ifdef ETHTOOL_GFECPARAM + .get_fecparam = txgbe_get_fec_param, + .set_fecparam = txgbe_set_fec_param, +#endif /* ETHTOOL_GFECPARAM */ + .get_drvinfo = txgbe_get_drvinfo, + .get_regs_len = txgbe_get_regs_len, + .get_regs = txgbe_get_regs, + .get_wol = txgbe_get_wol, + .set_wol = txgbe_set_wol, + .nway_reset = txgbe_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = txgbe_get_eeprom_len, + .get_eeprom = txgbe_get_eeprom, + .set_eeprom = txgbe_set_eeprom, + .get_ringparam = txgbe_get_ringparam, + .set_ringparam = txgbe_set_ringparam, + .get_pauseparam = txgbe_get_pauseparam, + .set_pauseparam = txgbe_set_pauseparam, + .get_msglevel = txgbe_get_msglevel, + .set_msglevel = txgbe_set_msglevel, +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT + .self_test_count = txgbe_diag_test_count, +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + .self_test = txgbe_diag_test, + .get_strings = txgbe_get_strings, +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#ifdef HAVE_ETHTOOL_SET_PHYS_ID + .set_phys_id = txgbe_set_phys_id, +#else + .phys_id = txgbe_phys_id, +#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT + .get_stats_count = txgbe_get_stats_count, +#else /* HAVE_ETHTOOL_GET_SSET_COUNT */ + .get_sset_count = txgbe_get_sset_count, + .get_priv_flags = txgbe_get_priv_flags, + .set_priv_flags = txgbe_set_priv_flags, +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + .get_ethtool_stats = txgbe_get_ethtool_stats, +#ifdef HAVE_ETHTOOL_GET_PERM_ADDR + .get_perm_addr = ethtool_op_get_perm_addr, +#endif + +#ifdef HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT + .supported_coalesce_params = 
ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES_IRQ | + ETHTOOL_COALESCE_USE_ADAPTIVE, +#endif + .get_coalesce = txgbe_get_coalesce, + .set_coalesce = txgbe_set_coalesce, +#ifndef HAVE_NDO_SET_FEATURES + .get_rx_csum = txgbe_get_rx_csum, + .set_rx_csum = txgbe_set_rx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = txgbe_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, +#ifdef NETIF_F_TSO + .get_tso = ethtool_op_get_tso, + .set_tso = txgbe_set_tso, +#endif +#ifdef ETHTOOL_GFLAGS + .get_flags = ethtool_op_get_flags, + .set_flags = txgbe_set_flags, +#endif +#endif /* HAVE_NDO_SET_FEATURES */ +#ifdef ETHTOOL_GRXRINGS + .get_rxnfc = txgbe_get_rxnfc, + .set_rxnfc = txgbe_set_rxnfc, +#ifdef ETHTOOL_SRXNTUPLE + .set_rx_ntuple = txgbe_set_rx_ntuple, +#endif +#endif /* ETHTOOL_GRXRINGS */ +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT + +#ifdef ETHTOOL_GEEE + .get_eee = txgbe_get_eee, +#endif /* ETHTOOL_GEEE */ +#ifdef ETHTOOL_SEEE + .set_eee = txgbe_set_eee, +#endif /* ETHTOOL_SEEE */ +#ifdef ETHTOOL_SCHANNELS + .get_channels = txgbe_get_channels, + .set_channels = txgbe_set_channels, +#endif +#ifdef ETHTOOL_GMODULEINFO + .get_module_info = txgbe_get_module_info, + .get_module_eeprom = txgbe_get_module_eeprom, +#endif +#ifdef HAVE_ETHTOOL_GET_TS_INFO + .get_ts_info = txgbe_get_ts_info, +#endif +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_indir_size = txgbe_rss_indir_size, + .get_rxfh_key_size = txgbe_get_rxfh_key_size, + .get_rxfh = txgbe_get_rxfh, + .set_rxfh = txgbe_set_rxfh, +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ + .flash_device = txgbe_set_flash, +}; + +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +static const struct ethtool_ops_ext txgbe_ethtool_ops_ext = { + .size = sizeof(struct ethtool_ops_ext), + .get_ts_info = txgbe_get_ts_info, + .set_phys_id = txgbe_set_phys_id, + .get_channels = txgbe_get_channels, + .set_channels = txgbe_set_channels, +#ifdef ETHTOOL_GMODULEINFO + .get_module_info = txgbe_get_module_info, + .get_module_eeprom = txgbe_get_module_eeprom, +#endif +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_indir_size = txgbe_rss_indir_size, + .get_rxfh_key_size = txgbe_get_rxfh_key_size, + .get_rxfh = txgbe_get_rxfh, + .set_rxfh = txgbe_set_rxfh, +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +#ifdef ETHTOOL_GEEE + .get_eee = txgbe_get_eee, +#endif /* ETHTOOL_GEEE */ +#ifdef ETHTOOL_SEEE + .set_eee = txgbe_set_eee, +#endif /* ETHTOOL_SEEE */ +}; + +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +void txgbe_set_ethtool_ops(struct net_device *netdev) +{ +#ifndef ETHTOOL_OPS_COMPAT + netdev->ethtool_ops = &txgbe_ethtool_ops; +#else + SET_ETHTOOL_OPS(netdev, &txgbe_ethtool_ops); +#endif + +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT + set_ethtool_ops_ext(netdev, &txgbe_ethtool_ops_ext); +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +} +#endif /* SIOCETHTOOL */ + diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h deleted file mode 100644 index ace1b3571012407dd96678adb9ab1e11b568c00d..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h +++ /dev/null @@ -1,9 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. 
*/ - -#ifndef _TXGBE_ETHTOOL_H_ -#define _TXGBE_ETHTOOL_H_ - -void txgbe_set_ethtool_ops(struct net_device *netdev); - -#endif /* _TXGBE_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_fcoe.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_fcoe.c new file mode 100644 index 0000000000000000000000000000000000000000..2f885119ea927ede1a5fb11b5ee079f59a0f07c5 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_fcoe.c @@ -0,0 +1,984 @@ +/* + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on txgbe_fcoe.c, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "txgbe.h" + +#if IS_ENABLED(CONFIG_FCOE) +#if IS_ENABLED(CONFIG_DCB) +#include "txgbe_dcb.h" +#endif /* CONFIG_DCB */ +#include +#include +#include +#include +#include +#include +#include + +/** + * txgbe_fcoe_clear_ddp - clear the given ddp context + * @ddp - ptr to the txgbe_fcoe_ddp + * + * Returns : none + * + */ +static inline void txgbe_fcoe_clear_ddp(struct txgbe_fcoe_ddp *ddp) +{ + ddp->len = 0; + ddp->err = 1; + ddp->udl = NULL; + ddp->udp = 0UL; + ddp->sgl = NULL; + ddp->sgc = 0; +} + +/** + * txgbe_fcoe_ddp_put - free the ddp context for a given xid + * @netdev: the corresponding net_device + * @xid: the xid that corresponding ddp will be freed + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_done + * and it is expected to be called by ULD, i.e., FCP layer of libfc + * to release the corresponding ddp context when the I/O is done. 
+ * + * Returns : data length already ddp-ed in bytes + */ +int txgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) +{ + int len = 0; + struct txgbe_fcoe *fcoe; + struct txgbe_adapter *adapter; + struct txgbe_hw *hw; + struct txgbe_fcoe_ddp *ddp; + u32 fcbuff; + + if (!netdev) + goto out_ddp_put; + + if (xid > netdev->fcoe_ddp_xid) + goto out_ddp_put; + + adapter = netdev_priv(netdev); + hw = &adapter->hw; + fcoe = &adapter->fcoe; + ddp = &fcoe->ddp[xid]; + if (!ddp->udl) + goto out_ddp_put; + + len = ddp->len; + /* if there an error, force to invalidate ddp context */ + if (ddp->err) { + + /* other hardware requires DDP FCoE lock */ + spin_lock_bh(&fcoe->lock); + + wr32(hw, TXGBE_PSR_FC_FLT_CTXT, 0); + wr32(hw, TXGBE_PSR_FC_FLT_RW, + (xid | TXGBE_PSR_FC_FLT_RW_WE)); + wr32(hw, TXGBE_RDM_FCBUF, 0); + wr32(hw, TXGBE_RDM_FCRW, + (xid | TXGBE_RDM_FCRW_WE)); + + /* read FCBUFF to check context invalidated */ + wr32(hw, TXGBE_RDM_FCRW, + (xid | TXGBE_RDM_FCRW_RE)); + fcbuff = rd32(hw, TXGBE_RDM_FCBUF); + + spin_unlock_bh(&fcoe->lock); + + /* guaranteed to be invalidated after 100us */ + if (fcbuff & TXGBE_RDM_FCBUF_VALID) + udelay(100); + } + if (ddp->sgl) + dma_unmap_sg(pci_dev_to_dev(adapter->pdev), ddp->sgl, ddp->sgc, + DMA_FROM_DEVICE); + if (ddp->pool) { + dma_pool_free(ddp->pool, ddp->udl, ddp->udp); + ddp->pool = NULL; + } + + txgbe_fcoe_clear_ddp(ddp); + +out_ddp_put: + return len; +} + +/** + * txgbe_fcoe_ddp_setup - called to set up ddp context + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * + * Returns : 1 for success and 0 for no ddp + */ +static int txgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc, + int target_mode) +{ + struct txgbe_adapter *adapter; + struct txgbe_hw *hw; + struct txgbe_fcoe *fcoe; + struct txgbe_fcoe_ddp *ddp; + struct txgbe_fcoe_ddp_pool *ddp_pool; + struct scatterlist *sg; + unsigned int i, j, dmacount; + unsigned int len; + static const unsigned int bufflen = TXGBE_FCBUFF_MIN; + unsigned int firstoff = 0; + unsigned int lastsize; + unsigned int thisoff = 0; + unsigned int thislen = 0; + u32 fcbuff, fcdmarw, fcfltrw, fcfltctxt; + dma_addr_t addr = 0; + + if (!netdev || !sgl || !sgc) + return 0; + + adapter = netdev_priv(netdev); + if (xid > netdev->fcoe_ddp_xid) { + e_warn(drv, "xid=0x%x out-of-range\n", xid); + return 0; + } + + /* no DDP if we are already down or resetting */ + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_RESETTING, &adapter->state)) + return 0; + + fcoe = &adapter->fcoe; + ddp = &fcoe->ddp[xid]; + if (ddp->sgl) { + e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n", + xid, ddp->sgl, ddp->sgc); + return 0; + } + txgbe_fcoe_clear_ddp(ddp); + + + if (!fcoe->ddp_pool) { + e_warn(drv, "No ddp_pool resources allocated\n"); + return 0; + } + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu()); + if (!ddp_pool->pool) { + e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); + goto out_noddp; + } + + /* setup dma from scsi command sgl */ + dmacount = dma_map_sg(pci_dev_to_dev(adapter->pdev), sgl, sgc, + DMA_FROM_DEVICE); + if (dmacount == 0) { + e_err(drv, "xid 0x%x DMA map error\n", xid); + goto out_noddp; + } + + /* alloc the udl from per cpu ddp pool */ + ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); + if (!ddp->udl) { + e_err(drv, "failed allocated ddp context\n"); + goto out_noddp_unmap; + } + ddp->pool = 
ddp_pool->pool; + ddp->sgl = sgl; + ddp->sgc = sgc; + + j = 0; + for_each_sg(sgl, sg, dmacount, i) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + while (len) { + /* max number of buffers allowed in one DDP context */ + if (j >= TXGBE_BUFFCNT_MAX) { + ddp_pool->noddp++; + goto out_noddp_free; + } + + /* get the offset of length of current buffer */ + thisoff = addr & ((dma_addr_t)bufflen - 1); + thislen = min((bufflen - thisoff), len); + /* + * all but the 1st buffer (j == 0) + * must be aligned on bufflen + */ + if ((j != 0) && (thisoff)) + goto out_noddp_free; + /* + * all but the last buffer + * ((i == (dmacount - 1)) && (thislen == len)) + * must end at bufflen + */ + if (((i != (dmacount - 1)) || (thislen != len)) + && ((thislen + thisoff) != bufflen)) + goto out_noddp_free; + + ddp->udl[j] = (u64)(addr - thisoff); + /* only the first buffer may have none-zero offset */ + if (j == 0) + firstoff = thisoff; + len -= thislen; + addr += thislen; + j++; + } + } + /* only the last buffer may have non-full bufflen */ + lastsize = thisoff + thislen; + + /* + * lastsize can not be bufflen. + * If it is then adding another buffer with lastsize = 1. + * Since lastsize is 1 there will be no HW access to this buffer. + */ + if (lastsize == bufflen) { + if (j >= TXGBE_BUFFCNT_MAX) { + ddp_pool->noddp_ext_buff++; + goto out_noddp_free; + } + + ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma); + j++; + lastsize = 1; + } + put_cpu(); + + fcbuff = TXGBE_RDM_FCBUF_SIZE(TXGBE_FCBUFF_4KB) | + TXGBE_RDM_FCBUF_COUNT(j) | + TXGBE_RDM_FCBUF_OFFSET(firstoff) | + TXGBE_RDM_FCBUF_VALID; + + /* Set WRCONTX bit to allow DDP for target */ + fcfltctxt = TXGBE_PSR_FC_FLT_CTXT_VALID; + if (!target_mode) + fcfltctxt |= TXGBE_PSR_FC_FLT_CTXT_WR; + + fcdmarw = xid | TXGBE_RDM_FCRW_WE | + TXGBE_RDM_FCRW_LASTSIZE(lastsize); + + fcfltrw = xid; + fcfltrw |= TXGBE_PSR_FC_FLT_RW_WE; + + /* program DMA context */ + hw = &adapter->hw; + + /* turn on last frame indication for target mode as FCP_RSPtarget is + * supposed to send FCP_RSP when it is done. */ + if (target_mode && !test_bit(__TXGBE_FCOE_TARGET, &fcoe->mode)) { + set_bit(__TXGBE_FCOE_TARGET, &fcoe->mode); + wr32m(hw, TXGBE_PSR_FC_CTL, + TXGBE_PSR_FC_CTL_LASTSEQH, TXGBE_PSR_FC_CTL_LASTSEQH); + } + + /* other devices require DDP lock with direct DDP context access */ + spin_lock_bh(&fcoe->lock); + + wr32(hw, TXGBE_RDM_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); + wr32(hw, TXGBE_RDM_FCPTRH, (u64)ddp->udp >> 32); + wr32(hw, TXGBE_RDM_FCBUF, fcbuff); + wr32(hw, TXGBE_RDM_FCRW, fcdmarw); + /* program filter context */ + wr32(hw, TXGBE_PSR_FC_PARAM, 0); + wr32(hw, TXGBE_PSR_FC_FLT_CTXT, fcfltctxt); + wr32(hw, TXGBE_PSR_FC_FLT_RW, fcfltrw); + + spin_unlock_bh(&fcoe->lock); + + + return 1; + +out_noddp_free: + dma_pool_free(ddp->pool, ddp->udl, ddp->udp); + txgbe_fcoe_clear_ddp(ddp); + +out_noddp_unmap: + dma_unmap_sg(pci_dev_to_dev(adapter->pdev), sgl, sgc, DMA_FROM_DEVICE); +out_noddp: + put_cpu(); + return 0; +} + +/** + * txgbe_fcoe_ddp_get - called to set up ddp context in initiator mode + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup + * and is expected to be called from ULD, e.g., FCP layer of libfc + * to set up ddp for the corresponding xid of the given sglist for + * the corresponding I/O. 
+ * + * Returns : 1 for success and 0 for no ddp + */ +int txgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc) +{ + return txgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0); +} + +#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET +/** + * txgbe_fcoe_ddp_target - called to set up ddp context in target mode + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_target + * and is expected to be called from ULD, e.g., FCP layer of libfc + * to set up ddp for the corresponding xid of the given sglist for + * the corresponding I/O. The DDP in target mode is a write I/O request + * from the initiator. + * + * Returns : 1 for success and 0 for no ddp + */ +int txgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc) +{ + return txgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1); +} + +#endif /* HAVE_NETDEV_OPS_FCOE_DDP_TARGET */ +/** + * txgbe_fcoe_ddp - check ddp status and mark it done + * @adapter: txgbe adapter + * @rx_desc: advanced rx descriptor + * @skb: the skb holding the received data + * + * This checks ddp status. + * + * Returns : < 0 indicates an error or not a FCoE ddp, 0 indicates + * not passing the skb to ULD, > 0 indicates is the length of data + * being ddped. + */ +int txgbe_fcoe_ddp(struct txgbe_adapter *adapter, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct txgbe_fcoe *fcoe = &adapter->fcoe; + struct txgbe_fcoe_ddp *ddp; + struct fc_frame_header *fh; + int rc = -EINVAL, ddp_max; + __le32 fcerr = txgbe_test_staterr(rx_desc, TXGBE_RXD_ERR_FCERR); + __le32 ddp_err; + u32 fctl; + u16 xid; + + if (fcerr == cpu_to_le32(TXGBE_FCERR_BADCRC)) + skb->ip_summed = CHECKSUM_NONE; + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* verify header contains at least the FCOE header */ + BUG_ON(skb_headlen(skb) < FCOE_HEADER_LEN); + + fh = (struct fc_frame_header *)(skb->data + sizeof(struct fcoe_hdr)); + + if (skb->protocol == htons(ETH_P_8021Q)) + fh = (struct fc_frame_header *)((char *)fh + VLAN_HLEN); + + fctl = ntoh24(fh->fh_f_ctl); + if (fctl & FC_FC_EX_CTX) + xid = ntohs(fh->fh_ox_id); + else + xid = ntohs(fh->fh_rx_id); + + ddp_max = TXGBE_FCOE_DDP_MAX; + + if (xid >= ddp_max) + goto ddp_out; + + ddp = &fcoe->ddp[xid]; + if (!ddp->udl) + goto ddp_out; + + ddp_err = txgbe_test_staterr(rx_desc, TXGBE_RXD_ERR_FCEOFE | + TXGBE_RXD_ERR_FCERR); + if (ddp_err) + goto ddp_out; + + switch (txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_FCSTAT)) { + /* return 0 to bypass going to ULD for DDPed data */ + case __constant_cpu_to_le32(TXGBE_RXD_STAT_FCSTAT_DDP): + /* update length of DDPed data */ + ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); + rc = 0; + break; + /* unmap the sg list when FCPRSP is received */ + case __constant_cpu_to_le32(TXGBE_RXD_STAT_FCSTAT_FCPRSP): + dma_unmap_sg(pci_dev_to_dev(adapter->pdev), ddp->sgl, + ddp->sgc, DMA_FROM_DEVICE); + ddp->err = ddp_err; + ddp->sgl = NULL; + ddp->sgc = 0; + fallthrough; + /* if DDP length is present pass it through to ULD */ + case __constant_cpu_to_le32(TXGBE_RXD_STAT_FCSTAT_NODDP): + /* update length of DDPed data */ + ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); + if (ddp->len) + rc = ddp->len; + break; + /* no match will return as an error */ + case __constant_cpu_to_le32(TXGBE_RXD_STAT_FCSTAT_NOMTCH): + default: + break; + } + + /* In 
target mode, check the last data frame of the sequence. + * For DDP in target mode, data is already DDPed but the header + * indication of the last data frame ould allow is to tell if we + * got all the data and the ULP can send FCP_RSP back, as this is + * not a full fcoe frame, we fill the trailer here so it won't be + * dropped by the ULP stack. + */ + if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) && + (fctl & FC_FC_END_SEQ)) { + struct fcoe_crc_eof *crc; + skb_linearize(skb); + crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc)); + crc->fcoe_eof = FC_EOF_T; + } +ddp_out: + return rc; +} + +/** + * txgbe_fso - txgbe FCoE Sequence Offload (FSO) + * @tx_ring: tx desc ring + * @first: first tx_buffer structure containing skb, tx_flags, and protocol + * @hdr_len: hdr_len to be returned + * + * This sets up large send offload for FCoE + * + * Returns : 0 indicates success, < 0 for error + */ +int txgbe_fso(struct txgbe_ring *tx_ring, + struct txgbe_tx_buffer *first, + u8 *hdr_len) +{ + struct sk_buff *skb = first->skb; + struct fc_frame_header *fh; + u32 vlan_macip_lens; + u32 fcoe_sof_eof = 0; + u32 mss_l4len_idx; + u8 sof, eof; + +#ifdef NETIF_F_FSO + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type != SKB_GSO_FCOE) { + dev_err(tx_ring->dev, "Wrong gso type %d:expecting " + "SKB_GSO_FCOE\n", skb_shinfo(skb)->gso_type); + return -EINVAL; + } + +#endif + /* resets the header to point fcoe/fc */ + skb_set_network_header(skb, skb->mac_len); + skb_set_transport_header(skb, skb->mac_len + + sizeof(struct fcoe_hdr)); + + /* sets up SOF and ORIS */ + sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof; + switch (sof) { + case FC_SOF_I2: + fcoe_sof_eof = TXGBE_TXD_FCOEF_ORIS; + break; + case FC_SOF_I3: + fcoe_sof_eof = TXGBE_TXD_FCOEF_SOF | + TXGBE_TXD_FCOEF_ORIS; + break; + case FC_SOF_N2: + break; + case FC_SOF_N3: + fcoe_sof_eof = TXGBE_TXD_FCOEF_SOF; + break; + default: + dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof); + return -EINVAL; + } + + /* the first byte of the last dword is EOF */ + skb_copy_bits(skb, skb->len - 4, &eof, 1); + /* sets up EOF and ORIE */ + switch (eof) { + case FC_EOF_N: + fcoe_sof_eof |= TXGBE_TXD_FCOEF_EOF_N; + break; + case FC_EOF_T: + /* lso needs ORIE */ + if (skb_is_gso(skb)) + fcoe_sof_eof |= TXGBE_TXD_FCOEF_EOF_N | + TXGBE_TXD_FCOEF_ORIE; + else + fcoe_sof_eof |= TXGBE_TXD_FCOEF_EOF_T; + break; + case FC_EOF_NI: + fcoe_sof_eof |= TXGBE_TXD_FCOEF_EOF_NI; + break; + case FC_EOF_A: + fcoe_sof_eof |= TXGBE_TXD_FCOEF_EOF_A; + break; + default: + dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof); + return -EINVAL; + } + + /* sets up PARINC indicating data offset */ + fh = (struct fc_frame_header *)skb_transport_header(skb); + if (fh->fh_f_ctl[2] & FC_FC_REL_OFF) + fcoe_sof_eof |= TXGBE_TXD_FCOEF_PARINC; + + /* include trailer in headlen as it is replicated per frame */ + *hdr_len = sizeof(struct fcoe_crc_eof); + + /* hdr_len includes fc_hdr if FCoE LSO is enabled */ + if (skb_is_gso(skb)) { + *hdr_len += skb_transport_offset(skb) + + sizeof(struct fc_frame_header); + /* update gso_segs and bytecount */ + first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, + skb_shinfo(skb)->gso_size); + first->bytecount += (first->gso_segs - 1) * *hdr_len; + first->tx_flags |= TXGBE_TX_FLAGS_TSO; + } + + /* set flag indicating FCOE to txgbe_tx_map call */ + first->tx_flags |= TXGBE_TX_FLAGS_FCOE | TXGBE_TX_FLAGS_CC; + + /* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */ + mss_l4len_idx = skb_shinfo(skb)->gso_size << TXGBE_TXD_MSS_SHIFT; + + /* vlan_macip_lens: 
HEADLEN, MACLEN, VLAN tag */ + vlan_macip_lens = skb_transport_offset(skb) + + sizeof(struct fc_frame_header); + vlan_macip_lens |= (skb_transport_offset(skb) - 4) + << TXGBE_TXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & TXGBE_TX_FLAGS_VLAN_MASK; + + /* write context desc */ + txgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof, + TXGBE_TXD_TUCMD_FCOE, mss_l4len_idx); + + return 0; +} + +static void txgbe_fcoe_dma_pool_free(struct txgbe_fcoe *fcoe, unsigned int cpu) +{ + struct txgbe_fcoe_ddp_pool *ddp_pool; + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); + if (ddp_pool->pool) + dma_pool_destroy(ddp_pool->pool); + ddp_pool->pool = NULL; +} + +static int txgbe_fcoe_dma_pool_alloc(struct txgbe_fcoe *fcoe, + struct device *dev, + unsigned int cpu) +{ + struct txgbe_fcoe_ddp_pool *ddp_pool; + struct dma_pool *pool; + char pool_name[32]; + + snprintf(pool_name, 32, "txgbe_fcoe_ddp_%d", cpu); + + pool = dma_pool_create(pool_name, dev, TXGBE_FCPTR_MAX, + TXGBE_FCPTR_ALIGN, PAGE_SIZE); + if (!pool) + return -ENOMEM; + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); + ddp_pool->pool = pool; + ddp_pool->noddp = 0; + ddp_pool->noddp_ext_buff = 0; + + return 0; +} + +/** + * txgbe_configure_fcoe - configures registers for fcoe at start + * @adapter: ptr to txgbe adapter + * + * This sets up FCoE related registers + * + * Returns : none + */ +void txgbe_configure_fcoe(struct txgbe_adapter *adapter) +{ + struct txgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; + struct txgbe_hw *hw = &adapter->hw; + int i, fcoe_i; + u32 fcoe_q; + u32 etqf; + int fcreta_size; + + /* Minimal functionality for FCoE requires at least CRC offloads */ + if (!(adapter->netdev->features & NETIF_F_FCOE_CRC)) + return; + + /* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */ + etqf = ETH_P_FCOE | TXGBE_PSR_ETYPE_SWC_FCOE | + TXGBE_PSR_ETYPE_SWC_FILTER_EN; + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) { + etqf |= TXGBE_PSR_ETYPE_SWC_POOL_ENABLE; + etqf |= VMDQ_P(0) << TXGBE_PSR_ETYPE_SWC_POOL_SHIFT; + } + wr32(hw, + TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_FCOE), + etqf); + wr32(hw, + TXGBE_RDB_ETYPE_CLS(TXGBE_PSR_ETYPE_SWC_FILTER_FCOE), + 0); + + /* leave remaining registers unconfigured if FCoE is disabled */ + if (!(adapter->flags & TXGBE_FLAG_FCOE_ENABLED)) + return; + + /* Use one or more Rx queues for FCoE by redirection table */ + fcreta_size = TXGBE_RDB_FCRE_TBL_SIZE; + + for (i = 0; i < fcreta_size; i++) { + fcoe_i = + TXGBE_RDB_FCRE_TBL_RING(fcoe->offset + (i % fcoe->indices)); + fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; + wr32(hw, TXGBE_RDB_FCRE_TBL(i), fcoe_q); + } + wr32(hw, TXGBE_RDB_FCRE_CTL, TXGBE_RDB_FCRE_CTL_ENA); + + /* Enable L2 EtherType filter for FIP */ + etqf = ETH_P_FIP | TXGBE_PSR_ETYPE_SWC_FILTER_EN; + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) { + etqf |= TXGBE_PSR_ETYPE_SWC_POOL_ENABLE; + etqf |= VMDQ_P(0) << TXGBE_PSR_ETYPE_SWC_POOL_SHIFT; + } + wr32(hw, TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_FIP), + etqf); + + /* Send FIP frames to the first FCoE queue */ + fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx; + wr32(hw, TXGBE_RDB_ETYPE_CLS(TXGBE_PSR_ETYPE_SWC_FILTER_FIP), + TXGBE_RDB_ETYPE_CLS_QUEUE_EN | + (fcoe_q << TXGBE_RDB_ETYPE_CLS_RX_QUEUE_SHIFT)); + + /* Configure FCoE Rx control */ + wr32(hw, TXGBE_PSR_FC_CTL, + TXGBE_PSR_FC_CTL_FCCRCBO | + TXGBE_PSR_FC_CTL_FCOEVER(FC_FCOE_VER) | + TXGBE_PSR_FC_CTL_ALLH); +} + +/** + * txgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources + * @adapter : txgbe adapter + * + * 
Cleans up outstanding ddp context resources + * + * Returns : none + */ +void txgbe_free_fcoe_ddp_resources(struct txgbe_adapter *adapter) +{ + struct txgbe_fcoe *fcoe = &adapter->fcoe; + int cpu, i, ddp_max; + + /* do nothing if no DDP pools were allocated */ + if (!fcoe->ddp_pool) + return; + + ddp_max = TXGBE_FCOE_DDP_MAX; + + for (i = 0; i < ddp_max; i++) + txgbe_fcoe_ddp_put(adapter->netdev, i); + + for_each_possible_cpu(cpu) + txgbe_fcoe_dma_pool_free(fcoe, cpu); + + dma_unmap_single(pci_dev_to_dev(adapter->pdev), + fcoe->extra_ddp_buffer_dma, + TXGBE_FCBUFF_MIN, + DMA_FROM_DEVICE); + kfree(fcoe->extra_ddp_buffer); + + fcoe->extra_ddp_buffer = NULL; + fcoe->extra_ddp_buffer_dma = 0; +} + +/** + * txgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources + * @adapter: txgbe adapter + * + * Sets up ddp context resources + * + * Returns : 0 indicates success or -EINVAL on failure + */ +int txgbe_setup_fcoe_ddp_resources(struct txgbe_adapter *adapter) +{ + struct txgbe_fcoe *fcoe = &adapter->fcoe; + struct device *dev = pci_dev_to_dev(adapter->pdev); + void *buffer; + dma_addr_t dma; + unsigned int cpu; + + /* do nothing if no DDP pools were allocated */ + if (!fcoe->ddp_pool) + return 0; + + /* Extra buffer to be shared by all DDPs for HW work around */ + buffer = kmalloc(TXGBE_FCBUFF_MIN, GFP_ATOMIC); + if (!buffer) { + e_err(drv, "failed to allocate extra DDP buffer\n"); + return -ENOMEM; + } + + dma = dma_map_single(dev, buffer, TXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dma)) { + e_err(drv, "failed to map extra DDP buffer\n"); + kfree(buffer); + return -ENOMEM; + } + + fcoe->extra_ddp_buffer = buffer; + fcoe->extra_ddp_buffer_dma = dma; + + /* allocate pci pool for each cpu */ + for_each_possible_cpu(cpu) { + int err = txgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu); + if (!err) + continue; + + e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu); + txgbe_free_fcoe_ddp_resources(adapter); + return -ENOMEM; + } + + return 0; +} + +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE +int txgbe_fcoe_ddp_enable(struct txgbe_adapter *adapter) +#else +static int txgbe_fcoe_ddp_enable(struct txgbe_adapter *adapter) +#endif +{ + struct txgbe_fcoe *fcoe = &adapter->fcoe; + + if (!(adapter->flags & TXGBE_FLAG_FCOE_CAPABLE)) + return -EINVAL; + + fcoe->ddp_pool = alloc_percpu(struct txgbe_fcoe_ddp_pool); + + if (!fcoe->ddp_pool) { + e_err(drv, "failed to allocate percpu DDP resources\n"); + return -ENOMEM; + } + + adapter->netdev->fcoe_ddp_xid = TXGBE_FCOE_DDP_MAX - 1; + + return 0; +} + +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE +void txgbe_fcoe_ddp_disable(struct txgbe_adapter *adapter) +#else +static void txgbe_fcoe_ddp_disable(struct txgbe_adapter *adapter) +#endif +{ + struct txgbe_fcoe *fcoe = &adapter->fcoe; + + adapter->netdev->fcoe_ddp_xid = 0; + + if (!fcoe->ddp_pool) + return; + + free_percpu(fcoe->ddp_pool); + fcoe->ddp_pool = NULL; +} + +#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE +/** + * txgbe_fcoe_enable - turn on FCoE offload feature + * @netdev: the corresponding netdev + * + * Turns on FCoE offload feature in sapphire/amber-lite. 
+ * + * Returns : 0 indicates success or -EINVAL on failure + */ +int txgbe_fcoe_enable(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_fcoe *fcoe = &adapter->fcoe; + + atomic_inc(&fcoe->refcnt); + + if (!(adapter->flags & TXGBE_FLAG_FCOE_CAPABLE)) + return -EINVAL; + + if (adapter->flags & TXGBE_FLAG_FCOE_ENABLED) + return -EINVAL; + + e_info(drv, "Enabling FCoE offload features.\n"); + + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) + e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n"); + + if (netif_running(netdev)) + netdev->netdev_ops->ndo_stop(netdev); + + /* Allocate per CPU memory to track DDP pools */ + txgbe_fcoe_ddp_enable(adapter); + + /* enable FCoE and notify stack */ + adapter->flags |= TXGBE_FLAG_FCOE_ENABLED; + netdev->features |= NETIF_F_FCOE_MTU; + netdev_features_change(netdev); + + /* release existing queues and reallocate them */ + txgbe_clear_interrupt_scheme(adapter); + txgbe_init_interrupt_scheme(adapter); + + if (netif_running(netdev)) + netdev->netdev_ops->ndo_open(netdev); + + return 0; +} + +/** + * txgbe_fcoe_disable - turn off FCoE offload feature + * @netdev: the corresponding netdev + * + * Turns off FCoE offload feature in sapphire/amber-lite. + * + * Returns : 0 indicates success or -EINVAL on failure + */ +int txgbe_fcoe_disable(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + if (!atomic_dec_and_test(&adapter->fcoe.refcnt)) + return -EINVAL; + + if (!(adapter->flags & TXGBE_FLAG_FCOE_ENABLED)) + return -EINVAL; + + e_info(drv, "Disabling FCoE offload features.\n"); + if (netif_running(netdev)) + netdev->netdev_ops->ndo_stop(netdev); + + /* Free per CPU memory to track DDP pools */ + txgbe_fcoe_ddp_disable(adapter); + + /* disable FCoE and notify stack */ + adapter->flags &= ~TXGBE_FLAG_FCOE_ENABLED; + netdev->features &= ~NETIF_F_FCOE_MTU; + + netdev_features_change(netdev); + + /* release existing queues and reallocate them */ + txgbe_clear_interrupt_scheme(adapter); + txgbe_init_interrupt_scheme(adapter); + + if (netif_running(netdev)) + netdev->netdev_ops->ndo_open(netdev); + + return 0; +} +#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */ + +#if IS_ENABLED(CONFIG_DCB) +#ifdef HAVE_DCBNL_OPS_GETAPP +/** + * txgbe_fcoe_getapp - retrieves current user priority bitmap for FCoE + * @netdev: the corresponding net_device + * + * Finds out the corresponding user priority bitmap from the current + * traffic class that FCoE belongs to. Returns 0 as the invalid user + * priority bitmap to indicate an error. + * + * Returns : 802.1p user priority bitmap for FCoE + */ +u8 txgbe_fcoe_getapp(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + return 1 << adapter->fcoe.up; +} +#endif /* HAVE_DCBNL_OPS_GETAPP */ +#endif /* CONFIG_DCB */ +#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN +/** + * txgbe_fcoe_get_wwn - get world wide name for the node or the port + * @netdev : txgbe adapter + * @wwn : the world wide name + * @type: the type of world wide name + * + * Returns the node or port world wide name if both the prefix and the san + * mac address are valid, then the wwn is formed based on the NAA-2 for + * IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3). 
+ * + * Returns : 0 on success + */ +int txgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) +{ + int rc = -EINVAL; + u16 prefix = 0xffff; + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_mac_info *mac = &adapter->hw.mac; + + switch (type) { + case NETDEV_FCOE_WWNN: + prefix = mac->wwnn_prefix; + break; + case NETDEV_FCOE_WWPN: + prefix = mac->wwpn_prefix; + break; + default: + break; + } + + if ((prefix != 0xffff) && + is_valid_ether_addr(mac->san_addr)) { + *wwn = ((u64) prefix << 48) | + ((u64) mac->san_addr[0] << 40) | + ((u64) mac->san_addr[1] << 32) | + ((u64) mac->san_addr[2] << 24) | + ((u64) mac->san_addr[3] << 16) | + ((u64) mac->san_addr[4] << 8) | + ((u64) mac->san_addr[5]); + rc = 0; + } + return rc; +} + +#endif /* HAVE_NETDEV_OPS_FCOE_GETWWN */ +/** + * txgbe_fcoe_get_tc - get the current TC that fcoe is mapped to + * @adapter - pointer to the device adapter structure + * + * Return : TC that FCoE is mapped to + */ +u8 txgbe_fcoe_get_tc(struct txgbe_adapter *adapter) +{ + return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up); +} +#endif /* CONFIG_FCOE */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_fcoe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_fcoe.h new file mode 100644 index 0000000000000000000000000000000000000000..bb0f4c7997ebaec76fc041e00a7067a381bb1460 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_fcoe.h @@ -0,0 +1,91 @@ +/* + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on txgbe_fcoe.h, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _TXGBE_FCOE_H_ +#define _TXGBE_FCOE_H_ + +#if IS_ENABLED(CONFIG_FCOE) + +#include +#include + +/* shift bits within STAT fo FCSTAT */ +#define TXGBE_RXD_FCSTAT_SHIFT 4 + +/* ddp user buffer */ +#define TXGBE_BUFFCNT_MAX 256 /* 8 bits bufcnt */ +#define TXGBE_FCPTR_ALIGN 16 +#define TXGBE_FCPTR_MAX (TXGBE_BUFFCNT_MAX * sizeof(dma_addr_t)) +#define TXGBE_FCBUFF_4KB 0x0 +#define TXGBE_FCBUFF_8KB 0x1 +#define TXGBE_FCBUFF_16KB 0x2 +#define TXGBE_FCBUFF_64KB 0x3 +#define TXGBE_FCBUFF_MAX 65536 /* 64KB max */ +#define TXGBE_FCBUFF_MIN 4096 /* 4KB min */ +#define TXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */ + +/* Default user priority to use for FCoE */ +#define TXGBE_FCOE_DEFUP 3 + +/* fcerr */ +#define TXGBE_FCERR_BADCRC 0x00100000 +#define TXGBE_FCERR_EOFSOF 0x00200000 +#define TXGBE_FCERR_NOFIRST 0x00300000 +#define TXGBE_FCERR_OOOSEQ 0x00400000 +#define TXGBE_FCERR_NODMA 0x00500000 +#define TXGBE_FCERR_PKTLOST 0x00600000 + +/* FCoE DDP for target mode */ +#define __TXGBE_FCOE_TARGET 1 + +struct txgbe_fcoe_ddp { + int len; + u32 err; + unsigned int sgc; + struct scatterlist *sgl; + dma_addr_t udp; + u64 *udl; + struct dma_pool *pool; +}; + +/* per cpu variables */ +struct txgbe_fcoe_ddp_pool { + struct dma_pool *pool; + u64 noddp; + u64 noddp_ext_buff; +}; + +struct txgbe_fcoe { + struct txgbe_fcoe_ddp_pool __percpu *ddp_pool; + atomic_t refcnt; + spinlock_t lock; + struct txgbe_fcoe_ddp ddp[TXGBE_FCOE_DDP_MAX]; + void *extra_ddp_buffer; + dma_addr_t extra_ddp_buffer_dma; + unsigned long mode; + u8 up; + u8 up_set; +}; +#endif /* CONFIG_FCOE */ + +#endif /* _TXGBE_FCOE_H */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index 37274525027002a42d21fa36ea6db39b03f5cb68..9eaf70a435eda119a1cb40948a220ea07c4279bc 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -1,120 +1,538 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ - -#include -#include -#include -#include -#include -#include - -#include "../libwx/wx_type.h" -#include "../libwx/wx_hw.h" +/* + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on txgbe_82599.c, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + + #include "txgbe_type.h" #include "txgbe_hw.h" +#include "txgbe_phy.h" +#include "txgbe_dcb.h" +#include "txgbe_e56.h" +#include "txgbe_e56_bp.h" +#include "txgbe.h" + +#define TXGBE_SP_MAX_TX_QUEUES 128 +#define TXGBE_SP_MAX_RX_QUEUES 128 + +#define TXGBE_SP_RAR_ENTRIES 128 +#define TXGBE_SP_MC_TBL_SIZE 128 +#define TXGBE_SP_VFT_TBL_SIZE 128 +#define TXGBE_SP_RX_PB_SIZE 512 + +STATIC s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw); +STATIC void txgbe_release_eeprom_semaphore(struct txgbe_hw *hw); +STATIC s32 txgbe_mta_vector(struct txgbe_hw *hw, u8 *mc_addr); +STATIC s32 txgbe_get_san_mac_addr_offset(struct txgbe_hw *hw, + u16 *san_mac_offset); + +STATIC s32 txgbe_setup_copper_link(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete); +s32 txgbe_check_mac_link_sp(struct txgbe_hw *hw, u32 *speed, + bool *link_up, bool link_up_wait_to_complete); + + + +u32 rd32_ephy(struct txgbe_hw *hw, u32 addr) +{ + unsigned int portRegOffset; + u32 data; + + /* Set the LAN port indicator to portRegOffset[1] */ + /* 1st, write the regOffset to IDA_ADDR register */ + portRegOffset = TXGBE_ETHPHY_IDA_ADDR; + wr32(hw, portRegOffset, addr); + + /* 2nd, read the data from IDA_DATA register */ + portRegOffset = TXGBE_ETHPHY_IDA_DATA; + data = rd32(hw, portRegOffset); + return data; +} + + +u32 txgbe_rd32_epcs(struct txgbe_hw *hw, u32 addr) +{ + unsigned int portRegOffset; + u32 data; + /* Set the LAN port indicator to portRegOffset[1] */ + /* 1st, write the regOffset to IDA_ADDR register */ + portRegOffset = TXGBE_XPCS_IDA_ADDR; + wr32(hw, portRegOffset, addr); + + /* 2nd, read the data from IDA_DATA register */ + portRegOffset = TXGBE_XPCS_IDA_DATA; + data = rd32(hw, portRegOffset); + + return data; +} + + +void txgbe_wr32_ephy(struct txgbe_hw *hw, u32 addr, u32 data) +{ + unsigned int portRegOffset; + + /* Set the LAN port indicator to portRegOffset[1] */ + /* 1st, write the regOffset to IDA_ADDR register */ + portRegOffset = TXGBE_ETHPHY_IDA_ADDR; + wr32(hw, portRegOffset, addr); + + /* 2nd, read the data from IDA_DATA register */ + portRegOffset = TXGBE_ETHPHY_IDA_DATA; + wr32(hw, portRegOffset, data); +} + +void txgbe_wr32_epcs(struct txgbe_hw *hw, u32 addr, u32 data) +{ + unsigned int portRegOffset; + + /* Set the LAN port indicator to portRegOffset[1] */ + /* 1st, write the regOffset to IDA_ADDR register */ + portRegOffset = TXGBE_XPCS_IDA_ADDR; + wr32(hw, portRegOffset, addr); + /* 2nd, read the data from IDA_DATA register */ + portRegOffset = TXGBE_XPCS_IDA_DATA; + wr32(hw, portRegOffset, data); +} + +#if 0 +s32 txgbe_set_amlite_pcs_mode(struct txgbe_hw *hw, int eth_mode) { + + u32 ss52, data; + u32 speed_select, pcs_type_select, pma_type; + u32 pcs_dig_ctrl3, vr_pcs_ctrl3, vr_pcs_ctrl3; + u32 sr_pma_rs_fec_ctl; + + ss52 = txgbe_rd32_epcs(hw, SR_PCS_CTL1); + data = txgbe_rd32_epcs(hw, SR_PCS_CTL2); + + switch (eth_mode) { + case ETH_RATE_10G: + speed_select = 0x0; + pcs_type_select = 0x0; + pma_type = 0xb; + pcs_dig_ctrl3 = 0x0; + break; + case ETH_RATE_25G: + speed_select = 0x5; + pcs_type_select = 0x7; + pma_type = 0x39; + pcs_dig_ctrl3 = 0x0; + break; + default: + ERROR_REPORT2(TXGBE_ERROR_UNSUPPORTED, + "Erroe Eth_mode"); + return -1; + } + +} + +s32 txgbe_set_amlite_phy_mode(struct txgbe_hw *hw, int eth_mode) { + + u32 ss52, data; + u32 pll0_div_cfg, pin_ovrden, pin_ovrdval; + u32 datapath_cfg0, an_cfg; + + ss52 = txgbe_rd32_epcs(hw, SR_PCS_CTL1); + + switch (eth_mode) { + case ETH_RATE_10G: + pll0_div_cfg = 0x29408; + 
pin_ovrden = 0x0; + pin_ovrdval = 0x0; + datapath_cfg0; + break; + case ETH_RATE_25G: + pll0_div_cfg = 0x29408; + pin_ovrden = 0x0; + pin_ovrdval = 0x0; + datapath_cfg0; + break; + default: + ERROR_REPORT2(TXGBE_ERROR_UNSUPPORTED, + "Erroe Eth_mode"); + return -1; + } + + + +} + +s32 txgbe_set_amlite_an_status(struct txgbe_hw *hw, bool autoneg) { + if (autoneg) + else +} +#endif /** - * txgbe_disable_sec_tx_path - Stops the transmit data path - * @wx: pointer to hardware structure + * txgbe_dcb_get_rtrup2tc - read rtrup2tc reg + * @hw: pointer to hardware structure + * @map: pointer to u8 arr for returning map * - * Stops the transmit data path and waits for the HW to internally empty - * the tx security block + * Read the rtrup2tc HW register and resolve its content into map **/ -int txgbe_disable_sec_tx_path(struct wx *wx) +void txgbe_dcb_get_rtrup2tc(struct txgbe_hw *hw, u8 *map) { - int val; + u32 reg, i; - wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS, WX_TSC_CTL_TX_DIS); - return read_poll_timeout(rd32, val, val & WX_TSC_ST_SECTX_RDY, - 1000, 20000, false, wx, WX_TSC_ST); + reg = rd32(hw, TXGBE_RDB_UP2TC); + for (i = 0; i < TXGBE_DCB_MAX_USER_PRIORITY; i++) + map[i] = TXGBE_RDB_UP2TC_UP_MASK & + (reg >> (i * TXGBE_RDB_UP2TC_UP_SHIFT)); + return; } /** - * txgbe_enable_sec_tx_path - Enables the transmit data path - * @wx: pointer to hardware structure + * txgbe_get_pcie_msix_count - Gets MSI-X vector count + * @hw: pointer to hardware structure * - * Enables the transmit data path. + * Read PCIe configuration space, and get the MSI-X vector count from + * the capabilities table. **/ -void txgbe_enable_sec_tx_path(struct wx *wx) +u16 txgbe_get_pcie_msix_count(struct txgbe_hw *hw) { - wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS, 0); - WX_WRITE_FLUSH(wx); + u16 msix_count = 1; + u16 max_msix_count; + u32 pos; + + max_msix_count = TXGBE_MAX_MSIX_VECTORS_SAPPHIRE; + pos = pci_find_capability(((struct txgbe_adapter *)hw->back)->pdev, PCI_CAP_ID_MSIX); + if (!pos) + return msix_count; + pci_read_config_word(((struct txgbe_adapter *)hw->back)->pdev, + pos + PCI_MSIX_FLAGS, &msix_count); + + if (TXGBE_REMOVED(hw->hw_addr)) + msix_count = 0; + msix_count &= TXGBE_PCIE_MSIX_TBL_SZ_MASK; + + /* MSI-X count is zero-based in HW */ + msix_count++; + + if (msix_count > max_msix_count) + msix_count = max_msix_count; + + return msix_count; } /** - * txgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds - * @wx: pointer to hardware structure + * txgbe_init_hw - Generic hardware initialization + * @hw: pointer to hardware structure * - * Inits the thermal sensor thresholds according to the NVM map - * and save off the threshold and location values into mac.thermal_sensor_data + * Initialize the hardware by resetting the hardware, filling the bus info + * structure and media type, clears all on chip counters, initializes receive + * address registers, multicast table, VLAN filter table, calls routine to set + * up link and flow control settings, and leaves transmit and receive units + * disabled and uninitialized **/ -static void txgbe_init_thermal_sensor_thresh(struct wx *wx) +s32 txgbe_init_hw(struct txgbe_hw *hw) { - struct wx_thermal_sensor_data *data = &wx->mac.sensor; + s32 status; - memset(data, 0, sizeof(struct wx_thermal_sensor_data)); + /* Reset the hardware */ + status = TCALL(hw, mac.ops.reset_hw); - /* Only support thermal sensors attached to SP physical port 0 */ - if (wx->bus.func) - return; + if (status == 0) { + /* Start the HW */ + status = TCALL(hw, mac.ops.start_hw); + } - wr32(wx, 
TXGBE_TS_CTL, TXGBE_TS_CTL_EVAL_MD); + return status; +} + + +/** + * txgbe_clear_hw_cntrs - Generic clear hardware counters + * @hw: pointer to hardware structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. + **/ +s32 txgbe_clear_hw_cntrs(struct txgbe_hw *hw) +{ + u16 i = 0; + + rd32(hw, TXGBE_RX_CRC_ERROR_FRAMES_LOW); + for (i = 0; i < 8; i++) + rd32(hw, TXGBE_RDB_MPCNT(i)); + + rd32(hw, TXGBE_RX_LEN_ERROR_FRAMES_LOW); + rd32(hw, TXGBE_RDB_LXONTXC); + rd32(hw, TXGBE_RDB_LXOFFTXC); + rd32(hw, TXGBE_MAC_LXONRXC); + rd32(hw, TXGBE_MAC_LXOFFRXC); + + for (i = 0; i < 8; i++) { + rd32(hw, TXGBE_RDB_PXONTXC(i)); + rd32(hw, TXGBE_RDB_PXOFFTXC(i)); + rd32(hw, TXGBE_MAC_PXONRXC(i)); + wr32m(hw, TXGBE_MMC_CONTROL, TXGBE_MMC_CONTROL_UP, i<<16); + rd32(hw, TXGBE_MAC_PXOFFRXC); + } + for (i = 0; i < 8; i++) + rd32(hw, TXGBE_RDB_PXON2OFFCNT(i)); + for (i = 0; i < 128; i++) { + wr32(hw, TXGBE_PX_MPRC(i), 0); + } + + rd32(hw, TXGBE_PX_GPRC); + rd32(hw, TXGBE_PX_GPTC); + rd32(hw, TXGBE_PX_GORC_MSB); + rd32(hw, TXGBE_PX_GOTC_MSB); + + rd32(hw, TXGBE_RX_BC_FRAMES_GOOD_LOW); + rd32(hw, TXGBE_RX_UNDERSIZE_FRAMES_GOOD); + rd32(hw, TXGBE_RX_OVERSIZE_FRAMES_GOOD); + rd32(hw, TXGBE_RX_FRAME_CNT_GOOD_BAD_LOW); + rd32(hw, TXGBE_TX_FRAME_CNT_GOOD_BAD_LOW); + rd32(hw, TXGBE_TX_MC_FRAMES_GOOD_LOW); + rd32(hw, TXGBE_TX_BC_FRAMES_GOOD_LOW); + rd32(hw, TXGBE_RDM_DRP_PKT); + return 0; +} + +/** + * txgbe_device_supports_autoneg_fc - Check if device supports autonegotiation + * of flow control + * @hw: pointer to hardware structure + * + * This function returns true if the device supports flow control + * autonegotiation, and false if it does not. + * + **/ +bool txgbe_device_supports_autoneg_fc(struct txgbe_hw *hw) +{ + bool supported = false; + u32 speed; + bool link_up = false; + u8 device_type = hw->subsystem_device_id & 0xF0; + + switch (hw->phy.media_type) { + case txgbe_media_type_fiber_qsfp: + case txgbe_media_type_fiber: + TCALL(hw, mac.ops.check_link, &speed, &link_up, false); + /* if link is down, assume supported */ + /* amlite TODO*/ + if (link_up) + supported = speed == TXGBE_LINK_SPEED_1GB_FULL ? + true : false; + else + supported = true; + break; + case txgbe_media_type_backplane: + supported = (device_type != TXGBE_ID_MAC_XAUI && + device_type != TXGBE_ID_MAC_SGMII); + break; + case txgbe_media_type_copper: + /* only some copper devices support flow control autoneg */ + supported = true; + break; + default: + break; + } + + ERROR_REPORT2(TXGBE_ERROR_UNSUPPORTED, + "Device %x does not support flow control autoneg", + hw->device_id); + return supported; +} + +/** + * txgbe_setup_fc - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. 
+ **/ +s32 txgbe_setup_fc(struct txgbe_hw *hw) +{ + s32 ret_val = 0; + u32 pcap = 0; + u32 value = 0; + u32 pcap_backplane = 0; - wr32(wx, WX_TS_INT_EN, - WX_TS_INT_EN_ALARM_INT_EN | WX_TS_INT_EN_DALARM_INT_EN); - wr32(wx, WX_TS_EN, WX_TS_EN_ENA); + /*amlite TODO*/ + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + return 0; + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == txgbe_fc_rx_pause) { + ERROR_REPORT1(TXGBE_ERROR_UNSUPPORTED, + "txgbe_fc_rx_pause not valid in strict IEEE mode\n"); + ret_val = TXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* + * 10gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. + */ + if (hw->fc.requested_mode == txgbe_fc_default) + hw->fc.requested_mode = txgbe_fc_full; + + /* + * Set up the 1G and 10G flow control advertisement registers so the + * HW will be able to do fc autoneg once the cable is plugged in. If + * we link at 10G, the 1G advertisement is harmless and vice versa. + */ + + /* + * The possible values of fc.requested_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.requested_mode) { + case txgbe_fc_none: + /* Flow control completely disabled by software override. */ + break; + case txgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + pcap |= TXGBE_SR_MII_MMD_AN_ADV_PAUSE_ASM; + pcap_backplane |= TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM; + break; + case txgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + case txgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + pcap |= TXGBE_SR_MII_MMD_AN_ADV_PAUSE_SYM | + TXGBE_SR_MII_MMD_AN_ADV_PAUSE_ASM; + pcap_backplane |= TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM | + TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM; + break; + default: + ERROR_REPORT1(TXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = TXGBE_ERR_CONFIG; + goto out; + } + + /* + * Enable auto-negotiation between the MAC & PHY; + * the MAC will advertise clause 37 flow control. + */ + value = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_AN_ADV); + value = (value & ~(TXGBE_SR_MII_MMD_AN_ADV_PAUSE_ASM | + TXGBE_SR_MII_MMD_AN_ADV_PAUSE_SYM)) | pcap; + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_AN_ADV, value); + + /* + * AUTOC restart handles negotiation of 1G and 10G on backplane + * and copper. 
+ */ + if (hw->phy.media_type == txgbe_media_type_backplane) { + value = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG1); + value = (value & ~(TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM | + TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM)) | + pcap_backplane; + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG1, value); + + } else if ((hw->phy.media_type == txgbe_media_type_copper) && + (txgbe_device_supports_autoneg_fc(hw))) { + /* avoid fw access phy */ + if (((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) && + ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP)) { + /* Let firmware know the driver has taken over */ + wr32m(hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_DRV_LOAD, TXGBE_CFG_PORT_CTL_DRV_LOAD); + + mdelay(100); + } + + ret_val = txgbe_set_phy_pause_advertisement(hw, pcap_backplane); - data->alarm_thresh = 100; - wr32(wx, WX_TS_ALARM_THRE, 677); - data->dalarm_thresh = 90; - wr32(wx, WX_TS_DALARM_THRE, 614); + /* Let firmware take over control of h/w */ + if (((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) && + ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP)) + wr32m(hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_DRV_LOAD, 0); + } +out: + return ret_val; } /** * txgbe_read_pba_string - Reads part number string from EEPROM - * @wx: pointer to hardware structure + * @hw: pointer to hardware structure * @pba_num: stores the part number string from the EEPROM * @pba_num_size: part number string buffer length * * Reads the part number string from the EEPROM. **/ -int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size) +s32 txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, + u32 pba_num_size) { - u16 pba_ptr, offset, length, data; - int ret_val; + s32 ret_val; + u16 data; + u16 pba_ptr; + u16 offset; + u16 length; - if (!pba_num) { - wx_err(wx, "PBA string buffer was null\n"); - return -EINVAL; + if (pba_num == NULL) { + DEBUGOUT("PBA string buffer was null\n"); + return TXGBE_ERR_INVALID_ARGUMENT; } - ret_val = wx_read_ee_hostif(wx, - wx->eeprom.sw_region_offset + TXGBE_PBANUM0_PTR, - &data); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); + ret_val = TCALL(hw, eeprom.ops.read, + hw->eeprom.sw_region_offset + TXGBE_PBANUM0_PTR, + &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); return ret_val; } - ret_val = wx_read_ee_hostif(wx, - wx->eeprom.sw_region_offset + TXGBE_PBANUM1_PTR, - &pba_ptr); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); + ret_val = TCALL(hw, eeprom.ops.read, + hw->eeprom.sw_region_offset + TXGBE_PBANUM1_PTR, + &pba_ptr); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); return ret_val; } - /* if data is not ptr guard the PBA must be in legacy format which + /* + * if data is not ptr guard the PBA must be in legacy format which * means pba_ptr is actually our second data word for the PBA number * and we can decode it into an ascii string */ if (data != TXGBE_PBANUM_PTR_GUARD) { - wx_err(wx, "NVM PBA number is not stored as string\n"); + DEBUGOUT("NVM PBA number is not stored as string\n"); /* we will need 11 characters to store the PBA */ if (pba_num_size < 11) { - wx_err(wx, "PBA string buffer too small\n"); - return -ENOMEM; + DEBUGOUT("PBA string buffer too small\n"); + return TXGBE_ERR_NO_SPACE; } /* extract hex string from data and pba_ptr */ @@ -143,21 +561,21 @@ int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size) return 0; } - ret_val = wx_read_ee_hostif(wx, pba_ptr, &length); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); + ret_val = TCALL(hw, eeprom.ops.read, pba_ptr, 
&length); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); return ret_val; } if (length == 0xFFFF || length == 0) { - wx_err(wx, "NVM PBA number section invalid length\n"); - return -EINVAL; + DEBUGOUT("NVM PBA number section invalid length\n"); + return TXGBE_ERR_PBA_SECTION; } /* check if pba_num buffer is big enough */ if (pba_num_size < (((u32)length * 2) - 1)) { - wx_err(wx, "PBA string buffer too small\n"); - return -ENOMEM; + DEBUGOUT("PBA string buffer too small\n"); + return TXGBE_ERR_NO_SPACE; } /* trim pba length from start of string */ @@ -165,9 +583,9 @@ int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size) length--; for (offset = 0; offset < length; offset++) { - ret_val = wx_read_ee_hostif(wx, pba_ptr + offset, &data); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); + ret_val = TCALL(hw, eeprom.ops.read, pba_ptr + offset, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); return ret_val; } pba_num[offset * 2] = (u8)(data >> 8); @@ -179,144 +597,7616 @@ int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size) } /** - * txgbe_calc_eeprom_checksum - Calculates and returns the checksum - * @wx: pointer to hardware structure - * @checksum: pointer to cheksum + * txgbe_get_mac_addr - Generic get MAC address + * @hw: pointer to hardware structure + * @mac_addr: Adapter MAC address * - * Returns a negative error code on error + * Reads the adapter's MAC address from first Receive Address Register (RAR0) + * A reset of the adapter must be performed prior to calling this function + * in order for the MAC address to have been loaded from the EEPROM into RAR0 **/ -static int txgbe_calc_eeprom_checksum(struct wx *wx, u16 *checksum) +s32 txgbe_get_mac_addr(struct txgbe_hw *hw, u8 *mac_addr) { - u16 *eeprom_ptrs = NULL; - u16 *local_buffer; - int status; + u32 rar_high; + u32 rar_low; u16 i; - wx_init_eeprom_params(wx); + wr32(hw, TXGBE_PSR_MAC_SWC_IDX, 0); + rar_high = rd32(hw, TXGBE_PSR_MAC_SWC_AD_H); + rar_low = rd32(hw, TXGBE_PSR_MAC_SWC_AD_L); - eeprom_ptrs = kvmalloc_array(TXGBE_EEPROM_LAST_WORD, sizeof(u16), - GFP_KERNEL); - if (!eeprom_ptrs) - return -ENOMEM; - /* Read pointer area */ - status = wx_read_ee_hostif_buffer(wx, 0, TXGBE_EEPROM_LAST_WORD, eeprom_ptrs); - if (status != 0) { - wx_err(wx, "Failed to read EEPROM image\n"); - kvfree(eeprom_ptrs); - return status; + for (i = 0; i < 2; i++) + mac_addr[i] = (u8)(rar_high >> (1 - i) * 8); + + for (i = 0; i < 4; i++) + mac_addr[i + 2] = (u8)(rar_low >> (3 - i) * 8); + + return 0; +} + +/** + * txgbe_set_pci_config_data - Generic store PCI bus info + * @hw: pointer to hardware structure + * @link_status: the link status returned by the PCI config space + * + * Stores the PCI bus info (speed, width, type) within the txgbe_hw structure + **/ +void txgbe_set_pci_config_data(struct txgbe_hw *hw, u16 link_status) +{ + /* amlite: TODO */ + if (hw->bus.type == txgbe_bus_type_unknown) + hw->bus.type = txgbe_bus_type_pci_express; + + switch (link_status & TXGBE_PCI_LINK_WIDTH) { + case TXGBE_PCI_LINK_WIDTH_1: + hw->bus.width = PCIE_LNK_X1; + break; + case TXGBE_PCI_LINK_WIDTH_2: + hw->bus.width = PCIE_LNK_X2; + break; + case TXGBE_PCI_LINK_WIDTH_4: + hw->bus.width = PCIE_LNK_X4; + break; + case TXGBE_PCI_LINK_WIDTH_8: + hw->bus.width = PCIE_LNK_X8; + break; + default: + hw->bus.width = PCIE_LNK_WIDTH_UNKNOWN; + break; } - local_buffer = eeprom_ptrs; - for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++) - if (i != wx->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM) - *checksum += local_buffer[i]; + 
switch (link_status & TXGBE_PCI_LINK_SPEED) { + case TXGBE_PCI_LINK_SPEED_2500: + hw->bus.speed = PCIE_SPEED_2_5GT; + break; + case TXGBE_PCI_LINK_SPEED_5000: + hw->bus.speed = PCIE_SPEED_5_0GT; + break; + case TXGBE_PCI_LINK_SPEED_8000: + hw->bus.speed = PCIE_SPEED_8_0GT; + break; + default: + hw->bus.speed = PCI_SPEED_UNKNOWN; + break; + } - if (eeprom_ptrs) - kvfree(eeprom_ptrs); +} - *checksum = TXGBE_EEPROM_SUM - *checksum; +/** + * txgbe_get_bus_info - Generic set PCI bus info + * @hw: pointer to hardware structure + * + * Gets the PCI bus info (speed, width, type) then calls helper function to + * store this data within the txgbe_hw structure. + **/ +s32 txgbe_get_bus_info(struct txgbe_hw *hw) +{ + u16 link_status; + + /* Get the negotiated link width and speed from PCI config space */ + link_status = TXGBE_READ_PCIE_WORD(hw, TXGBE_PCI_LINK_STATUS); + + txgbe_set_pci_config_data(hw, link_status); return 0; } /** - * txgbe_validate_eeprom_checksum - Validate EEPROM checksum - * @wx: pointer to hardware structure - * @checksum_val: calculated checksum + * txgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * @hw: pointer to the HW structure * - * Performs checksum calculation and validates the EEPROM checksum. If the - * caller does not need checksum_val, the value can be NULL. + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. **/ -int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val) +void txgbe_set_lan_id_multi_port_pcie(struct txgbe_hw *hw) { - u16 read_checksum = 0; - u16 checksum; - int status; + struct txgbe_bus_info *bus = &hw->bus; + u32 reg; - /* Read the first word from the EEPROM. If this times out or fails, do - * not continue or we could be in for a very long wait while every - * EEPROM read fails + reg = rd32(hw, TXGBE_CFG_PORT_ST); + bus->lan_id = TXGBE_CFG_PORT_ST_LAN_ID(reg); + + /* check for a port swap */ + reg = rd32(hw, TXGBE_MIS_PWR); + if (TXGBE_MIS_PWR_LAN_ID_1 == TXGBE_MIS_PWR_LAN_ID(reg)) + bus->func = 0; + else + bus->func = bus->lan_id; +} + +/** + * txgbe_stop_adapter - Generic stop Tx/Rx units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within txgbe_hw struct. Clears interrupts, + * disables transmit and receive units. The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. + **/ +s32 txgbe_stop_adapter(struct txgbe_hw *hw) +{ + u16 i; + struct txgbe_adapter *adapter = hw->back; + + /* + * Set the adapter_stopped flag so other driver functions stop touching + * the hardware */ - status = wx_read_ee_hostif(wx, 0, &checksum); - if (status) { - wx_err(wx, "EEPROM read failed\n"); - return status; + hw->adapter_stopped = true; + + /* Disable the receive unit */ + TCALL(hw, mac.ops.disable_rx); + + /* Set interrupt mask to stop interrupts from being generated */ + txgbe_intr_disable(hw, TXGBE_INTR_ALL); + + /* Clear any pending interrupts, flush previous writes */ + wr32(hw, TXGBE_PX_MISC_IC, 0xffffffff); + wr32(hw, TXGBE_BME_CTL, 0x3); + + /* Disable the transmit unit. Each queue must be disabled. 
*/ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + wr32m(hw, TXGBE_PX_TR_CFG(i), + TXGBE_PX_TR_CFG_SWFLSH | TXGBE_PX_TR_CFG_ENABLE, + TXGBE_PX_TR_CFG_SWFLSH); } - checksum = 0; - status = txgbe_calc_eeprom_checksum(wx, &checksum); - if (status != 0) - return status; + /* Disable the receive unit by stopping each queue */ + for (i = 0; i < hw->mac.max_rx_queues; i++) { + wr32m(hw, TXGBE_PX_RR_CFG(i), + TXGBE_PX_RR_CFG_RR_EN, 0); + } - status = wx_read_ee_hostif(wx, wx->eeprom.sw_region_offset + - TXGBE_EEPROM_CHECKSUM, &read_checksum); - if (status != 0) - return status; + /* flush all queues disables */ + TXGBE_WRITE_FLUSH(hw); - /* Verify read checksum from EEPROM is the same as - * calculated checksum + /* + * Prevent the PCI-E bus from hanging by disabling PCI-E master + * access and verify no pending requests */ - if (read_checksum != checksum) { - status = -EIO; - wx_err(wx, "Invalid EEPROM checksum\n"); + if (!(adapter->flags2 & TXGBE_FLAG2_ECC_ERR_RESET)) + return txgbe_disable_pcie_master(hw); + else + return 0; +} + +/** + * txgbe_led_on - Turns on the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn on + **/ +s32 txgbe_led_on(struct txgbe_hw *hw, u32 index) +{ + u32 led_reg = rd32(hw, TXGBE_CFG_LED_CTL); + u16 value = 0; + + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) { + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xF021, &value); + txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xF021, (value & 0xFFFC) | 0x0); } + /* To turn on the LED, set mode to ON. */ + led_reg |= index | (index << TXGBE_CFG_LED_CTL_LINK_OD_SHIFT); + wr32(hw, TXGBE_CFG_LED_CTL, led_reg); + TXGBE_WRITE_FLUSH(hw); - /* If the user cares, return the calculated checksum */ - if (checksum_val) - *checksum_val = checksum; + return 0; +} + +/** + * txgbe_led_off - Turns off the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn off + **/ +s32 txgbe_led_off(struct txgbe_hw *hw, u32 index) +{ + u32 led_reg = rd32(hw, TXGBE_CFG_LED_CTL); + u16 value = 0; + + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) { + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xF021, &value); + txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xF021, (value & 0xFFFC) | 0x1); + } + + /* To turn off the LED, set mode to OFF. 
*/ + led_reg &= ~(index << TXGBE_CFG_LED_CTL_LINK_OD_SHIFT); + led_reg |= index; + wr32(hw, TXGBE_CFG_LED_CTL, led_reg); + TXGBE_WRITE_FLUSH(hw); + return 0; +} + +/** + * txgbe_get_eeprom_semaphore - Get hardware semaphore + * @hw: pointer to hardware structure + * + * Sets the hardware semaphores so EEPROM access can occur for bit-bang method + **/ +STATIC s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw) +{ + s32 status = TXGBE_ERR_EEPROM; + u32 timeout = 4000; + u32 i; + u32 swsm; + + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = rd32(hw, TXGBE_MIS_SWSM); + if (!(swsm & TXGBE_MIS_SWSM_SMBI)) { + status = 0; + break; + } + usec_delay(50); + } + + if (i == timeout) { + DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore " + "not granted.\n"); + /* + * this release is particularly important because our attempts + * above to get the semaphore may have succeeded, and if there + * was a timeout, we should unconditionally clear the semaphore + * bits to free the driver to make progress + */ + txgbe_release_eeprom_semaphore(hw); + + usec_delay(50); + /* + * one last try + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = rd32(hw, TXGBE_MIS_SWSM); + if (!(swsm & TXGBE_MIS_SWSM_SMBI)) + status = 0; + } + + /* Now get the semaphore between SW/FW through the SWESMBI bit */ + if (status == 0) { + for (i = 0; i < timeout; i++) { + if (txgbe_check_mng_access(hw)) { + /* Set the SW EEPROM semaphore bit to request access */ + wr32m(hw, TXGBE_MNG_SW_SM, + TXGBE_MNG_SW_SM_SM, TXGBE_MNG_SW_SM_SM); + + /* + * If we set the bit successfully then we got + * semaphore. + */ + swsm = rd32(hw, TXGBE_MNG_SW_SM); + if (swsm & TXGBE_MNG_SW_SM_SM) + break; + } + usec_delay(50); + } + + /* + * Release semaphores and return error if SW EEPROM semaphore + * was not granted because we don't have access to the EEPROM + */ + if (i >= timeout) { + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "SWESMBI Software EEPROM semaphore not granted, MNG_SW_SM_SM is 0x%08x.\n", + swsm); + txgbe_release_eeprom_semaphore(hw); + status = TXGBE_ERR_EEPROM; + } + } else { + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "Software semaphore SMBI between device drivers " + "not granted, MNG_SW_SM_SM is 0x%08x.\n", swsm); + } return status; } -static void txgbe_reset_misc(struct wx *wx) +/** + * txgbe_release_eeprom_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function clears hardware semaphore bits. + **/ +STATIC void txgbe_release_eeprom_semaphore(struct txgbe_hw *hw) { - wx_reset_misc(wx); - txgbe_init_thermal_sensor_thresh(wx); + if (txgbe_check_mng_access(hw)) { + wr32m(hw, TXGBE_MNG_SW_SM, + TXGBE_MNG_SW_SM_SM, 0); + wr32m(hw, TXGBE_MIS_SWSM, + TXGBE_MIS_SWSM_SMBI, 0); + TXGBE_WRITE_FLUSH(hw); + } } /** - * txgbe_reset_hw - Perform hardware reset - * @wx: pointer to wx structure + * txgbe_validate_mac_addr - Validate MAC address + * @mac_addr: pointer to MAC address. * - * Resets the hardware by resetting the transmit and receive units, masks - * and clears all interrupts, perform a PHY reset, and perform a link (MAC) - * reset. 
+ * Tests a MAC address to ensure it is a valid Individual Address **/ -int txgbe_reset_hw(struct wx *wx) +s32 txgbe_validate_mac_addr(u8 *mac_addr) { - int status; + s32 status = 0; - /* Call adapter stop to disable tx/rx and clear interrupts */ - status = wx_stop_adapter(wx); - if (status != 0) - return status; + /* Make sure it is not a multicast address */ + if (TXGBE_IS_MULTICAST(mac_addr)) { + DEBUGOUT("MAC address is multicast\n"); + status = TXGBE_ERR_INVALID_MAC_ADDR; + /* Not a broadcast address */ + } else if (TXGBE_IS_BROADCAST(mac_addr)) { + DEBUGOUT("MAC address is broadcast\n"); + status = TXGBE_ERR_INVALID_MAC_ADDR; + /* Reject the zero address */ + } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && + mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { + DEBUGOUT("MAC address is all zeros\n"); + status = TXGBE_ERR_INVALID_MAC_ADDR; + } + return status; +} - if (wx->media_type != sp_media_copper) { - u32 val; +/** + * txgbe_set_rar - Set Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @vmdq: VMDq "set" or "pool" index + * @enable_addr: set flag that address is active + * + * Puts an ethernet address into a receive address register. + **/ +s32 txgbe_set_rar(struct txgbe_hw *hw, u32 index, u8 *addr, u64 pools, + u32 enable_addr) +{ + u32 rar_low, rar_high; + u32 rar_entries = hw->mac.num_rar_entries; - val = WX_MIS_RST_LAN_RST(wx->bus.func); - wr32(wx, WX_MIS_RST, val | rd32(wx, WX_MIS_RST)); - WX_WRITE_FLUSH(wx); - usleep_range(10, 100); + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + ERROR_REPORT2(TXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", index); + return TXGBE_ERR_INVALID_ARGUMENT; } - status = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_LAN_SW_RST(wx->bus.func)); - if (status != 0) - return status; + /* select the MAC address */ + wr32(hw, TXGBE_PSR_MAC_SWC_IDX, index); - txgbe_reset_misc(wx); + /* setup VMDq pool mapping */ + wr32(hw, TXGBE_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF); + wr32(hw, TXGBE_PSR_MAC_SWC_VM_H, pools >> 32); - /* Store the permanent mac address */ - wx_get_mac_addr(wx, wx->mac.perm_addr); + /* + * HW expects these in little endian so we reverse the byte + * order from network order (big endian) to little endian + * + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + rar_low = ((u32)addr[5] | + ((u32)addr[4] << 8) | + ((u32)addr[3] << 16) | + ((u32)addr[2] << 24)); + rar_high = ((u32)addr[1] | + ((u32)addr[0] << 8)); + if (enable_addr != 0) + rar_high |= TXGBE_PSR_MAC_SWC_AD_H_AV; - /* Store MAC address from RAR0, clear receive address registers, and - * clear the multicast table. Also reset num_rar_entries to 128, - * since we modify this value when programming the SAN MAC address. + wr32(hw, TXGBE_PSR_MAC_SWC_AD_L, rar_low); + wr32m(hw, TXGBE_PSR_MAC_SWC_AD_H, + (TXGBE_PSR_MAC_SWC_AD_H_AD(~0) | + TXGBE_PSR_MAC_SWC_AD_H_ADTYPE(~0) | + TXGBE_PSR_MAC_SWC_AD_H_AV), + rar_high); + + return 0; +} + +/** + * txgbe_clear_rar - Remove Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * + * Clears an ethernet address from a receive address register. 
+ **/ +s32 txgbe_clear_rar(struct txgbe_hw *hw, u32 index) +{ + u32 rar_entries = hw->mac.num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + ERROR_REPORT2(TXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", index); + return TXGBE_ERR_INVALID_ARGUMENT; + } + + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. */ - wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES; - wx_init_rx_addrs(wx); + wr32(hw, TXGBE_PSR_MAC_SWC_IDX, index); + + wr32(hw, TXGBE_PSR_MAC_SWC_VM_L, 0); + wr32(hw, TXGBE_PSR_MAC_SWC_VM_H, 0); - pci_set_master(wx->pdev); + wr32(hw, TXGBE_PSR_MAC_SWC_AD_L, 0); + wr32m(hw, TXGBE_PSR_MAC_SWC_AD_H, + (TXGBE_PSR_MAC_SWC_AD_H_AD(~0) | + TXGBE_PSR_MAC_SWC_AD_H_ADTYPE(~0) | + TXGBE_PSR_MAC_SWC_AD_H_AV), + 0); return 0; } + +/** + * txgbe_init_rx_addrs - Initializes receive address filters. + * @hw: pointer to hardware structure + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + **/ +s32 txgbe_init_rx_addrs(struct txgbe_hw *hw) +{ + u32 i; + u32 rar_entries = hw->mac.num_rar_entries; + u32 psrctl; + + /* + * If the current mac address is valid, assume it is a software override + * to the permanent address. + * Otherwise, use the permanent address from the eeprom. + */ + if (txgbe_validate_mac_addr(hw->mac.addr) == + TXGBE_ERR_INVALID_MAC_ADDR) { + /* Get the MAC address from the RAR0 for later reference */ + TCALL(hw, mac.ops.get_mac_addr, hw->mac.addr); + + DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n", + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2], hw->mac.addr[3], + hw->mac.addr[4], hw->mac.addr[5]); + } else { + /* Setup the receive address. */ + DEBUGOUT("Overriding MAC Address in RAR[0]\n"); + DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n", + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2], hw->mac.addr[3], + hw->mac.addr[4], hw->mac.addr[5]); + + TCALL(hw, mac.ops.set_rar, 0, hw->mac.addr, 0, + TXGBE_PSR_MAC_SWC_AD_H_AV); + + /* clear VMDq pool/queue selection for RAR 0 */ + TCALL(hw, mac.ops.clear_vmdq, 0, TXGBE_CLEAR_VMDQ_ALL); + } + hw->addr_ctrl.overflow_promisc = 0; + + hw->addr_ctrl.rar_used_count = 1; + + /* Zero out the other receive addresses. */ + DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1); + for (i = 1; i < rar_entries; i++) { + wr32(hw, TXGBE_PSR_MAC_SWC_IDX, i); + wr32(hw, TXGBE_PSR_MAC_SWC_AD_L, 0); + wr32(hw, TXGBE_PSR_MAC_SWC_AD_H, 0); + } + + /* Clear the MTA */ + hw->addr_ctrl.mta_in_use = 0; + psrctl = rd32(hw, TXGBE_PSR_CTL); + psrctl &= ~(TXGBE_PSR_CTL_MO | TXGBE_PSR_CTL_MFE); + psrctl |= hw->mac.mc_filter_type << TXGBE_PSR_CTL_MO_SHIFT; + wr32(hw, TXGBE_PSR_CTL, psrctl); + DEBUGOUT(" Clearing MTA\n"); + for (i = 0; i < hw->mac.mcft_size; i++) + wr32(hw, TXGBE_PSR_MC_TBL(i), 0); + + TCALL(hw, mac.ops.init_uta_tables); + + return 0; +} + +/** + * txgbe_add_uc_addr - Adds a secondary unicast address. + * @hw: pointer to hardware structure + * @addr: new address + * + * Adds it to unused receive address register or goes into promiscuous mode. 
+ **/ +static void txgbe_add_uc_addr(struct txgbe_hw *hw, u8 *addr, u32 vmdq) +{ + u32 rar_entries = hw->mac.num_rar_entries; + u32 rar; + + DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + + /* + * Place this address in the RAR if there is room, + * else put the controller into promiscuous mode + */ + if (hw->addr_ctrl.rar_used_count < rar_entries) { + rar = hw->addr_ctrl.rar_used_count; + TCALL(hw, mac.ops.set_rar, rar, addr, vmdq, + TXGBE_PSR_MAC_SWC_AD_H_AV); + DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar); + hw->addr_ctrl.rar_used_count++; + } else { + hw->addr_ctrl.overflow_promisc++; + } + + DEBUGOUT("txgbe_add_uc_addr Complete\n"); +} + +/** + * txgbe_update_uc_addr_list - Updates MAC list of secondary addresses + * @hw: pointer to hardware structure + * @addr_list: the list of new addresses + * @addr_count: number of addresses + * @next: iterator function to walk the address list + * + * The given list replaces any existing list. Clears the secondary addrs from + * receive address registers. Uses unused receive address registers for the + * first secondary addresses, and falls back to promiscuous mode as needed. + * + * Drivers using secondary unicast addresses must set user_set_promisc when + * manually putting the device into promiscuous mode. + **/ +s32 txgbe_update_uc_addr_list(struct txgbe_hw *hw, u8 *addr_list, + u32 addr_count, txgbe_mc_addr_itr next) +{ + u8 *addr; + u32 i; + u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; + u32 uc_addr_in_use; + u32 vmdq; + + /* + * Clear accounting of old secondary address list, + * don't count RAR[0] + */ + uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1; + hw->addr_ctrl.rar_used_count -= uc_addr_in_use; + hw->addr_ctrl.overflow_promisc = 0; + + /* Zero out the other receive addresses */ + DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1); + for (i = 0; i < uc_addr_in_use; i++) { + wr32(hw, TXGBE_PSR_MAC_SWC_IDX, 1+i); + wr32(hw, TXGBE_PSR_MAC_SWC_AD_L, 0); + wr32(hw, TXGBE_PSR_MAC_SWC_AD_H, 0); + } + + /* Add the new addresses */ + for (i = 0; i < addr_count; i++) { + DEBUGOUT(" Adding the secondary addresses:\n"); + addr = next(hw, &addr_list, &vmdq); + txgbe_add_uc_addr(hw, addr, vmdq); + } + + if (hw->addr_ctrl.overflow_promisc) { + /* enable promisc if not already in overflow or set by user */ + if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { + DEBUGOUT(" Entering address overflow promisc mode\n"); + wr32m(hw, TXGBE_PSR_CTL, + TXGBE_PSR_CTL_UPE, TXGBE_PSR_CTL_UPE); + } + } else { + /* only disable if set by overflow, not by user */ + if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { + DEBUGOUT(" Leaving address overflow promisc mode\n"); + wr32m(hw, TXGBE_PSR_CTL, + TXGBE_PSR_CTL_UPE, 0); + } + } + + DEBUGOUT("txgbe_update_uc_addr_list Complete\n"); + return 0; +} + +/** + * txgbe_mta_vector - Determines bit-vector in multicast table to set + * @hw: pointer to hardware structure + * @mc_addr: the multicast address + * + * Extracts the 12 bits, from a multicast address, to determine which + * bit-vector to set in the multicast table. The hardware uses 12 bits, from + * incoming rx multicast addresses, to determine the bit-vector to check in + * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set + * by the MO field of the MCSTCTRL. The MO field is set during initialization + * to mc_filter_type. 
+ **/
+STATIC s32 txgbe_mta_vector(struct txgbe_hw *hw, u8 *mc_addr)
+{
+	u32 vector = 0;
+
+	switch (hw->mac.mc_filter_type) {
+	case 0:   /* use bits [47:36] of the address */
+		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+		break;
+	case 1:   /* use bits [46:35] of the address */
+		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+		break;
+	case 2:   /* use bits [45:34] of the address */
+		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+		break;
+	case 3:   /* use bits [43:32] of the address */
+		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+		break;
+	default:  /* Invalid mc_filter_type */
+		DEBUGOUT("MC filter type param set incorrectly\n");
+		ASSERT(0);
+		break;
+	}
+
+	/* vector can only be 12-bits or boundary will be exceeded */
+	vector &= 0xFFF;
+	return vector;
+}
+
+/**
+ * txgbe_set_mta - Set bit-vector in multicast table
+ * @hw: pointer to hardware structure
+ * @mc_addr: multicast address whose bit-vector should be set
+ *
+ * Sets the bit-vector in the multicast table.
+ **/
+static void txgbe_set_mta(struct txgbe_hw *hw, u8 *mc_addr)
+{
+	u32 vector;
+	u32 vector_bit;
+	u32 vector_reg;
+
+	hw->addr_ctrl.mta_in_use++;
+
+	vector = txgbe_mta_vector(hw, mc_addr);
+	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
+
+	/*
+	 * The MTA is a register array of 128 32-bit registers. It is treated
+	 * like an array of 4096 bits. We want to set bit
+	 * BitArray[vector_value]. So we figure out what register the bit is
+	 * in, read it, OR in the new bit, then write back the new value. The
+	 * register is determined by the upper 7 bits of the vector value and
+	 * the bit within that register is determined by the lower 5 bits of
+	 * the value.
+	 */
+	vector_reg = (vector >> 5) & 0x7F;
+	vector_bit = vector & 0x1F;
+	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
+}
+
+/**
+ * txgbe_update_mc_addr_list - Updates MAC list of multicast addresses
+ * @hw: pointer to hardware structure
+ * @mc_addr_list: the list of new multicast addresses
+ * @mc_addr_count: number of addresses
+ * @next: iterator function to walk the multicast address list
+ * @clear: flag, when set clears the table beforehand
+ *
+ * When the clear flag is set, the given list replaces any existing list.
+ * Hashes the given addresses into the multicast table.
+ **/
+s32 txgbe_update_mc_addr_list(struct txgbe_hw *hw, u8 *mc_addr_list,
+			      u32 mc_addr_count, txgbe_mc_addr_itr next,
+			      bool clear)
+{
+	u32 i;
+	u32 vmdq;
+	u32 psrctl;
+
+	/*
+	 * Set the new number of MC addresses that we are being requested to
+	 * use.
+ */ + hw->addr_ctrl.num_mc_addrs = mc_addr_count; + hw->addr_ctrl.mta_in_use = 0; + + /* Clear mta_shadow */ + if (clear) { + DEBUGOUT(" Clearing MTA\n"); + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + } + + /* Update mta_shadow */ + for (i = 0; i < mc_addr_count; i++) { + DEBUGOUT(" Adding the multicast addresses:\n"); + txgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); + } + + /* Enable mta */ + for (i = 0; i < hw->mac.mcft_size; i++) + wr32a(hw, TXGBE_PSR_MC_TBL(0), i, + hw->mac.mta_shadow[i]); + + if (hw->addr_ctrl.mta_in_use > 0) { + psrctl = rd32(hw, TXGBE_PSR_CTL); + psrctl &= ~(TXGBE_PSR_CTL_MO | TXGBE_PSR_CTL_MFE); + psrctl |= TXGBE_PSR_CTL_MFE | + (hw->mac.mc_filter_type << TXGBE_PSR_CTL_MO_SHIFT); + wr32(hw, TXGBE_PSR_CTL, psrctl); + } + + DEBUGOUT("txgbe_update_mc_addr_list Complete\n"); + return 0; +} + +/** + * txgbe_enable_mc - Enable multicast address in RAR + * @hw: pointer to hardware structure + * + * Enables multicast address in RAR and the use of the multicast hash table. + **/ +s32 txgbe_enable_mc(struct txgbe_hw *hw) +{ + struct txgbe_addr_filter_info *a = &hw->addr_ctrl; + u32 psrctl; + + if (a->mta_in_use > 0) { + psrctl = rd32(hw, TXGBE_PSR_CTL); + psrctl &= ~(TXGBE_PSR_CTL_MO | TXGBE_PSR_CTL_MFE); + psrctl |= TXGBE_PSR_CTL_MFE | + (hw->mac.mc_filter_type << TXGBE_PSR_CTL_MO_SHIFT); + wr32(hw, TXGBE_PSR_CTL, psrctl); + } + + return 0; +} + +/** + * txgbe_disable_mc - Disable multicast address in RAR + * @hw: pointer to hardware structure + * + * Disables multicast address in RAR and the use of the multicast hash table. + **/ +s32 txgbe_disable_mc(struct txgbe_hw *hw) +{ + struct txgbe_addr_filter_info *a = &hw->addr_ctrl; + u32 psrctl; + + if (a->mta_in_use > 0) { + psrctl = rd32(hw, TXGBE_PSR_CTL); + psrctl &= ~(TXGBE_PSR_CTL_MO | TXGBE_PSR_CTL_MFE); + psrctl |= hw->mac.mc_filter_type << TXGBE_PSR_CTL_MO_SHIFT; + wr32(hw, TXGBE_PSR_CTL, psrctl); + } + + return 0; +} + +/** + * txgbe_fc_enable - Enable flow control + * @hw: pointer to hardware structure + * + * Enable flow control according to the current settings. + **/ +s32 txgbe_fc_enable(struct txgbe_hw *hw) +{ + u32 mflcn_reg = 0; + u32 fccfg_reg = 0; + s32 ret_val = 0; + u32 fcrtl = 0; + u32 fcrth = 0; + u32 reg = 0; + int i = 0; + + /* Validate the water mark configuration */ + if (!hw->fc.pause_time) { + ret_val = TXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* Low water mark of zero causes XOFF floods */ + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & txgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + if (!hw->fc.low_water[i] || + hw->fc.low_water[i] >= hw->fc.high_water[i]) { + DEBUGOUT("Invalid water mark configuration\n"); + ret_val = TXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + } + } + + /* Negotiate the fc mode to use */ + txgbe_fc_autoneg(hw); + + /* Disable any previous flow control settings */ + mflcn_reg = rd32(hw, TXGBE_MAC_RX_FLOW_CTRL); + mflcn_reg &= ~(TXGBE_MAC_RX_FLOW_CTRL_PFCE | + TXGBE_MAC_RX_FLOW_CTRL_RFE); + + fccfg_reg = rd32(hw, TXGBE_RDB_RFCC); + fccfg_reg &= ~(TXGBE_RDB_RFCC_RFCE_802_3X | + TXGBE_RDB_RFCC_RFCE_PRIORITY); + + /* + * The possible values of fc.current_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. 
+ */ + switch (hw->fc.current_mode) { + case txgbe_fc_none: + /* + * Flow control is disabled by software override or autoneg. + * The code below will actually disable it in the HW. + */ + break; + case txgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + mflcn_reg |= TXGBE_MAC_RX_FLOW_CTRL_RFE; + break; + case txgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + fccfg_reg |= TXGBE_RDB_RFCC_RFCE_802_3X; + break; + case txgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + mflcn_reg |= TXGBE_MAC_RX_FLOW_CTRL_RFE; + fccfg_reg |= TXGBE_RDB_RFCC_RFCE_802_3X; + break; + default: + ERROR_REPORT1(TXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = TXGBE_ERR_CONFIG; + goto out; + } + + /* Set 802.3x based flow control settings. */ + wr32(hw, TXGBE_MAC_RX_FLOW_CTRL, mflcn_reg); + wr32(hw, TXGBE_RDB_RFCC, fccfg_reg); + + + /* Set up and enable Rx high/low water mark thresholds, enable XON. */ + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & txgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + fcrtl = (hw->fc.low_water[i] << 10); + + wr32(hw, TXGBE_RDB_RFCL(i), fcrtl); + fcrth = (hw->fc.high_water[i] << 10) | + TXGBE_RDB_RFCH_XOFFE; + } else { + wr32(hw, TXGBE_RDB_RFCL(i), 0); + /* + * In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the Rx packet buffer size - 24KB. This allows + * the Tx switch to function even under heavy Rx + * workloads. + */ + fcrth = rd32(hw, TXGBE_RDB_PB_SZ(i)) - 24576; + } + + wr32(hw, TXGBE_RDB_RFCH(i), fcrth); + } + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time * 0x00010001; + for (i = 0; i < (TXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) + wr32(hw, TXGBE_RDB_RFCV(i), reg); + + /* Configure flow control refresh threshold value */ + wr32(hw, TXGBE_RDB_RFCRT, hw->fc.pause_time / 2); + +out: + return ret_val; +} + +/** + * txgbe_negotiate_fc - Negotiate flow control + * @hw: pointer to hardware structure + * @adv_reg: flow control advertised settings + * @lp_reg: link partner's flow control settings + * @adv_sym: symmetric pause bit in advertisement + * @adv_asm: asymmetric pause bit in advertisement + * @lp_sym: symmetric pause bit in link partner advertisement + * @lp_asm: asymmetric pause bit in link partner advertisement + * + * Find the intersection between advertised settings and link partner's + * advertised settings + **/ +STATIC s32 txgbe_negotiate_fc(struct txgbe_hw *hw, u32 adv_reg, u32 lp_reg, + u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) +{ + if ((!(adv_reg)) || (!(lp_reg))) { + ERROR_REPORT3(TXGBE_ERROR_UNSUPPORTED, + "Local or link partner's advertised flow control " + "settings are NULL. Local: %x, link partner: %x\n", + adv_reg, lp_reg); + return TXGBE_ERR_FC_NOT_NEGOTIATED; + } + + if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { + /* + * Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. 
+ */ + if (hw->fc.requested_mode == txgbe_fc_full) { + hw->fc.current_mode = txgbe_fc_full; + DEBUGOUT("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = txgbe_fc_rx_pause; + DEBUGOUT("Flow Control=RX PAUSE frames only\n"); + } + } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && + (lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = txgbe_fc_tx_pause; + DEBUGOUT("Flow Control = TX PAUSE frames only.\n"); + } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && + !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = txgbe_fc_rx_pause; + DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); + } else { + hw->fc.current_mode = txgbe_fc_none; + DEBUGOUT("Flow Control = NONE.\n"); + } + return 0; +} + +/** + * txgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber + * @hw: pointer to hardware structure + * + * Enable flow control according on 1 gig fiber. + **/ +STATIC s32 txgbe_fc_autoneg_fiber(struct txgbe_hw *hw) +{ + + u32 pcs_anadv_reg, pcs_lpab_reg; + s32 ret_val = TXGBE_ERR_FC_NOT_NEGOTIATED; + + + pcs_anadv_reg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_AN_ADV); + pcs_lpab_reg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_LP_BABL); + + ret_val = txgbe_negotiate_fc(hw, pcs_anadv_reg, + pcs_lpab_reg, + TXGBE_SR_MII_MMD_AN_ADV_PAUSE_SYM, + TXGBE_SR_MII_MMD_AN_ADV_PAUSE_ASM, + TXGBE_SR_MII_MMD_AN_ADV_PAUSE_SYM, + TXGBE_SR_MII_MMD_AN_ADV_PAUSE_ASM); + + return ret_val; +} + +/** + * txgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + **/ +STATIC s32 txgbe_fc_autoneg_backplane(struct txgbe_hw *hw) +{ + u32 anlp1_reg, autoc_reg; + s32 ret_val = TXGBE_ERR_FC_NOT_NEGOTIATED; + + /* + * Read the 10g AN autoc and LP ability registers and resolve + * local flow control settings accordingly + */ + autoc_reg = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG1); + anlp1_reg = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_LP_ABL1); + + ret_val = txgbe_negotiate_fc(hw, autoc_reg, + anlp1_reg, TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM, + TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM, + TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM, + TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM); + + return ret_val; +} + +/** + * txgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + **/ +STATIC s32 txgbe_fc_autoneg_copper(struct txgbe_hw *hw) +{ + u8 technology_ability_reg = 0; + u8 lp_technology_ability_reg = 0; + + txgbe_get_phy_advertised_pause(hw, &technology_ability_reg); + txgbe_get_lp_advertised_pause(hw, &lp_technology_ability_reg); + + return txgbe_negotiate_fc(hw, (u32)technology_ability_reg, + (u32)lp_technology_ability_reg, + TXGBE_TAF_SYM_PAUSE, TXGBE_TAF_ASM_PAUSE, + TXGBE_TAF_SYM_PAUSE, TXGBE_TAF_ASM_PAUSE); +} + +/** + * txgbe_fc_autoneg - Configure flow control + * @hw: pointer to hardware structure + * + * Compares our advertised flow control capabilities to those advertised by + * our link partner, and determines the proper flow control mode to use. + **/ +void txgbe_fc_autoneg(struct txgbe_hw *hw) +{ + s32 ret_val = TXGBE_ERR_FC_NOT_NEGOTIATED; + u32 speed; + bool link_up = 0; + + /* + * AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. 
+ */ + if (hw->fc.disable_fc_autoneg) { + ERROR_REPORT1(TXGBE_ERROR_UNSUPPORTED, + "Flow control autoneg is disabled"); + goto out; + } + + TCALL(hw, mac.ops.check_link, &speed, &link_up, false); + if (!link_up) { + ERROR_REPORT1(TXGBE_ERROR_SOFTWARE, "The link is down"); + goto out; + } + + switch (hw->phy.media_type) { + /* Autoneg flow control on fiber adapters */ + case txgbe_media_type_fiber: + if (speed == TXGBE_LINK_SPEED_1GB_FULL) + ret_val = txgbe_fc_autoneg_fiber(hw); + break; + + /* Autoneg flow control on backplane adapters */ + case txgbe_media_type_backplane: + ret_val = txgbe_fc_autoneg_backplane(hw); + break; + + /* Autoneg flow control on copper adapters */ + case txgbe_media_type_copper: + if (txgbe_device_supports_autoneg_fc(hw)) + ret_val = txgbe_fc_autoneg_copper(hw); + break; + + default: + break; + } + +out: + if (ret_val == 0) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } +} + +/** + * txgbe_disable_pcie_master - Disable PCI-express master access + * @hw: pointer to hardware structure + * + * Disables PCI-Express master access and verifies there are no pending + * requests. TXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable + * bit hasn't caused the master requests to be disabled, else 0 + * is returned signifying master requests disabled. + **/ +s32 txgbe_disable_pcie_master(struct txgbe_hw *hw) +{ + s32 status = 0; + u32 i; + struct txgbe_adapter *adapter = hw->back; + unsigned int num_vfs = adapter->num_vfs; + u16 dev_ctl; + u32 vf_bme_clear = 0; + u16 vid = 0; + u16 cmd = 0; + u32 reg32 = 0; + + /* Always set this bit to ensure any future transactions are blocked */ + pci_clear_master(((struct txgbe_adapter *)hw->back)->pdev); + + /* Exit if master requests are blocked */ + if (!(rd32(hw, TXGBE_PX_TRANSACTION_PENDING)) || + TXGBE_REMOVED(hw->hw_addr)) + goto out; + + /* BME disable handshake will not be finished if any VF BME is 0 */ + for (i = 0; i < num_vfs; i++) { + struct pci_dev *vfdev = NULL; + if (!adapter->vfinfo) + break; + vfdev = adapter->vfinfo[i].vfdev; + if (!vfdev) continue; + pci_read_config_word(vfdev, 0x4, &dev_ctl); + if ((dev_ctl & 0x4) == 0) { + vf_bme_clear = 1; + break; + } + } + + /* Poll for master request bit to clear */ + for (i = 0; i < TXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { + usec_delay(100); + if (!(rd32(hw, TXGBE_PX_TRANSACTION_PENDING))) + goto out; + } + + if(!vf_bme_clear) { + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "PCIe transaction pending bit did not clear.\n"); + status = TXGBE_ERR_MASTER_REQUESTS_PENDING; + + /* print out PCI configuration space value */ + txgbe_print_tx_hang_status(adapter); + pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &vid); + ERROR_REPORT1(TXGBE_ERROR_POLLING, "PCI VID is 0x%x\n", vid); + + pci_read_config_word(adapter->pdev, PCI_COMMAND, &cmd); + ERROR_REPORT1(TXGBE_ERROR_POLLING, "PCI COMMAND value is 0x%x.\n", cmd); + + reg32 = rd32(hw, 0x10000); + ERROR_REPORT1(TXGBE_ERROR_POLLING, "read 0x10000 value is 0x%08x\n", reg32); + } + +out: + return status; +} + + +/** + * txgbe_acquire_swfw_sync - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +s32 txgbe_acquire_swfw_sync(struct txgbe_hw *hw, u32 mask) +{ + u32 gssr = 0; + u32 swmask = mask; + u32 fwmask = mask << 16; + u32 timeout = 200; + u32 i; + + 
for (i = 0; i < timeout; i++) { + /* + * SW NVM semaphore bit is used for access to all + * SW_FW_SYNC bits (not just NVM) + */ + if (txgbe_get_eeprom_semaphore(hw)) + return TXGBE_ERR_SWFW_SYNC; + + if (txgbe_check_mng_access(hw)) { + gssr = rd32(hw, TXGBE_MNG_SWFW_SYNC); + if (!(gssr & (fwmask | swmask))) { + gssr |= swmask; + wr32(hw, TXGBE_MNG_SWFW_SYNC, gssr); + txgbe_release_eeprom_semaphore(hw); + return 0; + } else { + /* Resource is currently in use by FW or SW */ + txgbe_release_eeprom_semaphore(hw); + msec_delay(5); + } + } + } + + /* If time expired clear the bits holding the lock and retry */ + if (gssr & (fwmask | swmask)) + txgbe_release_swfw_sync(hw, gssr & (fwmask | swmask)); + + msec_delay(5); + return TXGBE_ERR_SWFW_SYNC; +} + +/** + * txgbe_release_swfw_sync - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +void txgbe_release_swfw_sync(struct txgbe_hw *hw, u32 mask) +{ + txgbe_get_eeprom_semaphore(hw); + if (txgbe_check_mng_access(hw)) + wr32m(hw, TXGBE_MNG_SWFW_SYNC, mask, 0); + + txgbe_release_eeprom_semaphore(hw); +} + +/** + * txgbe_disable_sec_rx_path - Stops the receive data path + * @hw: pointer to hardware structure + * + * Stops the receive data path and waits for the HW to internally empty + * the Rx security block + **/ +s32 txgbe_disable_sec_rx_path(struct txgbe_hw *hw) +{ +#define TXGBE_MAX_SECRX_POLL 40 + + int i; + int secrxreg; + + wr32m(hw, TXGBE_RSC_CTL, + TXGBE_RSC_CTL_RX_DIS, TXGBE_RSC_CTL_RX_DIS); + for (i = 0; i < TXGBE_MAX_SECRX_POLL; i++) { + secrxreg = rd32(hw, TXGBE_RSC_ST); + if (secrxreg & TXGBE_RSC_ST_RSEC_RDY) + break; + else + /* Use interrupt-safe sleep just in case */ + usec_delay(1000); + } + + /* For informational purposes only */ + if (i >= TXGBE_MAX_SECRX_POLL) + DEBUGOUT("Rx unit being enabled before security " + "path fully disabled. Continuing with init.\n"); + + return 0; +} + +/** + * txgbe_enable_sec_rx_path - Enables the receive data path + * @hw: pointer to hardware structure + * + * Enables the receive data path. + **/ +s32 txgbe_enable_sec_rx_path(struct txgbe_hw *hw) +{ + wr32m(hw, TXGBE_RSC_CTL, + TXGBE_RSC_CTL_RX_DIS, 0); + TXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * txgbe_disable_sec_tx_path - Stops the transmit data path + * @hw: pointer to hardware structure + * + * Stops the transmit data path and waits for the HW to internally empty + * the tx security block + **/ +s32 txgbe_disable_sec_tx_path(struct txgbe_hw *hw) +{ +#define TXGBE_MAX_SECTX_POLL 40 + + int i; + int secrxreg; + + wr32m(hw, TXGBE_TSC_CTL, + TXGBE_TSC_CTL_TX_DIS, TXGBE_TSC_CTL_TX_DIS); + for (i = 0; i < TXGBE_MAX_SECTX_POLL; i++) { + secrxreg = rd32(hw, TXGBE_TSC_ST); + if (secrxreg & TXGBE_TSC_ST_SECTX_RDY) + break; + else + /* Use interrupt-safe sleep just in case */ + usec_delay(1000); + } + + /* For informational purposes only */ + if (i >= TXGBE_MAX_SECTX_POLL) + DEBUGOUT("Tx unit being enabled before security " + "path fully disabled. Continuing with init.\n"); + + return 0; +} + +/** + * txgbe_enable_sec_Tx_path - Enables the transmit data path + * @hw: pointer to hardware structure + * + * Enables the transmit data path. 
+ **/ +s32 txgbe_enable_sec_tx_path(struct txgbe_hw *hw) +{ + wr32m(hw, TXGBE_TSC_CTL, + TXGBE_TSC_CTL_TX_DIS, 0); + TXGBE_WRITE_FLUSH(hw); + + return 0; +} + + +/** + * txgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM + * @hw: pointer to hardware structure + * @san_mac_offset: SAN MAC address offset + * + * This function will read the EEPROM location for the SAN MAC address + * pointer, and returns the value at that location. This is used in both + * get and set mac_addr routines. + **/ +STATIC s32 txgbe_get_san_mac_addr_offset(struct txgbe_hw *hw, + u16 *san_mac_offset) +{ + s32 ret_val; + + /* + * First read the EEPROM pointer to see if the MAC addresses are + * available. + */ + ret_val = TCALL(hw, eeprom.ops.read, + hw->eeprom.sw_region_offset + TXGBE_SAN_MAC_ADDR_PTR, + san_mac_offset); + if (ret_val) { + ERROR_REPORT2(TXGBE_ERROR_INVALID_STATE, + "eeprom at offset %d failed", + TXGBE_SAN_MAC_ADDR_PTR); + } + + return ret_val; +} + +/** + * txgbe_get_san_mac_addr - SAN MAC address retrieval from the EEPROM + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Reads the SAN MAC address from the EEPROM, if it's available. This is + * per-port, so set_lan_id() must be called before reading the addresses. + * set_lan_id() is called by identify_sfp(), but this cannot be relied + * upon for non-SFP connections, so we must call it here. + **/ +s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr) +{ + u16 san_mac_data, san_mac_offset; + u8 i; + s32 ret_val; + + /* + * First read the EEPROM pointer to see if the MAC addresses are + * available. If they're not, no point in calling set_lan_id() here. + */ + ret_val = txgbe_get_san_mac_addr_offset(hw, &san_mac_offset); + if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) + goto san_mac_addr_out; + + /* apply the port offset to the address offset */ + (hw->bus.func) ? (san_mac_offset += TXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : + (san_mac_offset += TXGBE_SAN_MAC_ADDR_PORT0_OFFSET); + for (i = 0; i < 3; i++) { + ret_val = TCALL(hw, eeprom.ops.read, san_mac_offset, + &san_mac_data); + if (ret_val) { + ERROR_REPORT2(TXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", + san_mac_offset); + goto san_mac_addr_out; + } + san_mac_addr[i * 2] = (u8)(san_mac_data); + san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); + san_mac_offset++; + } + return 0; + +san_mac_addr_out: + /* + * No addresses available in this EEPROM. It's not an + * error though, so just wipe the local address and return. + */ + for (i = 0; i < 6; i++) + san_mac_addr[i] = 0xFF; + return 0; +} + +/** + * txgbe_set_san_mac_addr - Write the SAN MAC address to the EEPROM + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Write a SAN MAC address to the EEPROM. + **/ +s32 txgbe_set_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr) +{ + s32 ret_val; + u16 san_mac_data, san_mac_offset; + u8 i; + + /* Look for SAN mac address pointer. If not defined, return */ + ret_val = txgbe_get_san_mac_addr_offset(hw, &san_mac_offset); + if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) + return TXGBE_ERR_NO_SAN_ADDR_PTR; + + /* Apply the port offset to the address offset */ + (hw->bus.func) ? 
(san_mac_offset += TXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : + (san_mac_offset += TXGBE_SAN_MAC_ADDR_PORT0_OFFSET); + + for (i = 0; i < 3; i++) { + san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8); + san_mac_data |= (u16)(san_mac_addr[i * 2]); + TCALL(hw, eeprom.ops.write, san_mac_offset, san_mac_data); + san_mac_offset++; + } + + return 0; +} + + +/** + * txgbe_insert_mac_addr - Find a RAR for this mac address + * @hw: pointer to hardware structure + * @addr: Address to put into receive address register + * @vmdq: VMDq pool to assign + * + * Puts an ethernet address into a receive address register, or + * finds the rar that it is aleady in; adds to the pool list + **/ +s32 txgbe_insert_mac_addr(struct txgbe_hw *hw, u8 *addr, u32 vmdq) +{ + static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF; + u32 first_empty_rar = NO_EMPTY_RAR_FOUND; + u32 rar; + u32 rar_low, rar_high; + u32 addr_low, addr_high; + + /* swap bytes for HW little endian */ + addr_low = addr[5] | (addr[4] << 8) + | (addr[3] << 16) + | (addr[2] << 24); + addr_high = addr[1] | (addr[0] << 8); + + /* + * Either find the mac_id in rar or find the first empty space. + * rar_highwater points to just after the highest currently used + * rar in order to shorten the search. It grows when we add a new + * rar to the top. + */ + for (rar = 0; rar < hw->mac.rar_highwater; rar++) { + wr32(hw, TXGBE_PSR_MAC_SWC_IDX, rar); + rar_high = rd32(hw, TXGBE_PSR_MAC_SWC_AD_H); + + if (((TXGBE_PSR_MAC_SWC_AD_H_AV & rar_high) == 0) + && first_empty_rar == NO_EMPTY_RAR_FOUND) { + first_empty_rar = rar; + } else if ((rar_high & 0xFFFF) == addr_high) { + rar_low = rd32(hw, TXGBE_PSR_MAC_SWC_AD_L); + if (rar_low == addr_low) + break; /* found it already in the rars */ + } + } + + if (rar < hw->mac.rar_highwater) { + /* already there so just add to the pool bits */ + TCALL(hw, mac.ops.set_vmdq, rar, vmdq); + } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) { + /* stick it into first empty RAR slot we found */ + rar = first_empty_rar; + TCALL(hw, mac.ops.set_rar, rar, addr, vmdq, + TXGBE_PSR_MAC_SWC_AD_H_AV); + } else if (rar == hw->mac.rar_highwater) { + /* add it to the top of the list and inc the highwater mark */ + TCALL(hw, mac.ops.set_rar, rar, addr, vmdq, + TXGBE_PSR_MAC_SWC_AD_H_AV); + hw->mac.rar_highwater++; + } else if (rar >= hw->mac.num_rar_entries) { + return TXGBE_ERR_INVALID_MAC_ADDR; + } + + /* + * If we found rar[0], make sure the default pool bit (we use pool 0) + * remains cleared to be sure default pool packets will get delivered + */ + if (rar == 0) + TCALL(hw, mac.ops.clear_vmdq, rar, 0); + + return rar; +} + +/** + * txgbe_clear_vmdq - Disassociate a VMDq pool index from a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to disassociate + * @vmdq: VMDq pool index to remove from the rar + **/ +s32 txgbe_clear_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 mpsar_lo, mpsar_hi; + u32 rar_entries = hw->mac.num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + ERROR_REPORT2(TXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", rar); + return TXGBE_ERR_INVALID_ARGUMENT; + } + + wr32(hw, TXGBE_PSR_MAC_SWC_IDX, rar); + mpsar_lo = rd32(hw, TXGBE_PSR_MAC_SWC_VM_L); + mpsar_hi = rd32(hw, TXGBE_PSR_MAC_SWC_VM_H); + + if (TXGBE_REMOVED(hw->hw_addr)) + goto done; + + if (!mpsar_lo && !mpsar_hi) + goto done; + + /* was that the last pool using this rar? 
*/
+	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+		TCALL(hw, mac.ops.clear_rar, rar);
+done:
+	return 0;
+}
+
+/**
+ * txgbe_set_vmdq - Associate a VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to associate with a VMDq index
+ * @pool: VMDq pool index
+ **/
+s32 txgbe_set_vmdq(struct txgbe_hw *hw, u32 rar, u32 pool)
+{
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	/* Make sure we are using a valid rar index range */
+	if (rar >= rar_entries) {
+		ERROR_REPORT2(TXGBE_ERROR_ARGUMENT,
+			      "RAR index %d is out of range.\n", rar);
+		return TXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	return 0;
+}
+
+/**
+ * txgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @vmdq: VMDq pool index
+ *
+ * This function should only be invoked in IOV mode. In IOV mode the default
+ * pool is the next pool after the number of VFs advertised, not pool 0.
+ * The MPSAR table needs to be updated for the SAN_MAC RAR
+ * [hw->mac.san_mac_rar_index].
+ **/
+s32 txgbe_set_vmdq_san_mac(struct txgbe_hw *hw, u32 vmdq)
+{
+	u32 rar = hw->mac.san_mac_rar_index;
+
+	wr32(hw, TXGBE_PSR_MAC_SWC_IDX, rar);
+	if (vmdq < 32) {
+		wr32(hw, TXGBE_PSR_MAC_SWC_VM_L, 1 << vmdq);
+		wr32(hw, TXGBE_PSR_MAC_SWC_VM_H, 0);
+	} else {
+		wr32(hw, TXGBE_PSR_MAC_SWC_VM_L, 0);
+		wr32(hw, TXGBE_PSR_MAC_SWC_VM_H, 1 << (vmdq - 32));
+	}
+
+	return 0;
+}
+
+/**
+ * txgbe_init_uta_tables - Initialize the Unicast Table Array
+ * @hw: pointer to hardware structure
+ **/
+s32 txgbe_init_uta_tables(struct txgbe_hw *hw)
+{
+	int i;
+
+	for (i = 0; i < 128; i++)
+		wr32(hw, TXGBE_PSR_UC_TBL(i), 0);
+
+	return 0;
+}
+
+/**
+ * txgbe_find_vlvf_slot - find the vlanid or the first empty slot
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ *
+ * return the VLVF index where this VLAN id should be placed
+ *
+ **/
+s32 txgbe_find_vlvf_slot(struct txgbe_hw *hw, u32 vlan)
+{
+	u32 bits = 0;
+	u32 first_empty_slot = 0;
+	s32 regindex;
+
+	/* short cut the special case */
+	if (vlan == 0)
+		return 0;
+
+	/*
+	 * Search for the vlan id in the VLVF entries. Save off the first empty
+	 * slot found along the way
+	 */
+	for (regindex = 1; regindex < TXGBE_PSR_VLAN_SWC_ENTRIES; regindex++) {
+		wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, regindex);
+		bits = rd32(hw, TXGBE_PSR_VLAN_SWC);
+		if (!bits && !(first_empty_slot))
+			first_empty_slot = regindex;
+		else if ((bits & 0x0FFF) == vlan)
+			break;
+	}
+
+	/*
+	 * If regindex is less than TXGBE_VLVF_ENTRIES, then we found the vlan
+	 * in the VLVF. Else use the first empty VLVF register for this
+	 * vlan id.
+	 */
+	if (regindex >= TXGBE_PSR_VLAN_SWC_ENTRIES) {
+		if (first_empty_slot)
+			regindex = first_empty_slot;
+		else {
+			ERROR_REPORT1(TXGBE_ERROR_SOFTWARE,
+				      "No space in VLVF.\n");
+			regindex = TXGBE_ERR_NO_SPACE;
+		}
+	}
+
+	return regindex;
+}
+
+/**
+ * txgbe_set_vfta - Set VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VFVFB
+ * @vlan_on: boolean flag to turn on/off VLAN in VFVF
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ **/ +s32 txgbe_set_vfta(struct txgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on) +{ + s32 regindex; + u32 bitindex; + u32 vfta; + u32 targetbit; + s32 ret_val = 0; + bool vfta_changed = false; + + if (vlan > 4095) + return TXGBE_ERR_PARAM; + + /* + * this is a 2 part operation - first the VFTA, then the + * VLVF and VLVFB if VT Mode is set + * We don't write the VFTA until we know the VLVF part succeeded. + */ + + /* Part 1 + * The VFTA is a bitstring made up of 128 32-bit registers + * that enable the particular VLAN id, much like the MTA: + * bits[11-5]: which register + * bits[4-0]: which bit in the register + */ + regindex = (vlan >> 5) & 0x7F; + bitindex = vlan & 0x1F; + targetbit = (1 << bitindex); + /* errata 5 */ + vfta = hw->mac.vft_shadow[regindex]; + if (vlan_on) { + if (!(vfta & targetbit)) { + vfta |= targetbit; + vfta_changed = true; + } + } else { + if ((vfta & targetbit)) { + vfta &= ~targetbit; + vfta_changed = true; + } + } + + /* Part 2 + * Call txgbe_set_vlvf to set VLVFB and VLVF + */ + ret_val = txgbe_set_vlvf(hw, vlan, vind, vlan_on, + &vfta_changed); + if (ret_val != 0) + return ret_val; + + if (vfta_changed) + wr32(hw, TXGBE_PSR_VLAN_TBL(regindex), vfta); + /* errata 5 */ + hw->mac.vft_shadow[regindex] = vfta; + return 0; +} + +/** + * txgbe_set_vlvf - Set VLAN Pool Filter + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFVFB + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * @vfta_changed: pointer to boolean flag which indicates whether VFTA + * should be changed + * + * Turn on/off specified bit in VLVF table. + **/ +s32 txgbe_set_vlvf(struct txgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool *vfta_changed) +{ + u32 vt; + + if (vlan > 4095) + return TXGBE_ERR_PARAM; + + /* If VT Mode is set + * Either vlan_on + * make sure the vlan is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + vt = rd32(hw, TXGBE_CFG_PORT_CTL); + if (vt & TXGBE_CFG_PORT_CTL_NUM_VT_MASK) { + s32 vlvf_index; + u32 bits; + + vlvf_index = txgbe_find_vlvf_slot(hw, vlan); + if (vlvf_index < 0) + return vlvf_index; + + wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, vlvf_index); + if (vlan_on) { + /* set the pool bit */ + if (vind < 32) { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_L); + bits |= (1 << vind); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_L, + bits); + } else { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + bits |= (1 << (vind - 32)); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_H, + bits); + } + } else { + /* clear the pool bit */ + if (vind < 32) { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_L); + bits &= ~(1 << vind); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_L, + bits); + bits |= rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + } else { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + bits &= ~(1 << (vind - 32)); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_H, + bits); + bits |= rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_L); + } + } + + /* + * If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. 
In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. + */ + if (bits) { + wr32(hw, TXGBE_PSR_VLAN_SWC, + (TXGBE_PSR_VLAN_SWC_VIEN | vlan)); + if ((!vlan_on) && (vfta_changed != NULL)) { + /* someone wants to clear the vfta entry + * but some pools/VFs are still using it. + * Ignore it. */ + *vfta_changed = false; + } + } else + wr32(hw, TXGBE_PSR_VLAN_SWC, 0); + } + + return 0; +} + +/** + * txgbe_clear_vfta - Clear VLAN filter table + * @hw: pointer to hardware structure + * + * Clears the VLAN filter table, and the VMDq index associated with the filter + **/ +s32 txgbe_clear_vfta(struct txgbe_hw *hw) +{ + u32 offset; + + for (offset = 0; offset < hw->mac.vft_size; offset++) { + wr32(hw, TXGBE_PSR_VLAN_TBL(offset), 0); + /* errata 5 */ + hw->mac.vft_shadow[offset] = 0; + } + + for (offset = 0; offset < TXGBE_PSR_VLAN_SWC_ENTRIES; offset++) { + wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, offset); + wr32(hw, TXGBE_PSR_VLAN_SWC, 0); + wr32(hw, TXGBE_PSR_VLAN_SWC_VM_L, 0); + wr32(hw, TXGBE_PSR_VLAN_SWC_VM_H, 0); + } + + return 0; +} + +/** + * txgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from + * the EEPROM + * @hw: pointer to hardware structure + * @wwnn_prefix: the alternative WWNN prefix + * @wwpn_prefix: the alternative WWPN prefix + * + * This function will read the EEPROM from the alternative SAN MAC address + * block to check the support for the alternative WWNN/WWPN prefix support. + **/ +s32 txgbe_get_wwn_prefix(struct txgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix) +{ + u16 offset, caps; + u16 alt_san_mac_blk_offset; + + /* clear output first */ + *wwnn_prefix = 0xFFFF; + *wwpn_prefix = 0xFFFF; + + /* check if alternative SAN MAC is supported */ + offset = hw->eeprom.sw_region_offset + TXGBE_ALT_SAN_MAC_ADDR_BLK_PTR; + if (TCALL(hw, eeprom.ops.read, offset, &alt_san_mac_blk_offset)) + goto wwn_prefix_err; + + if ((alt_san_mac_blk_offset == 0) || + (alt_san_mac_blk_offset == 0xFFFF)) + goto wwn_prefix_out; + + /* check capability in alternative san mac address block */ + offset = alt_san_mac_blk_offset + TXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; + if (TCALL(hw, eeprom.ops.read, offset, &caps)) + goto wwn_prefix_err; + if (!(caps & TXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) + goto wwn_prefix_out; + + /* get the corresponding prefix for WWNN/WWPN */ + offset = alt_san_mac_blk_offset + TXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; + if (TCALL(hw, eeprom.ops.read, offset, wwnn_prefix)) { + ERROR_REPORT2(TXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", offset); + } + + offset = alt_san_mac_blk_offset + TXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; + if (TCALL(hw, eeprom.ops.read, offset, wwpn_prefix)) + goto wwn_prefix_err; + +wwn_prefix_out: + return 0; + +wwn_prefix_err: + ERROR_REPORT2(TXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", offset); + return 0; +} + + +/** + * txgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for anti-spoofing + * @pf: Physical Function pool - do not enable anti-spoofing for the PF + * + **/ +void txgbe_set_mac_anti_spoofing(struct txgbe_hw *hw, bool enable, int pf) +{ + u64 pfvfspoof = 0; + + if (enable) { + /* + * The PF should be allowed to spoof so that it can support + * emulation mode NICs. Do not set the bits assigned to the PF. + * Remaining pools belong to the PF so they do not need to have + * anti-spoofing enabled.
+ */ + pfvfspoof = (1 << pf) - 1; + wr32(hw, TXGBE_TDM_MAC_AS_L, + pfvfspoof & 0xffffffff); + wr32(hw, TXGBE_TDM_MAC_AS_H, pfvfspoof >> 32); + } else { + wr32(hw, TXGBE_TDM_MAC_AS_L, 0); + wr32(hw, TXGBE_TDM_MAC_AS_H, 0); + } +} + +/** + * txgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for VLAN anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing + * + **/ +void txgbe_set_vlan_anti_spoofing(struct txgbe_hw *hw, bool enable, int vf) +{ + u32 pfvfspoof; + + if (vf < 32) { + pfvfspoof = rd32(hw, TXGBE_TDM_VLAN_AS_L); + if (enable) + pfvfspoof |= (1 << vf); + else + pfvfspoof &= ~(1 << vf); + wr32(hw, TXGBE_TDM_VLAN_AS_L, pfvfspoof); + } else { + pfvfspoof = rd32(hw, TXGBE_TDM_VLAN_AS_H); + if (enable) + pfvfspoof |= (1 << (vf - 32)); + else + pfvfspoof &= ~(1 << (vf - 32)); + wr32(hw, TXGBE_TDM_VLAN_AS_H, pfvfspoof); + } +} + +/** + * txgbe_set_ethertype_anti_spoofing - Enable/Disable Ethertype anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for Ethertype anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing + * + **/ +void txgbe_set_ethertype_anti_spoofing(struct txgbe_hw *hw, + bool enable, int vf) +{ + u32 pfvfspoof; + + if (vf < 32) { + pfvfspoof = rd32(hw, TXGBE_TDM_ETYPE_AS_L); + if (enable) + pfvfspoof |= (1 << vf); + else + pfvfspoof &= ~(1 << vf); + wr32(hw, TXGBE_TDM_ETYPE_AS_L, pfvfspoof); + } else { + pfvfspoof = rd32(hw, TXGBE_TDM_ETYPE_AS_H); + if (enable) + pfvfspoof |= (1 << (vf - 32)); + else + pfvfspoof &= ~(1 << (vf - 32)); + wr32(hw, TXGBE_TDM_ETYPE_AS_H, pfvfspoof); + } +} + +/** + * txgbe_get_device_caps - Get additional device capabilities + * @hw: pointer to hardware structure + * @device_caps: the EEPROM word with the extra device capabilities + * + * This function will read the EEPROM location for the device capabilities, + * and return the word through device_caps. + **/ +s32 txgbe_get_device_caps(struct txgbe_hw *hw, u16 *device_caps) +{ + TCALL(hw, eeprom.ops.read, + hw->eeprom.sw_region_offset + TXGBE_DEVICE_CAPS, device_caps); + + return 0; +} + +/** + * txgbe_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. + **/ +u8 txgbe_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8) (0 - sum); +} + +/** + * txgbe_host_interface_command - Issue command to manageability block + * @hw: pointer to the HW structure + * @buffer: contains the command to write and where the return status will + * be placed + * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * @return_data: read and return data from the buffer (true) or not (false) + * Needed because FW structures are big endian and decoding of + * these fields can be 8 bit or 16 bit based on command. Decoding + * is not easily understood without making a table of commands. + * So we will leave this up to the caller to read back the data + * in these cases. + * + * Communicates with the manageability block. On success return 0 + * else return TXGBE_ERR_HOST_INTERFACE_COMMAND. 
+ **/ +s32 txgbe_host_interface_command(struct txgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)hw->back; + struct txgbe_hic_hdr *send_hdr = (struct txgbe_hic_hdr *)buffer; + u32 hdr_size = sizeof(struct txgbe_hic_hdr); + struct txgbe_hic_hdr *recv_hdr; + u32 buf[64] = {}; + u32 hicr, i, bi; + s32 status = 0; + u32 dword_len; + u16 buf_len; + u8 send_cmd; + + if (length == 0 || length > TXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, + "Buffer length failure buffersize=%d.\n", length); + return TXGBE_ERR_HOST_INTERFACE_COMMAND; + } + + if (hw->mac.type == txgbe_mac_sp) + if (TCALL(hw, mac.ops.acquire_swfw_sync, TXGBE_MNG_SWFW_SYNC_SW_MB) + != 0) { + return TXGBE_ERR_SWFW_SYNC; + } + + /* Calculate length in DWORDs. We must be DWORD aligned */ + if ((length % (sizeof(u32))) != 0) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, + "Buffer length failure, not aligned to dword"); + status = TXGBE_ERR_INVALID_ARGUMENT; + goto rel_out; + } + + dword_len = length >> 2; + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + /* try to get lock and lock */ + /* wait max to 50ms to get lock */ + WARN_ON(in_interrupt()); + while (test_and_set_bit(__TXGBE_SWFW_BUSY, &adapter->state)) { + timeout--; + if (!timeout) + return TXGBE_ERR_TIMEOUT; + usleep_range(1000, 2000); + } + + /* index to unique seq id for each mbox message */ + send_hdr->cksum_or_index.index = adapter->swfw_index; + send_cmd = send_hdr->cmd; + + /* write data to SW-FW mbox array */ + for (i = 0; i < dword_len; i++) { + wr32a(hw, TXGBE_AML_MNG_MBOX_SW2FW, + i, TXGBE_CPU_TO_LE32(buffer[i])); + /* write flush */ + buf[i] = rd32a(hw, TXGBE_AML_MNG_MBOX_SW2FW, i); + } + + /* amlite: generate interrupt to notify FW */ + wr32m(hw, TXGBE_AML_MNG_MBOX_CTL_SW2FW, + TXGBE_AML_MNG_MBOX_NOTIFY, 0); + wr32m(hw, TXGBE_AML_MNG_MBOX_CTL_SW2FW, + TXGBE_AML_MNG_MBOX_NOTIFY, TXGBE_AML_MNG_MBOX_NOTIFY); + + /* Calculate length in DWORDs */ + dword_len = hdr_size >> 2; + + /* polling reply from FW */ + timeout = 50; + do { + timeout--; + usleep_range(1000, 2000); + + /* read hdr */ + for (bi = 0; bi < dword_len; bi++) { + buffer[bi] = rd32a(hw, TXGBE_AML_MNG_MBOX_FW2SW, bi); + TXGBE_LE32_TO_CPUS(&buffer[bi]); + } + + /* check hdr */ + recv_hdr = (struct txgbe_hic_hdr *)buffer; + + if ((recv_hdr->cmd == send_cmd) && + (recv_hdr->cksum_or_index.index == adapter->swfw_index)) { + break; + } + } while (timeout); + + if (!timeout) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, + "Polling from FW messages timeout, cmd is 0x%x, index is %d\n", + send_cmd, adapter->swfw_index); + status = TXGBE_ERR_TIMEOUT; + goto rel_out; + } + + /* expect no reply from FW then return */ + /* release lock if return */ + if (!return_data) + goto rel_out; + + /* If there is any thing in data position pull it in */ + buf_len = recv_hdr->buf_len; + if (buf_len == 0) { + goto rel_out; + } + if (length < buf_len + hdr_size) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, + "Buffer not large enough for reply message.\n"); + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; + } + + /* Calculate length in DWORDs, add 3 for odd lengths */ + dword_len = (buf_len + 3) >> 2; + for (; bi <= dword_len; bi++) { + buffer[bi] = rd32a(hw, TXGBE_AML_MNG_MBOX_FW2SW, bi); + TXGBE_LE32_TO_CPUS(&buffer[bi]); + } + } else if (hw->mac.type == txgbe_mac_sp) { + /* legacy sw-fw mbox */ + /* The device driver writes the relevant command block + * into the ram area. 
+ */ + for (i = 0; i < dword_len; i++) { + if (txgbe_check_mng_access(hw)) { + wr32a(hw, TXGBE_MNG_MBOX, + i, TXGBE_CPU_TO_LE32(buffer[i])); + /* write flush */ + buf[i] = rd32a(hw, TXGBE_MNG_MBOX, i); + } + else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } + /* Setting this bit tells the ARC that a new command is pending. */ + if (txgbe_check_mng_access(hw)) + wr32m(hw, TXGBE_MNG_MBOX_CTL, + TXGBE_MNG_MBOX_CTL_SWRDY, TXGBE_MNG_MBOX_CTL_SWRDY); + else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + + for (i = 0; i < timeout; i++) { + if (txgbe_check_mng_access(hw)) { + hicr = rd32(hw, TXGBE_MNG_MBOX_CTL); + if ((hicr & TXGBE_MNG_MBOX_CTL_FWRDY)) + break; + } + msec_delay(1); + } + + buf[0] = rd32(hw, TXGBE_MNG_MBOX); + + if ((buf[0] & 0xff0000) >> 16 == 0x80) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, "It's unknown cmd.\n"); + status = TXGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + /* Check command completion */ + if (timeout != 0 && i == timeout) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, + "Command has failed with no status valid.\n"); + + ERROR_REPORT1(TXGBE_ERROR_CAUTION, "write value:\n"); + for (i = 0; i < dword_len; i++) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, "%x ", buffer[i]); + } + ERROR_REPORT1(TXGBE_ERROR_CAUTION, "read value:\n"); + for (i = 0; i < dword_len; i++) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, "%x ", buf[i]); + } + ERROR_REPORT1(TXGBE_ERROR_CAUTION, + "===%x= %x=\n", buffer[0] & 0xff, (~buf[0] >> 24)); + if( (buffer[0] & 0xff) != (~buf[0] >> 24)) { + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; + } + } + + if (!return_data) + goto rel_out; + + /* Calculate length in DWORDs */ + dword_len = hdr_size >> 2; + + /* first pull in the header so we know the buffer length */ + for (bi = 0; bi < dword_len; bi++) { + if (txgbe_check_mng_access(hw)) { + buffer[bi] = rd32a(hw, TXGBE_MNG_MBOX, + bi); + TXGBE_LE32_TO_CPUS(&buffer[bi]); + } else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } + + /* If there is any thing in data position pull it in */ + buf_len = ((struct txgbe_hic_hdr *)buffer)->buf_len; + if (buf_len == 0) + goto rel_out; + + if (length < buf_len + hdr_size) { + DEBUGOUT("Buffer not large enough for reply message.\n"); + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; + } + + /* Calculate length in DWORDs, add 3 for odd lengths */ + dword_len = (buf_len + 3) >> 2; + + /* Pull in the rest of the buffer (bi is where we left off) */ + for (; bi <= dword_len; bi++) { + if (txgbe_check_mng_access(hw)) { + buffer[bi] = rd32a(hw, TXGBE_MNG_MBOX, + bi); + TXGBE_LE32_TO_CPUS(&buffer[bi]); + } else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } + } + +rel_out: + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + /* index++, index replace txgbe_hic_hdr.checksum */ + adapter->swfw_index = send_hdr->cksum_or_index.index == TXGBE_HIC_HDR_INDEX_MAX ? + 0 : send_hdr->cksum_or_index.index + 1; + + clear_bit(__TXGBE_SWFW_BUSY, &adapter->state); + } else + TCALL(hw, mac.ops.release_swfw_sync, TXGBE_MNG_SWFW_SYNC_SW_MB); + + return status; +} + +/** + * txgbe_set_fw_drv_ver - Sends driver version to firmware + * @hw: pointer to the HW structure + * @maj: driver version major number + * @min: driver version minor number + * @build: driver version build number + * @sub: driver version sub build number + * + * Sends driver version number to firmware through the manageability + * block. 
On success return 0 + * else returns TXGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or TXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + **/ +s32 txgbe_set_fw_drv_ver(struct txgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub) +{ + struct txgbe_hic_drv_info fw_cmd; + int i; + s32 ret_val = 0; + + fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; + fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; + fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + fw_cmd.port_num = (u8)hw->bus.func; + fw_cmd.ver_maj = maj; + fw_cmd.ver_min = min; + fw_cmd.ver_build = build; + fw_cmd.ver_sub = sub; + fw_cmd.pad = 0; + fw_cmd.pad2 = 0; + + if (hw->mac.type == txgbe_mac_sp) { + fw_cmd.hdr.cksum_or_index.checksum = 0; + fw_cmd.hdr.cksum_or_index.checksum = txgbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); + } + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + ret_val = txgbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(fw_cmd), + TXGBE_HI_COMMAND_TIMEOUT, + true); + if (ret_val != 0) + continue; + + if (fw_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + ret_val = 0; + else + ret_val = TXGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + return ret_val; +} + +/** + * txgbe_reset_hostif - send reset cmd to fw + * @hw: pointer to hardware structure + * + * Sends reset cmd to firmware through the manageability + * block. On success return 0 + * else returns TXGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or TXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + **/ +s32 txgbe_reset_hostif(struct txgbe_hw *hw) +{ + struct txgbe_hic_reset reset_cmd; + int i; + s32 status = 0; + + reset_cmd.hdr.cmd = FW_RESET_CMD; + reset_cmd.hdr.buf_len = FW_RESET_LEN; + reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + reset_cmd.lan_id = hw->bus.lan_id; + reset_cmd.reset_type = (u16)hw->reset_type; + if (hw->mac.type == txgbe_mac_sp) { + reset_cmd.hdr.cksum_or_index.checksum = 0; + reset_cmd.hdr.cksum_or_index.checksum = txgbe_calculate_checksum((u8 *)&reset_cmd, + (FW_CEM_HDR_LEN + reset_cmd.hdr.buf_len)); + } + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + status = txgbe_host_interface_command(hw, (u32 *)&reset_cmd, + sizeof(reset_cmd), + TXGBE_HI_COMMAND_TIMEOUT, + true); + if (status != 0) + continue; + + if (reset_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) { + status = 0; + hw->link_status = TXGBE_LINK_STATUS_NONE; + } + else + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + return status; +} + +static u16 txgbe_crc16_ccitt(const u8 *buf, int size) +{ + u16 crc = 0; + int i; + while (--size >= 0) { + crc ^= (u16)*buf++ << 8; + for (i = 0; i < 8; i++) { + if (crc & 0x8000) + crc = crc << 1 ^ 0x1021; + else + crc <<= 1; + } + } + return crc; +} + +s32 txgbe_upgrade_flash_hostif(struct txgbe_hw *hw, u32 region, + const u8 *data, u32 size) +{ + struct txgbe_hic_upg_start start_cmd; + struct txgbe_hic_upg_write write_cmd; + struct txgbe_hic_upg_verify verify_cmd; + u32 offset; + s32 status = 0; + + start_cmd.hdr.cmd = FW_FLASH_UPGRADE_START_CMD; + start_cmd.hdr.buf_len = FW_FLASH_UPGRADE_START_LEN; + start_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + start_cmd.module_id = (u8)region; + + if (hw->mac.type == txgbe_mac_sp) { + start_cmd.hdr.cksum_or_index.checksum = 0; + start_cmd.hdr.cksum_or_index.checksum = txgbe_calculate_checksum((u8 *)&start_cmd, + (FW_CEM_HDR_LEN + start_cmd.hdr.buf_len)); + } + + start_cmd.pad2 = 0; + start_cmd.pad3 = 0; + + status = 
txgbe_host_interface_command(hw, (u32 *)&start_cmd, + sizeof(start_cmd), + TXGBE_HI_FLASH_ERASE_TIMEOUT, + true); + + if (start_cmd.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else { + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + return status; + } + + for (offset = 0; offset < size;) { + write_cmd.hdr.cmd = FW_FLASH_UPGRADE_WRITE_CMD; + if (size - offset > 248) { + write_cmd.data_len = 248 / 4; + write_cmd.eof_flag = 0; + } else { + write_cmd.data_len = (u8)((size - offset) / 4); + write_cmd.eof_flag = 1; + } + memcpy((u8 *)write_cmd.data, &data[offset], write_cmd.data_len * 4); + write_cmd.hdr.buf_len = (write_cmd.data_len + 1) * 4; + write_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + write_cmd.check_sum = txgbe_crc16_ccitt((u8 *)write_cmd.data, + write_cmd.data_len * 4); + + status = txgbe_host_interface_command(hw, (u32 *)&write_cmd, + sizeof(write_cmd), + TXGBE_HI_FLASH_UPDATE_TIMEOUT, + true); + if (write_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else { + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + return status; + } + offset += write_cmd.data_len * 4; + } + + verify_cmd.hdr.cmd = FW_FLASH_UPGRADE_VERIFY_CMD; + verify_cmd.hdr.buf_len = FW_FLASH_UPGRADE_VERIFY_LEN; + verify_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + switch (region) { + case TXGBE_MODULE_EEPROM: + verify_cmd.action_flag = TXGBE_RELOAD_EEPROM; + break; + case TXGBE_MODULE_FIRMWARE: + verify_cmd.action_flag = TXGBE_RESET_FIRMWARE; + break; + case TXGBE_MODULE_HARDWARE: + verify_cmd.action_flag = TXGBE_RESET_LAN; + break; + default: + return status; + } + + if (hw->mac.type == txgbe_mac_sp) + verify_cmd.hdr.cksum_or_index.checksum = txgbe_calculate_checksum((u8 *)&verify_cmd, + (FW_CEM_HDR_LEN + verify_cmd.hdr.buf_len)); + + status = txgbe_host_interface_command(hw, (u32 *)&verify_cmd, + sizeof(verify_cmd), + TXGBE_HI_FLASH_VERIFY_TIMEOUT, + true); + + if (verify_cmd.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else { + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + } + return status; +} + +/* cmd_addr is used for some special command: + * 1. to be sector address, when implemented erase sector command + * 2.
to be flash address when implemented read, write flash address + */ +static int txgbe_fmgr_cmd_op(struct txgbe_hw *hw, u32 cmd, u32 cmd_addr) +{ + u32 cmd_val = 0, time_out = 0; + + cmd_val = (cmd << SPI_CLK_CMD_OFFSET) | (SPI_CLK_DIV << SPI_CLK_DIV_OFFSET) | cmd_addr; + wr32(hw, SPI_H_CMD_REG_ADDR, cmd_val); + while (1) { + if (rd32(hw, SPI_H_STA_REG_ADDR) & 0x1) + break; + + if (time_out == SPI_TIME_OUT_VALUE) + return -ETIMEDOUT; + + time_out = time_out + 1; + udelay(50); + } + + return 0; +} +static int fmgr_usr_cmd_op(struct txgbe_hw *hw, u32 usr_cmd) +{ + u8 status = 0; + + wr32(hw, SPI_H_USR_CMD_REG_ADDR, usr_cmd); + status = txgbe_fmgr_cmd_op(hw, SPI_CMD_USER_CMD, 0); + + return status; +} + +static int txgbe_flash_erase_chip(struct txgbe_hw *hw) +{ + return txgbe_fmgr_cmd_op(hw, SPI_CMD_ERASE_CHIP, 0); +} + +static int txgbe_flash_erase_sector(struct txgbe_hw *hw, u32 sec_addr) +{ + return txgbe_fmgr_cmd_op(hw, SPI_CMD_ERASE_SECTOR, sec_addr); +} +static int txgbe_flash_write_dword(struct txgbe_hw *hw, u32 addr, u32 dword) +{ + int status = 0; + u32 data; + + wr32(hw, SPI_H_DAT_REG_ADDR, dword); + status = txgbe_fmgr_cmd_op(hw, SPI_CMD_WRITE_DWORD, addr); + if (status) + return status; + + txgbe_flash_read_dword(hw, addr, &data); + if (dword != data) + return -EIO; + + return 0; + +} +int txgbe_flash_read_dword(struct txgbe_hw *hw, u32 addr, u32 *data) +{ + int ret = 0; + + ret = txgbe_fmgr_cmd_op(hw, SPI_CMD_READ_DWORD, addr); + if (ret < 0) + return ret; + + *data = rd32(hw, SPI_H_DAT_REG_ADDR); + + return ret; + +} +static int txgbe_flash_write_unlock(struct txgbe_hw *hw) +{ + int status; + struct txgbe_hic_read_shadow_ram buffer; + + buffer.hdr.req.cmd = 0x40; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = 0; + + if (hw->mac.type == txgbe_mac_sp) + buffer.hdr.req.cksum_or_index.checksum = 0xFF; + + /* convert offset from words to bytes */ + buffer.address = 0; + /* one word */ + buffer.length = 0; + + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), 5000,false); + + return status; +} + +int txgbe_upgrade_flash(struct txgbe_hw *hw, u32 region, + const u8 *data, u32 size) +{ + u32 mac_addr0_dword0_t, mac_addr0_dword1_t, mac_addr1_dword0_t, mac_addr1_dword1_t; + u32 serial_num_dword0_t, serial_num_dword1_t, serial_num_dword2_t; + struct txgbe_adapter *adapter = hw->back; + u8 status = 0, skip = 0, flash_vendor = 0; + u32 sector_num = 0, read_data = 0, i = 0; + u32 sn[24]; + char sn_str[40]; + u8 sn_is_str = true; + u8 *vpd_tend = NULL; + u32 curadr = 0; + u32 vpdadr = 0; + u8 id_str_len, pn_str_len, sn_str_len, rv_str_len; + u32 mac_addr0_dword0_addr, mac_addr0_dword1_addr; + u32 mac_addr1_dword0_addr, mac_addr1_dword1_addr; + u16 subsystem_device_id; + u16 device_id; + u16 vpd_ro_len; + u32 chksum = 0; + u32 upgrade_check = 0x0; + int err = 0; + + if (hw->mac.type == txgbe_mac_sp) { + upgrade_check = PRB_CTL; + subsystem_device_id = data[0xfffdc] << 8 | data[0xfffdd]; + device_id = data[0xfffde] << 8 | data[0xfffdf]; + } else { + upgrade_check = PRB_SCRATCH; + if (data[0x3000] == 0x25 && data[0x3001] == 0x20) { + subsystem_device_id = data[0x302c] << 8 | data[0x302d]; + device_id = data[0x302e] << 8 | data[0x302f]; + } else { + subsystem_device_id = data[0xfffdc] << 8 | data[0xfffdd]; + device_id = data[0xfffde] << 8 | data[0xfffdf]; + } + } + + read_data = rd32(hw, upgrade_check); + if (read_data & 0x80000000) { + e_info(drv, "The flash has been successfully upgraded once, please reboot to make it work.\n"); + return -EOPNOTSUPP; + } + + 
/* check sub_id */ + e_info(drv, "Checking sub_id .......\n"); + e_info(drv, "The card's sub_id : %04x\n", hw->subsystem_device_id); + e_info(drv, "The image's sub_id : %04x\n", subsystem_device_id); + + if ((hw->subsystem_device_id & 0xfff) == (subsystem_device_id & 0xfff)) { + e_info(drv, "It is the right image\n"); + } else if (hw->subsystem_device_id == 0xffff) { + e_info(drv, "update anyway\n"); + } else { + e_err(drv, "====The Gigabit image does not match the Gigabit card====\n"); + e_err(drv, "====Please check your image====\n"); + return -EOPNOTSUPP; + } + + /* check dev_id */ + e_info(drv, "Checking dev_id .......\n"); + e_info(drv, "The image's dev_id : %04x\n", device_id); + e_info(drv, "The card's dev_id : %04x\n", hw->device_id); + if (!((hw->device_id & 0xfff0) == (device_id & 0xfff0)) && + !(hw->device_id == 0xffff)) + { + e_err(drv, "====The Gigabit image does not match the Gigabit card====\n"); + e_err(drv, "====Please check your image====\n"); + return -EOPNOTSUPP; + } + + /* unlock flash write protect */ + wr32(hw, TXGBE_SPI_CMDCFG0, 0x9f050206); + wr32(hw, 0x10194, 0x9f050206); + + msleep(1000); + + switch (hw->mac.type) { + case txgbe_mac_sp: + mac_addr0_dword0_addr = MAC_ADDR0_WORD0_OFFSET_1G; + mac_addr0_dword1_addr = MAC_ADDR0_WORD1_OFFSET_1G; + mac_addr1_dword0_addr = MAC_ADDR1_WORD0_OFFSET_1G; + mac_addr1_dword1_addr = MAC_ADDR1_WORD1_OFFSET_1G; + break; + case txgbe_mac_aml: + case txgbe_mac_aml40: + mac_addr0_dword0_addr = AMLITE_MAC_ADDR0_WORD0_OFFSET; + mac_addr0_dword1_addr = AMLITE_MAC_ADDR0_WORD1_OFFSET; + mac_addr1_dword0_addr = AMLITE_MAC_ADDR1_WORD0_OFFSET; + mac_addr1_dword1_addr = AMLITE_MAC_ADDR1_WORD1_OFFSET; + break; + default: + e_err(drv, "====Error mac type====\n"); + return -EOPNOTSUPP; + } + + txgbe_flash_read_dword(hw, mac_addr0_dword0_addr, &mac_addr0_dword0_t); + txgbe_flash_read_dword(hw, mac_addr0_dword1_addr, &mac_addr0_dword1_t); + mac_addr0_dword1_t = mac_addr0_dword1_t & U16_MAX; + txgbe_flash_read_dword(hw, mac_addr1_dword0_addr, &mac_addr1_dword0_t); + txgbe_flash_read_dword(hw, mac_addr1_dword1_addr, &mac_addr1_dword1_t); + mac_addr1_dword1_t = mac_addr1_dword1_t & U16_MAX; + + for (i = 0; i < 24; i++) { + txgbe_flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 4 * i, &sn[i]); + } + if (sn[23] == U32_MAX) + sn_is_str = false; + + txgbe_flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G, &serial_num_dword0_t); + txgbe_flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 4, &serial_num_dword1_t); + txgbe_flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 8, &serial_num_dword2_t); + e_info(drv, "Old: MAC Address0 is: 0x%04x%08x\n", mac_addr0_dword1_t, mac_addr0_dword0_t); + e_info(drv, " MAC Address1 is: 0x%04x%08x\n", mac_addr1_dword1_t, mac_addr1_dword0_t); + + status = fmgr_usr_cmd_op(hw, 0x6); /* write enable */ + status = fmgr_usr_cmd_op(hw, 0x98); /* global protection un-lock */ + txgbe_flash_write_unlock(hw); + msleep(1000); + + //rebuild vpd + vpd_tend = kcalloc(256, sizeof(u8), GFP_KERNEL); + if (!vpd_tend) { + e_info(drv, "failed to allocate memory for vpd\n"); + return -ENOMEM; + } + memset(vpd_tend, 0xff, 256 * sizeof(u8)); + + curadr = TXGBE_VPD_OFFSET + 1; + id_str_len = data[curadr] | data[curadr + 1] << 8; + curadr += (7 + id_str_len); + pn_str_len = data[curadr]; + curadr += 1 + pn_str_len; + + for (i = 0; i < curadr - TXGBE_VPD_OFFSET; i++) { + vpd_tend[i] = data[TXGBE_VPD_OFFSET + i]; + } + + memset(sn_str, 0x0, sizeof(sn_str)); + if (sn_is_str) { + for (i = 0; i < 24; i++) { + sn_str[i] = sn[23-i]; + } + sn_str_len =
strlen(sn_str); + } else { + sn_str_len = 0x12; + sprintf(sn_str, "%02x%08x%08x", (serial_num_dword2_t & 0xff), serial_num_dword1_t, serial_num_dword0_t); + } + + vpdadr = curadr - TXGBE_VPD_OFFSET; + + if (data[curadr] == 'S' && data[curadr + 1] == 'N') { + if (data[curadr + 2]) { + for (i = sn_str_len; i < data[curadr + 2]; i++) + sn_str[i] = 0x20; + sn_str_len = data[curadr + 2]; + } + curadr += 3 + data[curadr + 2]; + rv_str_len = data[2 + curadr]; + } else { + rv_str_len = data[2 + curadr]; + } + + vpd_tend[vpdadr] = 'S'; + vpd_tend[vpdadr + 1] = 'N'; + vpd_tend[vpdadr + 2] = sn_str_len; + + for (i = 0; i < sn_str_len; i++) + vpd_tend[vpdadr + 3 + i] = sn_str[i]; + + vpdadr = vpdadr + 3 + sn_str_len; + + for (i = 0; i < 3; i++) + vpd_tend[vpdadr + i] = data[curadr + i]; + + vpdadr += 3; + for (i = 0; i < rv_str_len; i++) + vpd_tend[vpdadr + i] = 0x0; + + vpdadr += rv_str_len; + vpd_ro_len = pn_str_len + sn_str_len + rv_str_len + 9; + vpd_tend[4 + id_str_len] = vpd_ro_len & 0xff; + vpd_tend[5 + id_str_len] = (vpd_ro_len >> 8) & 0xff; + + for (i = 0; i < vpdadr; i++) + chksum += vpd_tend[i]; + chksum = ~(chksum & 0xff) + 1; + vpd_tend[vpdadr - rv_str_len] = chksum; + vpd_tend[vpdadr] = 0x78; + + /* Note: for Spansion flash, the first 8 sectors (4KB) in sector0 (64KB) + * need to use a special erase command (4K sector erase) */ + if (flash_vendor == 1) { + wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c720); + for (i = 0; i < 8; i++) { + txgbe_flash_erase_sector(hw, i * 128); + msleep(20); // 20 ms + } + wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c7d8); + } + + sector_num = size / SPI_SECTOR_SIZE; + /* Winbond Flash, erase chip command is okay, but erase sector doesn't work */ + if (flash_vendor == 2) { + status = txgbe_flash_erase_chip(hw); + e_err(drv, "Erase chip command, return status = %0d\n", status); + msleep(1000); + } else { + wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c720); + for (i = 0; i < sector_num; i++) { + status = txgbe_flash_erase_sector(hw, i * SPI_SECTOR_SIZE); + if (status) + e_err(drv, "Erase sector[%2d] command, return status = %0d\n", i, status); + msleep(50); + } + wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c7d8); + } + + /* Program Image file in dword */ + for (i = 0; i < size / 4; i++) { + read_data = data[4 * i + 3] << 24 | data[4 * i + 2] << 16 | data[4 * i + 1] << 8 | data[4 * i]; + read_data = __le32_to_cpu(read_data); + skip = ((i * 4 == mac_addr0_dword0_addr) || (i * 4 == mac_addr0_dword1_addr) || + (i * 4 == mac_addr1_dword0_addr) || (i * 4 == mac_addr1_dword1_addr) || + (i * 4 >= PRODUCT_SERIAL_NUM_OFFSET_1G && i * 4 <= PRODUCT_SERIAL_NUM_OFFSET_1G + 92) || + (i * 4 >= TXGBE_VPD_OFFSET && i * 4 < TXGBE_VPD_END) || + (i * 4 == 0x15c)); + if (read_data != U32_MAX && !skip) { + status = txgbe_flash_write_dword(hw, i * 4, read_data); + if (status) { + e_err(drv, "ERROR: Program 0x%08x @addr: 0x%08x is failed !!\n", read_data, i * 4); + txgbe_flash_read_dword(hw, i * 4, &read_data); + e_err(drv, " Read data from Flash is: 0x%08x\n", read_data); + err = -EBUSY; + goto err_exit; + } + } + } + + for (i = 0; i < 256 / 4; i++) { + read_data = vpd_tend[4 * i + 3] << 24 | vpd_tend[4 * i + 2] << 16 | vpd_tend[4 * i + 1] << 8 | vpd_tend[4 * i]; + read_data = __le32_to_cpu(read_data); + if (read_data != U32_MAX) { + status = txgbe_flash_write_dword(hw, TXGBE_VPD_OFFSET + i * 4, read_data); + if (status) { + e_err(drv, "ERROR: Program 0x%08x @addr: 0x%08x is failed !!\n", read_data, i * 4); + txgbe_flash_read_dword(hw, i * 4, &read_data); + e_err(drv, " Read data from Flash is: 0x%08x\n", read_data); + err = -EBUSY;
+ goto err_exit; + } + } + } + + chksum = 0; + for (i = 0; i< 0x1000; i += 2) { + if (i >= TXGBE_VPD_OFFSET && i < TXGBE_VPD_END) { + chksum += (vpd_tend[i - TXGBE_VPD_OFFSET + 1] << 8 | vpd_tend[i - TXGBE_VPD_OFFSET]); + } else if (i == 0x15e) { + continue; + } else { + chksum += (data[i + 1] << 8 | data[i]); + } + } + chksum = 0xbaba - chksum; + chksum &= 0xffff; + status = txgbe_flash_write_dword(hw, 0x15e, 0xffff0000 | chksum); + + txgbe_flash_write_dword(hw, mac_addr0_dword0_addr, mac_addr0_dword0_t); + txgbe_flash_write_dword(hw, mac_addr0_dword1_addr, (mac_addr0_dword1_t | 0x80000000));//lan0 + txgbe_flash_write_dword(hw, mac_addr1_dword0_addr, mac_addr1_dword0_t); + txgbe_flash_write_dword(hw, mac_addr1_dword1_addr, (mac_addr1_dword1_t | 0x80000000));//lan1 + if (sn_is_str) { + for (i = 0; i < 24; i++) { + txgbe_flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 4 * i, sn[i]); + } + } else { + txgbe_flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G, serial_num_dword0_t); + txgbe_flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 4, serial_num_dword1_t); + txgbe_flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 8, serial_num_dword2_t); + } + + wr32(hw, upgrade_check, rd32(hw, upgrade_check) | 0x80000000); + +err_exit: + if (vpd_tend) + kfree(vpd_tend); + return err; +} + + +/** + * txgbe_set_rxpba - Initialize Rx packet buffer + * @hw: pointer to hardware structure + * @num_pb: number of packet buffers to allocate + * @headroom: reserve n KB of headroom + * @strategy: packet buffer allocation strategy + **/ +void txgbe_set_rxpba(struct txgbe_hw *hw, int num_pb, u32 headroom, + int strategy) +{ + u32 pbsize = hw->mac.rx_pb_size; + int i = 0; + u32 rxpktsize, txpktsize, txpbthresh; + + /* Reserve headroom */ + pbsize -= headroom; + + if (!num_pb) + num_pb = 1; + + /* Divide remaining packet buffer space amongst the number of packet + * buffers requested using supplied strategy. + */ + switch (strategy) { + case PBA_STRATEGY_WEIGHTED: + /* txgbe_dcb_pba_80_48 strategy weight first half of packet + * buffer with 5/8 of the packet buffer space. + */ + rxpktsize = (pbsize * 5) / (num_pb * 4); + pbsize -= rxpktsize * (num_pb / 2); + rxpktsize <<= TXGBE_RDB_PB_SZ_SHIFT; + for (; i < (num_pb / 2); i++) + wr32(hw, TXGBE_RDB_PB_SZ(i), rxpktsize); + fallthrough; + /* Fall through to configure remaining packet buffers */ + case PBA_STRATEGY_EQUAL: + rxpktsize = (pbsize / (num_pb - i)) << TXGBE_RDB_PB_SZ_SHIFT; + for (; i < num_pb; i++) + wr32(hw, TXGBE_RDB_PB_SZ(i), rxpktsize); + break; + default: + break; + } + + /* Only support an equally distributed Tx packet buffer strategy. 
*/ + txpktsize = TXGBE_TDB_PB_SZ_MAX / num_pb; + txpbthresh = (txpktsize / 1024) - TXGBE_TXPKT_SIZE_MAX; + for (i = 0; i < num_pb; i++) { + wr32(hw, TXGBE_TDB_PB_SZ(i), txpktsize); + wr32(hw, TXGBE_TDM_PB_THRE(i), txpbthresh); + } + + /* Clear unused TCs, if any, to zero buffer size*/ + for (; i < TXGBE_MAX_PB; i++) { + wr32(hw, TXGBE_RDB_PB_SZ(i), 0); + wr32(hw, TXGBE_TDB_PB_SZ(i), 0); + wr32(hw, TXGBE_TDM_PB_THRE(i), 0); + } +} + + +/*STATIC const u8 txgbe_emc_temp_data[4] = { + TXGBE_EMC_INTERNAL_DATA, + TXGBE_EMC_DIODE1_DATA, + TXGBE_EMC_DIODE2_DATA, + TXGBE_EMC_DIODE3_DATA +}; +STATIC const u8 txgbe_emc_therm_limit[4] = { + TXGBE_EMC_INTERNAL_THERM_LIMIT, + TXGBE_EMC_DIODE1_THERM_LIMIT, + TXGBE_EMC_DIODE2_THERM_LIMIT, + TXGBE_EMC_DIODE3_THERM_LIMIT +};*/ + +/** + * txgbe_get_thermal_sensor_data - Gathers thermal sensor data + * @hw: pointer to hardware structure + * @data: pointer to the thermal sensor data structure + * + * algorithm: + * T = (-4.8380E+01)N^0 + (3.1020E-01)N^1 + (-1.8201E-04)N^2 + + (8.1542E-08)N^3 + (-1.6743E-11)N^4 + * algorithm with 5% more deviation, easy for implementation + * T = (-50)N^0 + (0.31)N^1 + (-0.0002)N^2 + (0.0000001)N^3 + * + * Returns the thermal sensor data structure + **/ +s32 txgbe_get_thermal_sensor_data(struct txgbe_hw *hw) +{ + s64 tsv; + int i = 0; + struct txgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + u32 data_code; + int temp_data, temp_fraction; + + /* Only support thermal sensors attached to physical port 0 */ + if (hw->bus.lan_id) + return TXGBE_NOT_IMPLEMENTED; + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + wr32(hw, TXGBE_AML_TS_ENA, 0x0001); + + while(1) { + data_code = rd32(hw, TXGBE_AML_TS_STS); + if ((data_code & TXGBE_AML_TS_STS_VLD) != 0) + break; + msleep(1); + if (i++ > PHYINIT_TIMEOUT) { + printk("ERROR: Wait 0x1033c Timeout!!!\n"); + return -1; + } + } + + data_code = data_code & 0xFFF; + temp_data = 419400 + 2205 * (data_code * 1000 / 4094 - 500); + + //Change double Temperature to int + tsv = temp_data/10000; + temp_fraction = temp_data - (tsv * 10000); + if (temp_fraction >= 5000) { + tsv += 1; + } + data->sensor.temp = (s16)tsv; + } else { + tsv = (s64)(rd32(hw, TXGBE_TS_ST) & + TXGBE_TS_ST_DATA_OUT_MASK); + + tsv = tsv < 1200 ? tsv : 1200; + tsv = -(48380 << 8) / 1000 + + tsv * (31020 << 8) / 100000 + - tsv * tsv * (18201 << 8) / 100000000 + + tsv * tsv * tsv * (81542 << 8) / 1000000000000 + - tsv * tsv * tsv * tsv * (16743 << 8) / 1000000000000000; + tsv >>= 8; + + data->sensor.temp = (s16)tsv; + + for (i = 0; i < 100 ; i++){ + tsv = (s64)rd32(hw, TXGBE_TS_ST); + if( tsv >> 16 == 0x1 ){ + tsv = tsv & TXGBE_TS_ST_DATA_OUT_MASK; + tsv = tsv < 1200 ? 
tsv : 1200; + tsv = -(48380 << 8) / 1000 + + tsv * (31020 << 8) / 100000 + - tsv * tsv * (18201 << 8) / 100000000 + + tsv * tsv * tsv * (81542 << 8) / 1000000000000 + - tsv * tsv * tsv * tsv * (16743 << 8) / 1000000000000000; + tsv >>= 8; + data->sensor.temp = (s16)tsv; + break; + }else{ + msleep(1); + continue; + } + } + } + return 0; +} + +/** + * txgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Inits the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +s32 txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw) +{ + s32 status = 0; + + struct txgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + memset(data, 0, sizeof(struct txgbe_thermal_sensor_data)); + + /* Only support thermal sensors attached to SP physical port 0 */ + if (hw->bus.lan_id) + return TXGBE_NOT_IMPLEMENTED; + + data->sensor.alarm_thresh = 100; + data->sensor.dalarm_thresh = 90; + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + wr32(hw, TXGBE_AML_TS_ENA, 0x0); + wr32(hw, TXGBE_AML_INTR_RAW_LO, TXGBE_AML_INTR_CL_LO); + wr32(hw, TXGBE_AML_INTR_RAW_HI, TXGBE_AML_INTR_CL_HI); + + wr32(hw, TXGBE_AML_INTR_HIGH_EN, TXGBE_AML_INTR_EN_HI); + wr32(hw, TXGBE_AML_INTR_LOW_EN, TXGBE_AML_INTR_EN_LO); + + wr32m(hw, TXGBE_AML_TS_CTL1, TXGBE_AML_EVAL_MODE_MASK, 0x10); + wr32m(hw, TXGBE_AML_TS_CTL1, TXGBE_AML_ALARM_THRE_MASK, 0x186a0000); //100 degree centigrade + wr32m(hw, TXGBE_AML_TS_CTL1, TXGBE_AML_DALARM_THRE_MASK, 0x16f60); //90 degree centigrade + wr32(hw, TXGBE_AML_TS_ENA, 0x1); + } else { + wr32(hw, TXGBE_TS_CTL, TXGBE_TS_CTL_EVAL_MD); + wr32(hw, TXGBE_TS_INT_EN, + TXGBE_TS_INT_EN_ALARM_INT_EN | TXGBE_TS_INT_EN_DALARM_INT_EN); + wr32(hw, TXGBE_TS_EN, TXGBE_TS_EN_ENA); + + wr32(hw, TXGBE_TS_ALARM_THRE, 677); + wr32(hw, TXGBE_TS_DALARM_THRE, 614); + } + return status; +} + +void txgbe_disable_rx(struct txgbe_hw *hw) +{ + u32 pfdtxgswc; + u32 rxctrl; + + rxctrl = rd32(hw, TXGBE_RDB_PB_CTL); + if (rxctrl & TXGBE_RDB_PB_CTL_RXEN) { + pfdtxgswc = rd32(hw, TXGBE_PSR_CTL); + if (pfdtxgswc & TXGBE_PSR_CTL_SW_EN) { + pfdtxgswc &= ~TXGBE_PSR_CTL_SW_EN; + wr32(hw, TXGBE_PSR_CTL, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } + rxctrl &= ~TXGBE_RDB_PB_CTL_RXEN; + wr32(hw, TXGBE_RDB_PB_CTL, rxctrl); + /* errata 14 */ + if (hw->revision_id == TXGBE_SP_MPW) { + do { + do { + if (rd32m(hw, + TXGBE_RDB_PB_CTL, + TXGBE_RDB_PB_CTL_DISABLED) == 1) + break; + msleep(10); + } while (1); + if (rd32m(hw, TXGBE_RDB_TXSWERR, + TXGBE_RDB_TXSWERR_TB_FREE) == 0x143) + break; + else { + wr32m(hw, + TXGBE_RDB_PB_CTL, + TXGBE_RDB_PB_CTL_RXEN, + TXGBE_RDB_PB_CTL_RXEN); + wr32m(hw, + TXGBE_RDB_PB_CTL, + TXGBE_RDB_PB_CTL_RXEN, + ~TXGBE_RDB_PB_CTL_RXEN); + + } + } while (1); + } + + if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP)|| + ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP))){ + /* disable mac receiver */ + wr32m(hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_RE, 0); + } + } +} + + +void txgbe_enable_rx(struct txgbe_hw *hw) +{ + u32 pfdtxgswc; + + /* enable mac receiver */ + wr32m(hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_RE, TXGBE_MAC_RX_CFG_RE); + + wr32m(hw, TXGBE_RDB_PB_CTL, + TXGBE_RDB_PB_CTL_RXEN, TXGBE_RDB_PB_CTL_RXEN); + + if (hw->mac.set_lben) { + pfdtxgswc = rd32(hw, TXGBE_PSR_CTL); + pfdtxgswc |= TXGBE_PSR_CTL_SW_EN; + wr32(hw, TXGBE_PSR_CTL, pfdtxgswc); + hw->mac.set_lben = false; + } +} + 
+/** + * txgbe_mng_present - returns true when management capability is present + * @hw: pointer to hardware structure + */ +bool txgbe_mng_present(struct txgbe_hw *hw) +{ + u32 fwsm; + + fwsm = rd32(hw, TXGBE_MIS_ST); + return fwsm & TXGBE_MIS_ST_MNG_INIT_DN; +} + +bool txgbe_check_mng_access(struct txgbe_hw *hw) +{ + bool ret = false; + u32 rst_delay; + u32 i; + + struct txgbe_adapter *adapter = hw->back; + if (!txgbe_mng_present(hw)) + return false; + if (adapter->hw.revision_id != TXGBE_SP_MPW) + return true; + if (!(adapter->flags2 & TXGBE_FLAG2_MNG_REG_ACCESS_DISABLED)) + return true; + + rst_delay = (rd32(&adapter->hw, TXGBE_MIS_RST_ST) & + TXGBE_MIS_RST_ST_RST_INIT) >> + TXGBE_MIS_RST_ST_RST_INI_SHIFT; + for (i = 0; i < rst_delay + 2; i++) { + if (!(adapter->flags2 & TXGBE_FLAG2_MNG_REG_ACCESS_DISABLED)) { + ret = true; + break; + } + msleep(100); + } + return ret; +} + +/** + * txgbe_setup_mac_link_multispeed_fiber - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the MAC and/or PHY register and restarts link. + **/ +s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN; + u32 highest_link_speed = TXGBE_LINK_SPEED_UNKNOWN; + s32 status = 0; + u32 speedcnt = 0; + u32 i = 0; + bool autoneg, link_up = false; + struct txgbe_adapter *adapter = hw->back; + + /* Mask off requested but non-supported speeds */ + status = TCALL(hw, mac.ops.get_link_capabilities, + &link_speed, &autoneg); + if (status != 0) + return status; + + speed &= link_speed; + + /* Try each speed one by one, highest priority first. We do this in + * software because 10Gb fiber doesn't support speed autonegotiation. + */ + if (speed & TXGBE_LINK_SPEED_25GB_FULL) { + speedcnt++; + highest_link_speed = TXGBE_LINK_SPEED_25GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = TCALL(hw, mac.ops.check_link, + &link_speed, &link_up, false); + if (status != 0) + return status; + + if ((link_speed == TXGBE_LINK_SPEED_25GB_FULL) && link_up) + goto out; + + /* Allow module to change analog characteristics (1G->10G) */ + msec_delay(40); + + status = TCALL(hw, mac.ops.setup_mac_link, + TXGBE_LINK_SPEED_25GB_FULL, + autoneg_wait_to_complete); + if (status != 0) + return status; + + /* Flap the Tx laser if it has not already been done */ + TCALL(hw, mac.ops.flap_tx_laser); + + /* Wait for the controller to acquire link. Per IEEE 802.3ap, + * Section 73.10.2, we may have to wait up to 500ms if KR is + * attempted. sapphire uses the same timing for 10g SFI. 
+ */ + for (i = 0; i < 5; i++) { + /* Wait for the link partner to also set speed */ + msec_delay(100); + + /* If we have link, just jump out */ + status = TCALL(hw, mac.ops.check_link, + &link_speed, &link_up, false); + if (status != 0) + return status; + + if (link_up) + goto out; + } + } + + if (speed & TXGBE_LINK_SPEED_10GB_FULL) { + speedcnt++; + if (highest_link_speed == TXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = TXGBE_LINK_SPEED_10GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = TCALL(hw, mac.ops.check_link, + &link_speed, &link_up, false); + if (status != 0) + return status; + + if ((link_speed == TXGBE_LINK_SPEED_10GB_FULL) && link_up) + goto out; + + /* Allow module to change analog characteristics (1G->10G) */ + msec_delay(40); + + status = TCALL(hw, mac.ops.setup_mac_link, + TXGBE_LINK_SPEED_10GB_FULL, + autoneg_wait_to_complete); + if (status != 0) + return status; + + /* Flap the Tx laser if it has not already been done */ + TCALL(hw, mac.ops.flap_tx_laser); + + /* Wait for the controller to acquire link. Per IEEE 802.3ap, + * Section 73.10.2, we may have to wait up to 500ms if KR is + * attempted. sapphire uses the same timing for 10g SFI. + */ + for (i = 0; i < 5; i++) { + /* Wait for the link partner to also set speed */ + msec_delay(100); + + /* If we have link, just jump out */ + status = TCALL(hw, mac.ops.check_link, + &link_speed, &link_up, false); + if (status != 0) + return status; + + if (link_up) + goto out; + } + } + + if (speed & TXGBE_LINK_SPEED_1GB_FULL) { + u32 curr_autoneg = 2; + speedcnt++; + if (highest_link_speed == TXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = TXGBE_LINK_SPEED_1GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = TCALL(hw, mac.ops.check_link, + &link_speed, &link_up, false); + if (status != 0) + return status; + + if (link_speed == TXGBE_LINK_SPEED_1GB_FULL) { + curr_autoneg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); + curr_autoneg = !!(curr_autoneg & (0x1 << 12)); + } + + if ((link_speed == TXGBE_LINK_SPEED_1GB_FULL) && link_up + && (adapter->autoneg == curr_autoneg)) + goto out; + + /* Allow module to change analog characteristics (10G->1G) */ + msec_delay(40); + + status = TCALL(hw, mac.ops.setup_mac_link, + TXGBE_LINK_SPEED_1GB_FULL, + autoneg_wait_to_complete); + if (status != 0) + return status; + + /* Flap the Tx laser if it has not already been done */ + TCALL(hw, mac.ops.flap_tx_laser); + + /* Wait for the link partner to also set speed */ + msec_delay(100); + + /* If we have link, just jump out */ + status = TCALL(hw, mac.ops.check_link, + &link_speed, &link_up, false); + if (status != 0) + return status; + + if (link_up) + goto out; + } + + /* We didn't get link. Configure back to the highest speed we tried, + * (if there was more than one). We call ourselves back with just the + * single highest speed that the user requested. 
+ */ + if (speedcnt > 1) + status = txgbe_setup_mac_link_multispeed_fiber(hw, + highest_link_speed, + autoneg_wait_to_complete); + +out: + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & TXGBE_LINK_SPEED_40GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_40GB_FULL; + + if (speed & TXGBE_LINK_SPEED_25GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_25GB_FULL; + + if (speed & TXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_10GB_FULL; + + if (speed & TXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_1GB_FULL; + + return status; +} + +int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit) +{ + u32 i = 0; + u32 reg = 0; + int err = 0; + /* if there's flash existing */ + if (!(rd32(hw, TXGBE_SPI_STATUS) & + TXGBE_SPI_STATUS_FLASH_BYPASS)) { + /* wait hw load flash done */ + for (i = 0; i < TXGBE_MAX_FLASH_LOAD_POLL_TIME; i++) { + reg = rd32(hw, TXGBE_SPI_ILDR_STATUS); + if (!(reg & check_bit)) { + /* done */ + break; + } + msleep(200); + } + if (i == TXGBE_MAX_FLASH_LOAD_POLL_TIME) { + err = TXGBE_ERR_FLASH_LOADING_FAILED; + } + } + return err; +} + +/* The txgbe_ptype_lookup is used to convert from the 8-bit ptype in the + * hardware to a bit-field that can be used by SW to more easily determine the + * packet type. + * + * Macros are used to shorten the table lines and make this table human + * readable. + * + * We store the PTYPE in the top byte of the bit field - this is just so that + * we can check that the table doesn't have a row missing, as the index into + * the table should be the PTYPE. + * + * Typical work flow: + * + * IF NOT txgbe_ptype_lookup[ptype].known + * THEN + * Packet is unknown + * ELSE IF txgbe_ptype_lookup[ptype].mac == TXGBE_DEC_PTYPE_MAC_IP + * Use the rest of the fields to look at the tunnels, inner protocols, etc + * ELSE + * Use the enum txgbe_l2_ptypes to decode the packet type + * ENDIF + */ + +/* macro to make the table lines short */ +#define TXGBE_PTT(ptype, mac, ip, etype, eip, proto, layer)\ + { ptype, \ + 1, \ + /* mac */ TXGBE_DEC_PTYPE_MAC_##mac, \ + /* ip */ TXGBE_DEC_PTYPE_IP_##ip, \ + /* etype */ TXGBE_DEC_PTYPE_ETYPE_##etype, \ + /* eip */ TXGBE_DEC_PTYPE_IP_##eip, \ + /* proto */ TXGBE_DEC_PTYPE_PROT_##proto, \ + /* layer */ TXGBE_DEC_PTYPE_LAYER_##layer } + +#define TXGBE_UKN(ptype) \ + { ptype, 0, 0, 0, 0, 0, 0, 0 } + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +/* for ((pt=0;pt<256;pt++)); do printf "macro(0x%02X),\n" $pt; done */ +txgbe_dptype txgbe_ptype_lookup[256] = { + TXGBE_UKN(0x00), + TXGBE_UKN(0x01), + TXGBE_UKN(0x02), + TXGBE_UKN(0x03), + TXGBE_UKN(0x04), + TXGBE_UKN(0x05), + TXGBE_UKN(0x06), + TXGBE_UKN(0x07), + TXGBE_UKN(0x08), + TXGBE_UKN(0x09), + TXGBE_UKN(0x0A), + TXGBE_UKN(0x0B), + TXGBE_UKN(0x0C), + TXGBE_UKN(0x0D), + TXGBE_UKN(0x0E), + TXGBE_UKN(0x0F), + + /* L2: mac */ + TXGBE_UKN(0x10), + TXGBE_PTT(0x11, L2, NONE, NONE, NONE, NONE, PAY2), + TXGBE_PTT(0x12, L2, NONE, NONE, NONE, TS, PAY2), + TXGBE_PTT(0x13, L2, NONE, NONE, NONE, NONE, PAY2), + TXGBE_PTT(0x14, L2, NONE, NONE, NONE, NONE, PAY2), + TXGBE_PTT(0x15, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x16, L2, NONE, NONE, NONE, NONE, PAY2), + TXGBE_PTT(0x17, L2, NONE, NONE, NONE, NONE, NONE), + + /* L2: ethertype filter */ + TXGBE_PTT(0x18, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x19, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1A, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1B, L2, 
NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1C, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1D, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1E, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1F, L2, NONE, NONE, NONE, NONE, NONE), + + /* L3: ip non-tunnel */ + TXGBE_UKN(0x20), + TXGBE_PTT(0x21, IP, FGV4, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x22, IP, IPV4, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x23, IP, IPV4, NONE, NONE, UDP, PAY4), + TXGBE_PTT(0x24, IP, IPV4, NONE, NONE, TCP, PAY4), + TXGBE_PTT(0x25, IP, IPV4, NONE, NONE, SCTP, PAY4), + TXGBE_UKN(0x26), + TXGBE_UKN(0x27), + TXGBE_UKN(0x28), + TXGBE_PTT(0x29, IP, FGV6, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x2A, IP, IPV6, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x2B, IP, IPV6, NONE, NONE, UDP, PAY3), + TXGBE_PTT(0x2C, IP, IPV6, NONE, NONE, TCP, PAY4), + TXGBE_PTT(0x2D, IP, IPV6, NONE, NONE, SCTP, PAY4), + TXGBE_UKN(0x2E), + TXGBE_UKN(0x2F), + + /* L2: fcoe */ + TXGBE_PTT(0x30, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x31, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x32, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x33, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x34, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_UKN(0x35), + TXGBE_UKN(0x36), + TXGBE_UKN(0x37), + TXGBE_PTT(0x38, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x39, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x3A, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x3B, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x3C, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_UKN(0x3D), + TXGBE_UKN(0x3E), + TXGBE_UKN(0x3F), + + TXGBE_UKN(0x40), + TXGBE_UKN(0x41), + TXGBE_UKN(0x42), + TXGBE_UKN(0x43), + TXGBE_UKN(0x44), + TXGBE_UKN(0x45), + TXGBE_UKN(0x46), + TXGBE_UKN(0x47), + TXGBE_UKN(0x48), + TXGBE_UKN(0x49), + TXGBE_UKN(0x4A), + TXGBE_UKN(0x4B), + TXGBE_UKN(0x4C), + TXGBE_UKN(0x4D), + TXGBE_UKN(0x4E), + TXGBE_UKN(0x4F), + TXGBE_UKN(0x50), + TXGBE_UKN(0x51), + TXGBE_UKN(0x52), + TXGBE_UKN(0x53), + TXGBE_UKN(0x54), + TXGBE_UKN(0x55), + TXGBE_UKN(0x56), + TXGBE_UKN(0x57), + TXGBE_UKN(0x58), + TXGBE_UKN(0x59), + TXGBE_UKN(0x5A), + TXGBE_UKN(0x5B), + TXGBE_UKN(0x5C), + TXGBE_UKN(0x5D), + TXGBE_UKN(0x5E), + TXGBE_UKN(0x5F), + TXGBE_UKN(0x60), + TXGBE_UKN(0x61), + TXGBE_UKN(0x62), + TXGBE_UKN(0x63), + TXGBE_UKN(0x64), + TXGBE_UKN(0x65), + TXGBE_UKN(0x66), + TXGBE_UKN(0x67), + TXGBE_UKN(0x68), + TXGBE_UKN(0x69), + TXGBE_UKN(0x6A), + TXGBE_UKN(0x6B), + TXGBE_UKN(0x6C), + TXGBE_UKN(0x6D), + TXGBE_UKN(0x6E), + TXGBE_UKN(0x6F), + TXGBE_UKN(0x70), + TXGBE_UKN(0x71), + TXGBE_UKN(0x72), + TXGBE_UKN(0x73), + TXGBE_UKN(0x74), + TXGBE_UKN(0x75), + TXGBE_UKN(0x76), + TXGBE_UKN(0x77), + TXGBE_UKN(0x78), + TXGBE_UKN(0x79), + TXGBE_UKN(0x7A), + TXGBE_UKN(0x7B), + TXGBE_UKN(0x7C), + TXGBE_UKN(0x7D), + TXGBE_UKN(0x7E), + TXGBE_UKN(0x7F), + + /* IPv4 --> IPv4/IPv6 */ + TXGBE_UKN(0x80), + TXGBE_PTT(0x81, IP, IPV4, IPIP, FGV4, NONE, PAY3), + TXGBE_PTT(0x82, IP, IPV4, IPIP, IPV4, NONE, PAY3), + TXGBE_PTT(0x83, IP, IPV4, IPIP, IPV4, UDP, PAY4), + TXGBE_PTT(0x84, IP, IPV4, IPIP, IPV4, TCP, PAY4), + TXGBE_PTT(0x85, IP, IPV4, IPIP, IPV4, SCTP, PAY4), + TXGBE_UKN(0x86), + TXGBE_UKN(0x87), + TXGBE_UKN(0x88), + TXGBE_PTT(0x89, IP, IPV4, IPIP, FGV6, NONE, PAY3), + TXGBE_PTT(0x8A, IP, IPV4, IPIP, IPV6, NONE, PAY3), + TXGBE_PTT(0x8B, IP, IPV4, IPIP, IPV6, UDP, PAY4), + TXGBE_PTT(0x8C, IP, IPV4, IPIP, IPV6, TCP, PAY4), + TXGBE_PTT(0x8D, IP, IPV4, IPIP, IPV6, SCTP, PAY4), + TXGBE_UKN(0x8E), + TXGBE_UKN(0x8F), + + /* IPv4 --> GRE/NAT --> NONE/IPv4/IPv6 */ + TXGBE_PTT(0x90, IP, IPV4, 
IG, NONE, NONE, PAY3), + TXGBE_PTT(0x91, IP, IPV4, IG, FGV4, NONE, PAY3), + TXGBE_PTT(0x92, IP, IPV4, IG, IPV4, NONE, PAY3), + TXGBE_PTT(0x93, IP, IPV4, IG, IPV4, UDP, PAY4), + TXGBE_PTT(0x94, IP, IPV4, IG, IPV4, TCP, PAY4), + TXGBE_PTT(0x95, IP, IPV4, IG, IPV4, SCTP, PAY4), + TXGBE_UKN(0x96), + TXGBE_UKN(0x97), + TXGBE_UKN(0x98), + TXGBE_PTT(0x99, IP, IPV4, IG, FGV6, NONE, PAY3), + TXGBE_PTT(0x9A, IP, IPV4, IG, IPV6, NONE, PAY3), + TXGBE_PTT(0x9B, IP, IPV4, IG, IPV6, UDP, PAY4), + TXGBE_PTT(0x9C, IP, IPV4, IG, IPV6, TCP, PAY4), + TXGBE_PTT(0x9D, IP, IPV4, IG, IPV6, SCTP, PAY4), + TXGBE_UKN(0x9E), + TXGBE_UKN(0x9F), + + /* IPv4 --> GRE/NAT --> MAC --> NONE/IPv4/IPv6 */ + TXGBE_PTT(0xA0, IP, IPV4, IGM, NONE, NONE, PAY3), + TXGBE_PTT(0xA1, IP, IPV4, IGM, FGV4, NONE, PAY3), + TXGBE_PTT(0xA2, IP, IPV4, IGM, IPV4, NONE, PAY3), + TXGBE_PTT(0xA3, IP, IPV4, IGM, IPV4, UDP, PAY4), + TXGBE_PTT(0xA4, IP, IPV4, IGM, IPV4, TCP, PAY4), + TXGBE_PTT(0xA5, IP, IPV4, IGM, IPV4, SCTP, PAY4), + TXGBE_UKN(0xA6), + TXGBE_UKN(0xA7), + TXGBE_UKN(0xA8), + TXGBE_PTT(0xA9, IP, IPV4, IGM, FGV6, NONE, PAY3), + TXGBE_PTT(0xAA, IP, IPV4, IGM, IPV6, NONE, PAY3), + TXGBE_PTT(0xAB, IP, IPV4, IGM, IPV6, UDP, PAY4), + TXGBE_PTT(0xAC, IP, IPV4, IGM, IPV6, TCP, PAY4), + TXGBE_PTT(0xAD, IP, IPV4, IGM, IPV6, SCTP, PAY4), + TXGBE_UKN(0xAE), + TXGBE_UKN(0xAF), + + /* IPv4 --> GRE/NAT --> MAC+VLAN --> NONE/IPv4/IPv6 */ + TXGBE_PTT(0xB0, IP, IPV4, IGMV, NONE, NONE, PAY3), + TXGBE_PTT(0xB1, IP, IPV4, IGMV, FGV4, NONE, PAY3), + TXGBE_PTT(0xB2, IP, IPV4, IGMV, IPV4, NONE, PAY3), + TXGBE_PTT(0xB3, IP, IPV4, IGMV, IPV4, UDP, PAY4), + TXGBE_PTT(0xB4, IP, IPV4, IGMV, IPV4, TCP, PAY4), + TXGBE_PTT(0xB5, IP, IPV4, IGMV, IPV4, SCTP, PAY4), + TXGBE_UKN(0xB6), + TXGBE_UKN(0xB7), + TXGBE_UKN(0xB8), + TXGBE_PTT(0xB9, IP, IPV4, IGMV, FGV6, NONE, PAY3), + TXGBE_PTT(0xBA, IP, IPV4, IGMV, IPV6, NONE, PAY3), + TXGBE_PTT(0xBB, IP, IPV4, IGMV, IPV6, UDP, PAY4), + TXGBE_PTT(0xBC, IP, IPV4, IGMV, IPV6, TCP, PAY4), + TXGBE_PTT(0xBD, IP, IPV4, IGMV, IPV6, SCTP, PAY4), + TXGBE_UKN(0xBE), + TXGBE_UKN(0xBF), + + /* IPv6 --> IPv4/IPv6 */ + TXGBE_UKN(0xC0), + TXGBE_PTT(0xC1, IP, IPV6, IPIP, FGV4, NONE, PAY3), + TXGBE_PTT(0xC2, IP, IPV6, IPIP, IPV4, NONE, PAY3), + TXGBE_PTT(0xC3, IP, IPV6, IPIP, IPV4, UDP, PAY4), + TXGBE_PTT(0xC4, IP, IPV6, IPIP, IPV4, TCP, PAY4), + TXGBE_PTT(0xC5, IP, IPV6, IPIP, IPV4, SCTP, PAY4), + TXGBE_UKN(0xC6), + TXGBE_UKN(0xC7), + TXGBE_UKN(0xC8), + TXGBE_PTT(0xC9, IP, IPV6, IPIP, FGV6, NONE, PAY3), + TXGBE_PTT(0xCA, IP, IPV6, IPIP, IPV6, NONE, PAY3), + TXGBE_PTT(0xCB, IP, IPV6, IPIP, IPV6, UDP, PAY4), + TXGBE_PTT(0xCC, IP, IPV6, IPIP, IPV6, TCP, PAY4), + TXGBE_PTT(0xCD, IP, IPV6, IPIP, IPV6, SCTP, PAY4), + TXGBE_UKN(0xCE), + TXGBE_UKN(0xCF), + + /* IPv6 --> GRE/NAT -> NONE/IPv4/IPv6 */ + TXGBE_PTT(0xD0, IP, IPV6, IG, NONE, NONE, PAY3), + TXGBE_PTT(0xD1, IP, IPV6, IG, FGV4, NONE, PAY3), + TXGBE_PTT(0xD2, IP, IPV6, IG, IPV4, NONE, PAY3), + TXGBE_PTT(0xD3, IP, IPV6, IG, IPV4, UDP, PAY4), + TXGBE_PTT(0xD4, IP, IPV6, IG, IPV4, TCP, PAY4), + TXGBE_PTT(0xD5, IP, IPV6, IG, IPV4, SCTP, PAY4), + TXGBE_UKN(0xD6), + TXGBE_UKN(0xD7), + TXGBE_UKN(0xD8), + TXGBE_PTT(0xD9, IP, IPV6, IG, FGV6, NONE, PAY3), + TXGBE_PTT(0xDA, IP, IPV6, IG, IPV6, NONE, PAY3), + TXGBE_PTT(0xDB, IP, IPV6, IG, IPV6, UDP, PAY4), + TXGBE_PTT(0xDC, IP, IPV6, IG, IPV6, TCP, PAY4), + TXGBE_PTT(0xDD, IP, IPV6, IG, IPV6, SCTP, PAY4), + TXGBE_UKN(0xDE), + TXGBE_UKN(0xDF), + + /* IPv6 --> GRE/NAT -> MAC -> NONE/IPv4/IPv6 */ + TXGBE_PTT(0xE0, IP, IPV6, IGM, NONE, NONE, PAY3), + 
TXGBE_PTT(0xE1, IP, IPV6, IGM, FGV4, NONE, PAY3), + TXGBE_PTT(0xE2, IP, IPV6, IGM, IPV4, NONE, PAY3), + TXGBE_PTT(0xE3, IP, IPV6, IGM, IPV4, UDP, PAY4), + TXGBE_PTT(0xE4, IP, IPV6, IGM, IPV4, TCP, PAY4), + TXGBE_PTT(0xE5, IP, IPV6, IGM, IPV4, SCTP, PAY4), + TXGBE_UKN(0xE6), + TXGBE_UKN(0xE7), + TXGBE_UKN(0xE8), + TXGBE_PTT(0xE9, IP, IPV6, IGM, FGV6, NONE, PAY3), + TXGBE_PTT(0xEA, IP, IPV6, IGM, IPV6, NONE, PAY3), + TXGBE_PTT(0xEB, IP, IPV6, IGM, IPV6, UDP, PAY4), + TXGBE_PTT(0xEC, IP, IPV6, IGM, IPV6, TCP, PAY4), + TXGBE_PTT(0xED, IP, IPV6, IGM, IPV6, SCTP, PAY4), + TXGBE_UKN(0xEE), + TXGBE_UKN(0xEF), + + /* IPv6 --> GRE/NAT -> MAC--> NONE/IPv */ + TXGBE_PTT(0xF0, IP, IPV6, IGMV, NONE, NONE, PAY3), + TXGBE_PTT(0xF1, IP, IPV6, IGMV, FGV4, NONE, PAY3), + TXGBE_PTT(0xF2, IP, IPV6, IGMV, IPV4, NONE, PAY3), + TXGBE_PTT(0xF3, IP, IPV6, IGMV, IPV4, UDP, PAY4), + TXGBE_PTT(0xF4, IP, IPV6, IGMV, IPV4, TCP, PAY4), + TXGBE_PTT(0xF5, IP, IPV6, IGMV, IPV4, SCTP, PAY4), + TXGBE_UKN(0xF6), + TXGBE_UKN(0xF7), + TXGBE_UKN(0xF8), + TXGBE_PTT(0xF9, IP, IPV6, IGMV, FGV6, NONE, PAY3), + TXGBE_PTT(0xFA, IP, IPV6, IGMV, IPV6, NONE, PAY3), + TXGBE_PTT(0xFB, IP, IPV6, IGMV, IPV6, UDP, PAY4), + TXGBE_PTT(0xFC, IP, IPV6, IGMV, IPV6, TCP, PAY4), + TXGBE_PTT(0xFD, IP, IPV6, IGMV, IPV6, SCTP, PAY4), + TXGBE_UKN(0xFE), + TXGBE_UKN(0xFF), +}; + + +void txgbe_init_mac_link_ops_sp(struct txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + + /* + * enable the laser control functions for SFP+ fiber + * and MNG not enabled + */ + mac->ops.disable_tx_laser = + txgbe_disable_tx_laser_multispeed_fiber; + mac->ops.enable_tx_laser = + txgbe_enable_tx_laser_multispeed_fiber; + mac->ops.flap_tx_laser = txgbe_flap_tx_laser_multispeed_fiber; + + if (hw->phy.multispeed_fiber) { + /* Set up dual speed SFP+ support */ + mac->ops.setup_link = txgbe_setup_mac_link_multispeed_fiber; + mac->ops.setup_mac_link = txgbe_setup_mac_link_sp; + mac->ops.set_rate_select_speed = + txgbe_set_hard_rate_select_speed; + } else { + mac->ops.setup_link = txgbe_setup_mac_link_sp; + mac->ops.set_rate_select_speed = + txgbe_set_hard_rate_select_speed; + } +} + +/** + * txgbe_init_phy_ops - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. 
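+ * Returns the status of PHY/SFP identification; TXGBE_ERR_SFP_NOT_SUPPORTED
+ * from the identify call skips the remaining link-ops setup and is passed
+ * straight back to the caller.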
+ * + **/ +s32 txgbe_init_phy_ops_sp(struct txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + s32 ret_val = 0; + + /* Identify the PHY or SFP module */ + ret_val = TCALL(hw, phy.ops.identify); + if (ret_val == TXGBE_ERR_SFP_NOT_SUPPORTED) + goto init_phy_ops_out; + + /* Setup function pointers based on detected SFP module and speeds */ + txgbe_init_mac_link_ops_sp(hw); + if (hw->phy.sfp_type != txgbe_sfp_type_unknown) + hw->phy.ops.reset = NULL; + + /* If copper media, overwrite with copper function pointers */ + if (TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_copper) { + hw->phy.type = txgbe_phy_xaui; + if ((hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI) { + mac->ops.setup_link = txgbe_setup_copper_link; + mac->ops.get_link_capabilities = + txgbe_get_copper_link_capabilities; + } + } + +init_phy_ops_out: + return ret_val; +} + +static s32 txgbe_setup_sfp_modules_sp(struct txgbe_hw *hw) +{ + s32 ret_val = 0; + + DEBUGFUNC("txgbe_setup_sfp_modules_sp"); + + if (hw->phy.sfp_type != txgbe_sfp_type_unknown) { + txgbe_init_mac_link_ops_sp(hw); + } + + return ret_val; +} + + +/** + * txgbe_init_ops_sp - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for sapphire. + * Does not touch the hardware. + **/ + +static s32 txgbe_init_ops_sp(struct txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + struct txgbe_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + ret_val = txgbe_init_ops_generic(hw); + + /* PHY */ + phy->ops.init = txgbe_init_phy_ops_sp; + + /* MAC */ + mac->ops.get_media_type = txgbe_get_media_type_sp; + mac->ops.setup_sfp = txgbe_setup_sfp_modules_sp; + + /* LINK */ + mac->ops.get_link_capabilities = txgbe_get_link_capabilities_sp; + mac->ops.setup_link = txgbe_setup_mac_link_sp; + mac->ops.check_link = txgbe_check_mac_link_sp; + + return ret_val; +} + +static void txgbe_set_mac_type(struct txgbe_hw *hw) +{ + switch (hw->device_id) { + case TXGBE_DEV_ID_SP1000: + case TXGBE_DEV_ID_WX1820: + hw->mac.type = txgbe_mac_sp; + break; + case TXGBE_DEV_ID_AML: + case TXGBE_DEV_ID_AML5025: + case TXGBE_DEV_ID_AML5125: + hw->mac.type = txgbe_mac_aml; + break; + case TXGBE_DEV_ID_AML5040: + case TXGBE_DEV_ID_AML5140: + hw->mac.type = txgbe_mac_aml40; + break; + default: + hw->mac.type = txgbe_mac_unknown; + break; + } +} + +/** + * txgbe_init_shared_code - Initialize the shared code + * @hw: pointer to hardware structure + * + * This will assign function pointers and assign the MAC type and PHY code. + * Does not touch the hardware. This function must be called prior to any + * other function in the shared code. The txgbe_hw structure should be + * memset to 0 prior to calling this function. 
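+ * A minimal probe-time sketch (hypothetical caller; the adapter/io_addr
+ * names are illustrative, the pdev fields are standard struct pci_dev
+ * members) would be:
+ *
+ *	memset(hw, 0, sizeof(*hw));
+ *	hw->back = adapter;
+ *	hw->hw_addr = adapter->io_addr;
+ *	hw->device_id = pdev->device;
+ *	hw->vendor_id = pdev->vendor;
+ *	hw->subsystem_device_id = pdev->subsystem_device;
+ *	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ *	hw->revision_id = pdev->revision;
+ *	err = txgbe_init_shared_code(hw);
+ *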
The following fields in + * hw structure should be filled in prior to calling this function: + * hw_addr, back, device_id, vendor_id, subsystem_device_id, + * subsystem_vendor_id, and revision_id + **/ +int txgbe_init_shared_code(struct txgbe_hw *hw) +{ + s32 status; + + txgbe_set_mac_type(hw); + + switch (hw->mac.type) { + case txgbe_mac_sp: + status = txgbe_init_ops_sp(hw); + break; + case txgbe_mac_aml: + status = txgbe_init_ops_aml(hw); + break; + case txgbe_mac_aml40: + status = txgbe_init_ops_aml40(hw); + break; + default: + status = TXGBE_ERR_DEVICE_NOT_SUPPORTED; + break; + } + + return status; +} + + +s32 txgbe_init_ops_generic(struct txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + struct txgbe_phy_info *phy = &hw->phy; + struct txgbe_eeprom_info *eeprom = &hw->eeprom; + struct txgbe_flash_info *flash = &hw->flash; + s32 ret_val = 0; + + /* PHY */ + phy->ops.reset = txgbe_reset_phy; + phy->ops.read_reg = txgbe_read_phy_reg; + phy->ops.write_reg = txgbe_write_phy_reg; + phy->ops.read_reg_mdi = txgbe_read_phy_reg_mdi; + phy->ops.write_reg_mdi = txgbe_write_phy_reg_mdi; + phy->ops.setup_link = txgbe_setup_phy_link; + phy->ops.setup_link_speed = txgbe_setup_phy_link_speed; + phy->ops.get_firmware_version = txgbe_get_phy_firmware_version; + phy->ops.read_i2c_byte = txgbe_read_i2c_byte; + phy->ops.write_i2c_byte = txgbe_write_i2c_byte; + phy->ops.read_i2c_sff8472 = txgbe_read_i2c_sff8472; + phy->ops.read_i2c_sff8636 = txgbe_read_i2c_sff8636; + phy->ops.read_i2c_eeprom = txgbe_read_i2c_eeprom; + phy->ops.read_i2c_sfp_phy = txgbe_read_i2c_sfp_phy; + phy->ops.write_i2c_eeprom = txgbe_write_i2c_eeprom; + phy->ops.identify_sfp = txgbe_identify_module; + phy->sfp_type = txgbe_sfp_type_unknown; + phy->ops.check_overtemp = txgbe_tn_check_overtemp; + phy->ops.identify = txgbe_identify_phy; + + /* MAC */ + mac->ops.init_hw = txgbe_init_hw; + mac->ops.clear_hw_cntrs = txgbe_clear_hw_cntrs; + mac->ops.get_mac_addr = txgbe_get_mac_addr; + mac->ops.stop_adapter = txgbe_stop_adapter; + mac->ops.get_bus_info = txgbe_get_bus_info; + mac->ops.set_lan_id = txgbe_set_lan_id_multi_port_pcie; + mac->ops.acquire_swfw_sync = txgbe_acquire_swfw_sync; + mac->ops.release_swfw_sync = txgbe_release_swfw_sync; + mac->ops.reset_hw = txgbe_reset_hw; + mac->ops.get_media_type = NULL; + mac->ops.disable_sec_rx_path = txgbe_disable_sec_rx_path; + mac->ops.enable_sec_rx_path = txgbe_enable_sec_rx_path; + mac->ops.disable_sec_tx_path = txgbe_disable_sec_tx_path; + mac->ops.enable_sec_tx_path = txgbe_enable_sec_tx_path; + mac->ops.enable_rx_dma = txgbe_enable_rx_dma; + mac->ops.start_hw = txgbe_start_hw; + mac->ops.get_san_mac_addr = txgbe_get_san_mac_addr; + mac->ops.set_san_mac_addr = txgbe_set_san_mac_addr; + mac->ops.get_device_caps = txgbe_get_device_caps; + mac->ops.get_wwn_prefix = txgbe_get_wwn_prefix; + mac->ops.setup_eee = txgbe_setup_eee; + + /* LEDs */ + mac->ops.led_on = txgbe_led_on; + mac->ops.led_off = txgbe_led_off; + + /* RAR, Multicast, VLAN */ + mac->ops.set_rar = txgbe_set_rar; + mac->ops.clear_rar = txgbe_clear_rar; + mac->ops.init_rx_addrs = txgbe_init_rx_addrs; + mac->ops.update_uc_addr_list = txgbe_update_uc_addr_list; + mac->ops.update_mc_addr_list = txgbe_update_mc_addr_list; + mac->ops.enable_mc = txgbe_enable_mc; + mac->ops.disable_mc = txgbe_disable_mc; + mac->ops.enable_rx = txgbe_enable_rx; + mac->ops.disable_rx = txgbe_disable_rx; + mac->ops.set_vmdq_san_mac = txgbe_set_vmdq_san_mac; + mac->ops.insert_mac_addr = txgbe_insert_mac_addr; + mac->rar_highwater = 1; + mac->ops.set_vfta 
= txgbe_set_vfta; + mac->ops.set_vlvf = txgbe_set_vlvf; + mac->ops.clear_vfta = txgbe_clear_vfta; + mac->ops.init_uta_tables = txgbe_init_uta_tables; + mac->ops.set_mac_anti_spoofing = txgbe_set_mac_anti_spoofing; + mac->ops.set_vlan_anti_spoofing = txgbe_set_vlan_anti_spoofing; + mac->ops.set_ethertype_anti_spoofing = + txgbe_set_ethertype_anti_spoofing; + + /* Flow Control */ + mac->ops.fc_enable = txgbe_fc_enable; + mac->ops.setup_fc = txgbe_setup_fc; + + /* Link */ + mac->ops.get_link_capabilities = NULL; + mac->ops.check_link = NULL; + mac->ops.setup_rxpba = txgbe_set_rxpba; + mac->mcft_size = TXGBE_SP_MC_TBL_SIZE; + mac->vft_size = TXGBE_SP_VFT_TBL_SIZE; + mac->num_rar_entries = TXGBE_SP_RAR_ENTRIES; + mac->rx_pb_size = TXGBE_SP_RX_PB_SIZE; + mac->max_rx_queues = TXGBE_SP_MAX_RX_QUEUES; + mac->max_tx_queues = TXGBE_SP_MAX_TX_QUEUES; + mac->max_msix_vectors = txgbe_get_pcie_msix_count(hw); + + mac->arc_subsystem_valid = (rd32(hw, TXGBE_MIS_ST) & + TXGBE_MIS_ST_MNG_INIT_DN) ? true : false; + + hw->mbx.ops.init_params = txgbe_init_mbx_params_pf; + + /* EEPROM */ + eeprom->ops.init_params = txgbe_init_eeprom_params; + eeprom->ops.calc_checksum = txgbe_calc_eeprom_checksum; + eeprom->ops.read = txgbe_read_ee_hostif; + eeprom->ops.read_buffer = txgbe_read_ee_hostif_buffer; + eeprom->ops.write = txgbe_write_ee_hostif; + eeprom->ops.write_buffer = txgbe_write_ee_hostif_buffer; + eeprom->ops.update_checksum = txgbe_update_eeprom_checksum; + eeprom->ops.validate_checksum = txgbe_validate_eeprom_checksum; + + /* FLASH */ + flash->ops.init_params = txgbe_init_flash_params; + flash->ops.read_buffer = txgbe_read_flash_buffer; + flash->ops.write_buffer = txgbe_write_flash_buffer; + + /* Manageability interface */ + mac->ops.set_fw_drv_ver = txgbe_set_fw_drv_ver; + + mac->ops.get_thermal_sensor_data = + txgbe_get_thermal_sensor_data; + mac->ops.init_thermal_sensor_thresh = + txgbe_init_thermal_sensor_thresh; + + mac->ops.get_rtrup2tc = txgbe_dcb_get_rtrup2tc; + + return ret_val; +} + +/** + * txgbe_get_link_capabilities - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + * + * Determines the link capabilities by reading the AUTOC register. + **/ +s32 txgbe_get_link_capabilities_sp(struct txgbe_hw *hw, + u32 *speed, + bool *autoneg) +{ + s32 status = 0; + u32 sr_pcs_ctl, sr_pma_mmd_ctl1, sr_an_mmd_ctl; + u32 sr_an_mmd_adv_reg2; + + if (hw->phy.multispeed_fiber) { + *speed = TXGBE_LINK_SPEED_10GB_FULL | + TXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + } else if (hw->dac_sfp) { + *autoneg = true; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_KR; + *speed = TXGBE_LINK_SPEED_10GB_FULL; + } else if (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core1 || + hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core1) { + /* Check if 1G SFP module. 
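+ * These 1G-only module types cannot run at 10G, so only 1G full
+ * duplex is advertised and autoneg is reported as supported.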
*/ + *speed = TXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + } + /* SFP */ + else if (TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_fiber) { + *speed = TXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + } + /* XAUI */ + else if ((TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_copper) && + ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI || + (hw->subsystem_device_id & 0xF0) == TXGBE_ID_SFI_XAUI)) { + *speed = TXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_T; + } + /* SGMII */ + else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_SGMII) { + *speed = TXGBE_LINK_SPEED_1GB_FULL | + TXGBE_LINK_SPEED_100_FULL | + TXGBE_LINK_SPEED_10_FULL; + *autoneg = false; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_1000BASE_T | + TXGBE_PHYSICAL_LAYER_100BASE_TX; + /* MAC XAUI */ + } else if((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_XAUI) { + *speed = TXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_KX4; + /* MAC SGMII */ + } else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) { + *speed = TXGBE_LINK_SPEED_1GB_FULL; + *autoneg = false; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_1000BASE_KX; + } + /* KR KX KX4 */ + else { + /* + * Determine link capabilities based on the stored value, + * which represents EEPROM defaults. If value has not + * been stored, use the current register values. + */ + if (hw->mac.orig_link_settings_stored) { + sr_pcs_ctl = hw->mac.orig_sr_pcs_ctl2; + sr_pma_mmd_ctl1 = hw->mac.orig_sr_pma_mmd_ctl1; + sr_an_mmd_ctl = hw->mac.orig_sr_an_mmd_ctl; + sr_an_mmd_adv_reg2 = hw->mac.orig_sr_an_mmd_adv_reg2; + } else { + sr_pcs_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_PCS_CTL2); + sr_pma_mmd_ctl1 = txgbe_rd32_epcs(hw, + TXGBE_SR_PMA_MMD_CTL1); + sr_an_mmd_ctl = txgbe_rd32_epcs(hw, + TXGBE_SR_AN_MMD_CTL); + sr_an_mmd_adv_reg2 = txgbe_rd32_epcs(hw, + TXGBE_SR_AN_MMD_ADV_REG2); + } + + if ((sr_pcs_ctl & TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_MASK) == + TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X && + (sr_pma_mmd_ctl1 & TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_MASK) + == TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_1G && + (sr_an_mmd_ctl & TXGBE_SR_AN_MMD_CTL_ENABLE) == 0) { + /* 1G or KX - no backplane auto-negotiation */ + *speed = TXGBE_LINK_SPEED_1GB_FULL; + *autoneg = false; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_1000BASE_KX; + } else if ((sr_pcs_ctl & TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_MASK) == + TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X && + (sr_pma_mmd_ctl1 & TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_MASK) + == TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_10G && + (sr_an_mmd_ctl & TXGBE_SR_AN_MMD_CTL_ENABLE) == 0) { + *speed = TXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_KX4; + } else if ((sr_pcs_ctl & TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_MASK) == + TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_R && + (sr_an_mmd_ctl & TXGBE_SR_AN_MMD_CTL_ENABLE) == 0) { + /* 10 GbE serial link (KR -no backplane auto-negotiation) */ + *speed = TXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_KR; + } else if ((sr_an_mmd_ctl & TXGBE_SR_AN_MMD_CTL_ENABLE)) { + /* KX/KX4/KR backplane auto-negotiation enable */ + *speed = TXGBE_LINK_SPEED_UNKNOWN; + if (sr_an_mmd_adv_reg2 & + TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KR) + *speed |= TXGBE_LINK_SPEED_10GB_FULL; + if (sr_an_mmd_adv_reg2 & + TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KX4) + *speed |= TXGBE_LINK_SPEED_10GB_FULL; + if (sr_an_mmd_adv_reg2 & + TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KX) + *speed |= TXGBE_LINK_SPEED_1GB_FULL; + *autoneg = 
true; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_KR | + TXGBE_PHYSICAL_LAYER_10GBASE_KX4 | + TXGBE_PHYSICAL_LAYER_1000BASE_KX; + } else { + status = TXGBE_ERR_LINK_SETUP; + goto out; + } + } + +out: + return status; +} + +/** + * txgbe_get_media_type_sp - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +enum txgbe_media_type txgbe_get_media_type_sp(struct txgbe_hw *hw) +{ + enum txgbe_media_type media_type; + u8 device_type = hw->subsystem_device_id & 0xF0; + + /* Detect if there is a copper PHY attached. */ + switch (hw->phy.type) { + case txgbe_phy_cu_unknown: + case txgbe_phy_tn: + media_type = txgbe_media_type_copper; + goto out; + default: + break; + } + + switch (device_type) { + case TXGBE_ID_MAC_XAUI: + case TXGBE_ID_MAC_SGMII: + case TXGBE_ID_KR_KX_KX4: + /* Default device ID is mezzanine card KX/KX4 */ + media_type = txgbe_media_type_backplane; + break; + case TXGBE_ID_SFP: + media_type = txgbe_media_type_fiber; + break; + case TXGBE_ID_XAUI: + case TXGBE_ID_SGMII: + media_type = txgbe_media_type_copper; + break; + case TXGBE_ID_SFI_XAUI: + if (hw->bus.lan_id == 0) + media_type = txgbe_media_type_fiber; + else + media_type = txgbe_media_type_copper; + break; + default: + media_type = txgbe_media_type_unknown; + break; + } +out: + return media_type; +} + +/** + * txgbe_stop_mac_link_on_d3 - Disables link on D3 + * @hw: pointer to hardware structure + * + * Disables link during D3 power down sequence. + * + **/ +void txgbe_stop_mac_link_on_d3(struct txgbe_hw *hw) +{ + /* fix autoc2 */ + UNREFERENCED_PARAMETER(hw); + return; +} + + +/** + * txgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser + * @hw: pointer to hardware structure + * + * The base drivers may require better control over SFP+ module + * PHY states. This includes selectively shutting down the Tx + * laser on the PHY, effectively halting physical link. + **/ +void txgbe_disable_tx_laser_multispeed_fiber(struct txgbe_hw *hw) +{ + u32 esdp_reg = rd32(hw, TXGBE_GPIO_DR); + + if (!((TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_fiber) || + (TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_fiber_qsfp))) + return; + /* Blocked by MNG FW so bail */ + txgbe_check_reset_blocked(hw); + + if (txgbe_close_notify(hw)) { + /* over write led when ifconfig down */ + if (hw->mac.type == txgbe_mac_aml40) { + TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_UP | TXGBE_AMLITE_LED_LINK_40G | + TXGBE_AMLITE_LED_LINK_ACTIVE); + } else if (hw->mac.type == txgbe_mac_aml) + TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_UP | TXGBE_AMLITE_LED_LINK_25G | + TXGBE_AMLITE_LED_LINK_10G | TXGBE_AMLITE_LED_LINK_ACTIVE); + else + TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_UP | TXGBE_LED_LINK_10G | TXGBE_LED_LINK_1G | TXGBE_LED_LINK_ACTIVE); + } + /* Disable Tx laser; allow 100us to go dark per spec */ + if (hw->mac.type == txgbe_mac_aml40) { + wr32m(hw, TXGBE_GPIO_DDR, TXGBE_GPIO_DR_1, TXGBE_GPIO_DR_1); + esdp_reg &= ~TXGBE_GPIO_DR_1; + } else if (hw->mac.type == txgbe_mac_aml) { + esdp_reg |= TXGBE_GPIO_DR_1; + } else { + esdp_reg |= TXGBE_GPIO_DR_1 | TXGBE_GPIO_DR_0; + } + + wr32(hw, TXGBE_GPIO_DR, esdp_reg); + TXGBE_WRITE_FLUSH(hw); + usec_delay(100); +} + +/** + * txgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser + * @hw: pointer to hardware structure + * + * The base drivers may require better control over SFP+ module + * PHY states. This includes selectively turning on the Tx + * laser on the PHY, effectively starting physical link. 
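+ * This is the counterpart of txgbe_disable_tx_laser_multispeed_fiber() and
+ * is also called from txgbe_flap_tx_laser_multispeed_fiber() when autotry
+ * needs to be restarted.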
+ **/ +void txgbe_enable_tx_laser_multispeed_fiber(struct txgbe_hw *hw) +{ + if (!((TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_fiber) || + (TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_fiber_qsfp))) + return; + + if (txgbe_open_notify(hw)) + /* recover led configure when ifconfig up */ + wr32(hw, TXGBE_CFG_LED_CTL, 0); + + /* Enable Tx laser; allow 100ms to light up */ + if (hw->mac.type == txgbe_mac_aml40) { + wr32m(hw, TXGBE_GPIO_DDR, TXGBE_GPIO_DR_1, TXGBE_GPIO_DR_1); + wr32m(hw, TXGBE_GPIO_DR, TXGBE_GPIO_DR_1, TXGBE_GPIO_DR_1); + } else { + wr32m(hw, TXGBE_GPIO_DR, + TXGBE_GPIO_DR_0 | TXGBE_GPIO_DR_1, 0); + } + TXGBE_WRITE_FLUSH(hw); + msec_delay(100); +} + +/** + * txgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser + * @hw: pointer to hardware structure + * + * When the driver changes the link speeds that it can support, + * it sets autotry_restart to true to indicate that we need to + * initiate a new autotry session with the link partner. To do + * so, we set the speed then disable and re-enable the Tx laser, to + * alert the link partner that it also needs to restart autotry on its + * end. This is consistent with true clause 37 autoneg, which also + * involves a loss of signal. + **/ +void txgbe_flap_tx_laser_multispeed_fiber(struct txgbe_hw *hw) +{ + if (!((TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_fiber) || + (TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_fiber_qsfp))) + return; + + /* Blocked by MNG FW so bail */ + txgbe_check_reset_blocked(hw); + + if (hw->mac.autotry_restart) { + txgbe_disable_tx_laser_multispeed_fiber(hw); + txgbe_enable_tx_laser_multispeed_fiber(hw); + hw->mac.autotry_restart = false; + } +} + +/** + * txgbe_set_hard_rate_select_speed - Set module link speed + * @hw: pointer to hardware structure + * @speed: link speed to set + * + * Set module link speed via RS0/RS1 rate select pins. 
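+ * For 10G both rate-select GPIOs are driven high and for 1G both are driven
+ * low; 25G programming is still a TODO on amlite parts, and any other speed
+ * is rejected with a debug message.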
+ */ +void txgbe_set_hard_rate_select_speed(struct txgbe_hw *hw, + u32 speed) +{ + u32 esdp_reg = rd32(hw, TXGBE_GPIO_DR); + + switch (speed) { + case TXGBE_LINK_SPEED_25GB_FULL: + /*amlite TODO*/ + break; + case TXGBE_LINK_SPEED_10GB_FULL: + esdp_reg |= TXGBE_GPIO_DR_5 | TXGBE_GPIO_DR_4; + break; + case TXGBE_LINK_SPEED_1GB_FULL: + esdp_reg &= ~(TXGBE_GPIO_DR_5 | TXGBE_GPIO_DR_4); + break; + default: + DEBUGOUT("Invalid fixed module speed\n"); + return; + } + + wr32(hw, TXGBE_GPIO_DDR, + TXGBE_GPIO_DDR_5 | TXGBE_GPIO_DDR_4 | + TXGBE_GPIO_DDR_1 | TXGBE_GPIO_DDR_0); + + wr32(hw, TXGBE_GPIO_DR, esdp_reg); + + TXGBE_WRITE_FLUSH(hw); +} +s32 txgbe_set_sgmii_an37_ability(struct txgbe_hw *hw) +{ + u32 value; + struct txgbe_adapter *adapter = hw->back; + + txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, 0x3002); + + /* for sgmii + external phy, set to 0x0105 (phy sgmii mode) */ + /* for sgmii direct link, set to 0x010c (mac sgmii mode) */ + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII || + TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_fiber) { + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_AN_CTL, 0x010c); + } else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_SGMII || + (hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) { + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_AN_CTL, 0x0105); + } + + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_DIGI_CTL, 0x0200); + + value = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); + value = (value & ~0x1200) | (0x1 << 9); + if(adapter->autoneg) + value |= (0x1 << 12); + + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_CTL, value); + + return 0; +} + +int txgbe_enable_rx_adapter(struct txgbe_hw *hw) +{ + int ret = 0; + u32 value; + + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL); + value |= BIT(12); + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, value); + + value = 0; + ret = read_poll_timeout(txgbe_rd32_epcs, value, (value & BIT(11)), 1000, + 200000, false, hw, TXGBE_PHY_RX_AD_ACK); + if (ret) + return -ETIMEDOUT; + + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL); + value &= ~BIT(12); + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, value); + + return 0; +} + +s32 txgbe_set_link_to_kr(struct txgbe_hw *hw, bool autoneg) +{ + u32 i; + s32 status = 0; + u32 value = 0; + struct txgbe_adapter *adapter = hw->back; + + /* 1. Wait xpcs power-up good */ + for (i = 0; i < TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK) == + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD) + break; + msleep(10); + } + if (i == TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME) { + status = TXGBE_ERR_XPCS_POWER_UP_FAILED; + goto out; + } + + txgbe_wr32_epcs(hw, 0x78002, 0x0); + txgbe_wr32_epcs(hw, 0x78001, 0x7); + if (AN73_TRAINNING_MODE == 1) + txgbe_wr32_epcs(hw, TXGBE_VR_AN_KR_MODE_CL, 0x1); + + /* 2. Disable xpcs AN-73 */ + if (adapter->backplane_an == 1) { + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x3000); + /* bit8:CA_TX_EQ bit7:an_preset bit6:TX_EQ_OVR_RIDE */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1); + value &= ~0x40; + value |= BIT(8); + txgbe_wr32_epcs(hw, 0x18037, value); + } else { + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0); + txgbe_wr32_epcs(hw, TXGBE_VR_AN_KR_MODE_CL, 0x0); + } + + if (KR_FEC == 1) + txgbe_wr32_epcs(hw, 0x70012, 0xc000 | txgbe_rd32_epcs(hw, 0x70012)); + + //txgbe_wr32_epcs(hw, 0x18006, 0xffff); + //txgbe_wr32_epcs(hw, 0x18008, 0xA697); + + /* 3. 
Set VR_XS_PMA_Gen5_12G_MPLLA_CTRL3 Register */ + /* Bit[10:0](MPLLA_BANDWIDTH) = 11'd123 (default: 11'd16) */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3, + TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_10GBASER_KR); + + /* 4. Set VR_XS_PMA_Gen5_12G_MISC_CTRL0 Register */ + /* Bit[12:8](RX_VREF_CTRL) = 5'hF (default: 5'h11) */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, + 0xCF00); + + /* 5. Set VR_XS_PMA_Gen5_12G_RX_EQ_CTRL0 Register */ + /* Bit[15:8](VGA1/2_GAIN_0) = 8'h77, Bit[7:5](CTLE_POLE_0) = 3'h2 + * Bit[4:0](CTLE_BOOST_0) = 4'hA + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, + 0x774A); + + /* 6. Set VR_MII_Gen5_12G_RX_GENCTRL3 Register */ + /* Bit[2:0](LOS_TRSHLD_0) = 3'h4 (default: 3) */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3, + 0x0004); + /* 7. Initialize the mode by setting VR XS or PCS MMD Digital */ + /* Control1 Register Bit[15](VR_RST) */ + txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, + 0xA000); + /* wait phy initialization done */ + for (i = 0; i < TXGBE_PHY_INIT_DONE_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, + TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0) + break; + msleep(100); + } + if (i == TXGBE_PHY_INIT_DONE_POLLING_TIME) { + status = TXGBE_ERR_PHY_INIT_NOT_DONE; + goto out; + } + + if ((KR_SET == 1) || (adapter->ffe_set == TXGBE_BP_M_KR)) { + e_info(hw, "Set KR TX_EQ MAIN:%d PRE:%d POST:%d\n", + adapter->ffe_main,adapter->ffe_pre,adapter->ffe_post); + value = (0x1804 & ~0x3F3F); + value |= adapter->ffe_main << 8 | adapter->ffe_pre; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + + value = (0x50 & ~0x7F) | (1 << 6)| adapter->ffe_post; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } +out: + return status; +} + +s32 txgbe_set_link_to_kx4(struct txgbe_hw *hw, bool autoneg) +{ + u32 i; + s32 status = 0; + u32 value; + struct txgbe_adapter *adapter = hw->back; + + /* check link status, if already set, skip setting it again */ + if (hw->link_status == TXGBE_LINK_STATUS_KX4) { + goto out; + } + e_dev_info("It is set to kx4.\n"); + + /* 1. Wait xpcs power-up good */ + for (i = 0; i < TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK) == + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD) + break; + msleep(10); + } + if (i == TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME) { + status = TXGBE_ERR_XPCS_POWER_UP_FAILED; + goto out; + } + + wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, + ~TXGBE_MAC_TX_CFG_TE); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, + ~TXGBE_MAC_RX_CFG_RE); + + TCALL(hw, mac.ops.disable_sec_tx_path); + + + /* 2. 
Disable xpcs AN-73 */ + if (!autoneg) + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0); + else + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x3000); + + if (hw->revision_id == TXGBE_SP_MPW) { + /* Disable PHY MPLLA */ + txgbe_wr32_ephy(hw, 0x4, 0x2501); + /* Reset rx lane0-3 clock */ + txgbe_wr32_ephy(hw, 0x1005, 0x4001); + txgbe_wr32_ephy(hw, 0x1105, 0x4001); + txgbe_wr32_ephy(hw, 0x1205, 0x4001); + txgbe_wr32_ephy(hw, 0x1305, 0x4001); + } else { + /* Disable PHY MPLLA for eth mode change(after ECO) */ + txgbe_wr32_ephy(hw, 0x4, 0x250A); + TXGBE_WRITE_FLUSH(hw); + msleep(1); + + /* Set the eth change_mode bit first in mis_rst register + * for corresponding LAN port + */ + if (hw->bus.lan_id == 0) + wr32(hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_LAN0_CHG_ETH_MODE); + else + wr32(hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_LAN1_CHG_ETH_MODE); + } + + /* Set SR PCS Control2 Register Bits[1:0] = 2'b01 PCS_TYPE_SEL: non KR */ + txgbe_wr32_epcs(hw, TXGBE_SR_PCS_CTL2, + TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X); + /* Set SR PMA MMD Control1 Register Bit[13] = 1'b1 SS13: 10G speed */ + txgbe_wr32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1, + TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_10G); + + value = (0xf5f0 & ~0x7F0) | (0x5 << 8) | (0x7 << 5) | 0xF0; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GENCTRL1, value); + + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_XAUI) + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0xCF00); + else + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0x4F00); + + for (i = 0; i < 4; i++) { + if (i == 0) + value = (0x45 & ~0xFFFF) | (0x7 << 12) | (0x7 << 8) | 0x6; + else + value = (0xff06 & ~0xFFFF) | (0x7 << 12) | (0x7 << 8) | 0x6; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0 + i, value); + } + + value = 0x0 & ~0x7777; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0, value); + + txgbe_wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0x0); + + value = (0x6db & ~0xFFF) | (0x1 << 9) | (0x1 << 6) | (0x1 << 3) | 0x1; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3, value); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA */ + /* Control 0 Register Bit[7:0] = 8'd40 MPLLA_MULTIPLIER */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL0, + TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_OTHER); + /* Set VR XS, PMA or MII Synopsys Enterprise Gen5 12G PHY MPLLA */ + /* Control 3 Register Bit[10:0] = 11'd86 MPLLA_BANDWIDTH */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3, + TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_OTHER); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */ + /* Calibration Load 0 Register Bit[12:0] = 13'd1360 VCO_LD_VAL_0 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD0, + TXGBE_PHY_VCO_CAL_LD0_OTHER); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */ + /* Calibration Load 1 Register Bit[12:0] = 13'd1360 VCO_LD_VAL_1 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD1, + TXGBE_PHY_VCO_CAL_LD0_OTHER); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */ + /* Calibration Load 2 Register Bit[12:0] = 13'd1360 VCO_LD_VAL_2 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD2, + TXGBE_PHY_VCO_CAL_LD0_OTHER); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */ + /* Calibration Load 3 Register Bit[12:0] = 13'd1360 VCO_LD_VAL_3 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD3, + TXGBE_PHY_VCO_CAL_LD0_OTHER); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */ + /* Calibration Reference 0 Register Bit[5:0] = 6'd34 VCO_REF_LD_0/1 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF0, + 0x2222); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */ + /* Calibration Reference 1 Register 
Bit[5:0] = 6'd34 VCO_REF_LD_2/3 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF1, + 0x2222); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY AFE-DFE */ + /* Enable Register Bit[7:0] = 8'd0 AFE_EN_0/3_1, DFE_EN_0/3_1 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, + 0x0); + + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx */ + /* Equalization Control 4 Register Bit[3:0] = 4'd0 CONT_ADAPT_0/3_1 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, + 0x00F0); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx Rate */ + /* Control Register Bit[14:12], Bit[10:8], Bit[6:4], Bit[2:0], + * all rates to 3'b010 TX0/1/2/3_RATE + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_RATE_CTL, + 0x2222); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx Rate */ + /* Control Register Bit[13:12], Bit[9:8], Bit[5:4], Bit[1:0], + * all rates to 2'b10 RX0/1/2/3_RATE + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_RATE_CTL, + 0x2222); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx General */ + /* Control 2 Register Bit[15:8] = 2'b01 TX0/1/2/3_WIDTH: 10bits */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GEN_CTL2, + 0x5500); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx General */ + /* Control 2 Register Bit[15:8] = 2'b01 RX0/1/2/3_WIDTH: 10bits */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL2, + 0x5500); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control + * 2 Register Bit[10:8] = 3'b010 + * MPLLA_DIV16P5_CLK_EN=0, MPLLA_DIV10_CLK_EN=1, MPLLA_DIV8_CLK_EN=0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL2, + TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_10); + + txgbe_wr32_epcs(hw, 0x1f0000, 0x0); + txgbe_wr32_epcs(hw, 0x1f8001, 0x0); + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_DIGI_CTL, 0x0); + + if(KX4_TXRX_PIN == 1) + txgbe_wr32_epcs(hw, 0x38001, 0xff); + /* 10. Initialize the mode by setting VR XS or PCS MMD Digital Control1 + * Register Bit[15](VR_RST) + */ + txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, 0xA000); + /* wait phy initialization done */ + for (i = 0; i < TXGBE_PHY_INIT_DONE_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0) + break; + msleep(100); + } + + /* if success, set link status */ + hw->link_status = TXGBE_LINK_STATUS_KX4; + + if (i == TXGBE_PHY_INIT_DONE_POLLING_TIME) { + status = TXGBE_ERR_PHY_INIT_NOT_DONE; + goto out; + } + + if ((KX4_SET == 1) || (adapter->ffe_set == TXGBE_BP_M_KX4)) { + e_dev_info("Set KX4 TX_EQ MAIN:%d PRE:%d POST:%d\n", + adapter->ffe_main,adapter->ffe_pre,adapter->ffe_post); + value = (0x1804 & ~0x3F3F); + value |= adapter->ffe_main << 8 | adapter->ffe_pre; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + + value = (0x50 & ~0x7F) | (1 << 6)| adapter->ffe_post; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } + +out: + TCALL(hw, mac.ops.enable_sec_tx_path); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, + TXGBE_MAC_RX_CFG_RE); + + return status; + +} + + +s32 txgbe_set_link_to_kx(struct txgbe_hw *hw, + u32 speed, + bool autoneg) +{ + u32 i; + s32 status = 0; + u32 wdata = 0; + u32 value; + struct txgbe_adapter *adapter = hw->back; + + /* check link status, if already set, skip setting it again */ + if (hw->link_status == TXGBE_LINK_STATUS_KX && + (hw->subsystem_device_id & 0xF0) != TXGBE_ID_MAC_SGMII) { + goto out; + } + e_dev_info("It is set to kx. speed =0x%x\n", speed); + + /* 1. 
Wait xpcs power-up good */ + for (i = 0; i < TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK) == + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD) + break; + msleep(10); + } + if (i == TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME) { + status = TXGBE_ERR_XPCS_POWER_UP_FAILED; + goto out; + } + + wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, + ~TXGBE_MAC_TX_CFG_TE); + + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, + ~TXGBE_MAC_RX_CFG_RE); + + TCALL(hw, mac.ops.disable_sec_tx_path); + + /* 2. Disable xpcs AN-73 */ + if (!autoneg) + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0); + else + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x3000); + + if (hw->revision_id == TXGBE_SP_MPW) { + /* Disable PHY MPLLA */ + txgbe_wr32_ephy(hw, 0x4, 0x2401); + /* Reset rx lane0 clock */ + txgbe_wr32_ephy(hw, 0x1005, 0x4001); + } else { + /* Disable PHY MPLLA for eth mode change(after ECO) */ + txgbe_wr32_ephy(hw, 0x4, 0x240A); + TXGBE_WRITE_FLUSH(hw); + msleep(1); + + /* Set the eth change_mode bit first in mis_rst register */ + /* for corresponding LAN port */ + if (hw->bus.lan_id == 0) + wr32(hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_LAN0_CHG_ETH_MODE); + else + wr32(hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_LAN1_CHG_ETH_MODE); + } + + /* Set SR PCS Control2 Register Bits[1:0] = 2'b01 PCS_TYPE_SEL: non KR */ + txgbe_wr32_epcs(hw, TXGBE_SR_PCS_CTL2, + TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X); + + /* Set SR PMA MMD Control1 Register Bit[13] = 1'b0 SS13: 1G speed */ + txgbe_wr32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1, + TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_1G); + + /* Set SR MII MMD Control Register to corresponding speed: {Bit[6], + * Bit[13]}=[2'b00,2'b01,2'b10]->[10M,100M,1G] + */ + if (speed == TXGBE_LINK_SPEED_100_FULL) + wdata = 0x2100; + else if (speed == TXGBE_LINK_SPEED_1GB_FULL) + wdata = 0x0140; + else if (speed == TXGBE_LINK_SPEED_10_FULL) + wdata = 0x0100; + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_CTL, + wdata); + + value = (0xf5f0 & ~0x710) | (0x5 << 8)| 0x10; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GENCTRL1, value); + + if (KX_SGMII == 1) + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0x4F00); + else + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0xCF00); + + for (i = 0; i < 4; i++) { + if (i) { + value = 0xff06; + } else { + value = (0x45 & ~0xFFFF) | (0x7 << 12) | (0x7 << 8) | 0x6; + } + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0 + i, value); + } + + value = 0x0 & ~0x7; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0, value); + + txgbe_wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0x0); + + value = (0x6db & ~0x7) | 0x4; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3, value); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control + * 0 Register Bit[7:0] = 8'd32 MPLLA_MULTIPLIER + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL0, + TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_1GBASEX_KX); + + /* Set VR XS, PMA or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control 3 + * Register Bit[10:0] = 11'd70 MPLLA_BANDWIDTH + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3, + TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_1GBASEX_KX); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO + * Calibration Load 0 Register Bit[12:0] = 13'd1344 VCO_LD_VAL_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD0, + TXGBE_PHY_VCO_CAL_LD0_1GBASEX_KX); + + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD1, 0x549); + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD2, 0x549); + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD3, 0x549); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G 
PHY VCO + * Calibration Reference 0 Register Bit[5:0] = 6'd42 VCO_REF_LD_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF0, + TXGBE_PHY_VCO_CAL_REF0_LD0_1GBASEX_KX); + + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF1, 0x2929); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY AFE-DFE Enable + * Register Bit[4], Bit[0] = 1'b0 AFE_EN_0, DFE_EN_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, + 0x0); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx + * Equalization Control 4 Register Bit[0] = 1'b0 CONT_ADAPT_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, + 0x0010); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx Rate + * Control Register Bit[2:0] = 3'b011 TX0_RATE + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_RATE_CTL, + TXGBE_PHY_TX_RATE_CTL_TX0_RATE_1GBASEX_KX); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx Rate + * Control Register Bit[2:0] = 3'b011 RX0_RATE + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_RATE_CTL, + TXGBE_PHY_RX_RATE_CTL_RX0_RATE_1GBASEX_KX); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx General + * Control 2 Register Bit[9:8] = 2'b01 TX0_WIDTH: 10bits + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GEN_CTL2, + TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_OTHER); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx General + * Control 2 Register Bit[9:8] = 2'b01 RX0_WIDTH: 10bits + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL2, + TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_OTHER); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control + * 2 Register Bit[10:8] = 3'b010 MPLLA_DIV16P5_CLK_EN=0, + * MPLLA_DIV10_CLK_EN=1, MPLLA_DIV8_CLK_EN=0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL2, + TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_10); + /* VR MII MMD AN Control Register Bit[8] = 1'b1 MII_CTRL */ + /* Set to 8bit MII (required in 10M/100M SGMII) */ + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_AN_CTL, + 0x0100); + + /* 10. Initialize the mode by setting VR XS or PCS MMD Digital Control1 + * Register Bit[15](VR_RST) + */ + txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, 0xA000); + /* wait phy initialization done */ + for (i = 0; i < TXGBE_PHY_INIT_DONE_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0) + break; + msleep(100); + } + + /* if success, set link status */ + hw->link_status = TXGBE_LINK_STATUS_KX; + + if (i == TXGBE_PHY_INIT_DONE_POLLING_TIME) { + status = TXGBE_ERR_PHY_INIT_NOT_DONE; + goto out; + } + + if ((KX_SET == 1) || (adapter->ffe_set == TXGBE_BP_M_KX)) { + e_dev_info("Set KX TX_EQ MAIN:%d PRE:%d POST:%d\n", + adapter->ffe_main,adapter->ffe_pre,adapter->ffe_post); + /* 5. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL0 Register Bit[13:8](TX_EQ_MAIN) + * = 6'd30, Bit[5:0](TX_EQ_PRE) = 6'd4 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0); + value = (value & ~0x3F3F) | (adapter->ffe_main << 8) | adapter->ffe_pre; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + /* 6. 
Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL1 Register Bit[6](TX_EQ_OVR_RIDE) + * = 1'b1, Bit[5:0](TX_EQ_POST) = 6'd36 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1); + value = (value & ~0x7F) | adapter->ffe_post | (1 << 6); + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } + +out: + TCALL(hw, mac.ops.enable_sec_tx_path); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, + TXGBE_MAC_RX_CFG_RE); + + return status; +} + +static s32 txgbe_set_link_to_sfi(struct txgbe_hw *hw, + u32 speed) +{ + u32 i; + s32 status = 0; + u32 value = 0; + struct txgbe_adapter *adapter = hw->back; + + /* Set the module link speed */ + TCALL(hw, mac.ops.set_rate_select_speed, + speed); + + /* 1. Wait xpcs power-up good */ + for (i = 0; i < TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK) == + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD) + break; + msleep(10); + } + if (i == TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME) { + status = TXGBE_ERR_XPCS_POWER_UP_FAILED; + goto out; + } + + wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, + ~TXGBE_MAC_TX_CFG_TE); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, + ~TXGBE_MAC_RX_CFG_RE); + + TCALL(hw, mac.ops.disable_sec_tx_path); + + /* 2. Disable xpcs AN-73 */ + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0); + + if (hw->revision_id != TXGBE_SP_MPW) { + /* Disable PHY MPLLA for eth mode change(after ECO) */ + txgbe_wr32_ephy(hw, 0x4, 0x243A); + TXGBE_WRITE_FLUSH(hw); + msleep(1); + /* Set the eth change_mode bit first in mis_rst register + * for corresponding LAN port + */ + if (hw->bus.lan_id == 0) + wr32(hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_LAN0_CHG_ETH_MODE); + else + wr32(hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_LAN1_CHG_ETH_MODE); + } + if (speed == TXGBE_LINK_SPEED_10GB_FULL) { + /* @. Set SR PCS Control2 Register Bits[1:0] = 2'b00 PCS_TYPE_SEL: KR */ + txgbe_wr32_epcs(hw, TXGBE_SR_PCS_CTL2, 0); + value = txgbe_rd32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1); + value = value | 0x2000; + txgbe_wr32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1, value); + /* @. Set VR_XS_PMA_Gen5_12G_MPLLA_CTRL0 Register Bit[7:0] = 8'd33 + * MPLLA_MULTIPLIER + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL0, 0x0021); + /* 3. Set VR_XS_PMA_Gen5_12G_MPLLA_CTRL3 Register + * Bit[10:0](MPLLA_BANDWIDTH) = 11'd0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3, 0); + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_GENCTRL1); + value = (value & ~0x700) | 0x500; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GENCTRL1, value); + /* 4.Set VR_XS_PMA_Gen5_12G_MISC_CTRL0 Register Bit[12:8](RX_VREF_CTRL) + * = 5'hF + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0xCF00); + /* @. Set VR_XS_PMA_Gen5_12G_VCO_CAL_LD0 Register Bit[12:0] = 13'd1353 + * VCO_LD_VAL_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD0, 0x0549); + /* @. Set VR_XS_PMA_Gen5_12G_VCO_CAL_REF0 Register Bit[5:0] = 6'd41 + * VCO_REF_LD_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF0, 0x0029); + /* @. Set VR_XS_PMA_Gen5_12G_TX_RATE_CTRL Register Bit[2:0] = 3'b000 + * TX0_RATE + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_RATE_CTL, 0); + /* @. Set VR_XS_PMA_Gen5_12G_RX_RATE_CTRL Register Bit[2:0] = 3'b000 + * RX0_RATE + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_RATE_CTL, 0); + /* @. Set VR_XS_PMA_Gen5_12G_TX_GENCTRL2 Register Bit[9:8] = 2'b11 + * TX0_WIDTH: 20bits + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GEN_CTL2, 0x0300); + /* @. Set VR_XS_PMA_Gen5_12G_RX_GENCTRL2 Register Bit[9:8] = 2'b11 + * RX0_WIDTH: 20bits + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL2, 0x0300); + /* @. 
Set VR_XS_PMA_Gen5_12G_MPLLA_CTRL2 Register Bit[10:8] = 3'b110 + * MPLLA_DIV16P5_CLK_EN=1, MPLLA_DIV10_CLK_EN=1, MPLLA_DIV8_CLK_EN=0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL2, 0x0600); + + if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) { + /* 7. Set VR_XS_PMA_Gen5_12G_RX_EQ_CTRL0 Register + * Bit[15:8](VGA1/2_GAIN_0) = 8'h77, Bit[7:5] + * (CTLE_POLE_0) = 3'h2, Bit[4:0](CTLE_BOOST_0) = 4'hF + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, 0x774F); + + } else { + /* 7. Set VR_XS_PMA_Gen5_12G_RX_EQ_CTRL0 Register Bit[15:8] + * (VGA1/2_GAIN_0) = 8'h00, Bit[7:5](CTLE_POLE_0) = 3'h2, + * Bit[4:0](CTLE_BOOST_0) = 4'hA + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0); + value = (value & ~0xFFFF) | (2 << 5) | 0x05; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, value); + } + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0); + value = (value & ~0x7) | 0x0; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0, value); + + if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) { + /* 8. Set VR_XS_PMA_Gen5_12G_DFE_TAP_CTRL0 Register Bit[7:0](DFE_TAP1_0) + * = 8'd20 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0x0014); + value = txgbe_rd32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE); + value = (value & ~0x11) | 0x11; + txgbe_wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, value); + } else { + /* 8. Set VR_XS_PMA_Gen5_12G_DFE_TAP_CTRL0 Register Bit[7:0](DFE_TAP1_0) + * = 8'd20 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0xBE); + /* 9. Set VR_MII_Gen5_12G_AFE_DFE_EN_CTRL Register Bit[4](DFE_EN_0) = + * 1'b0, Bit[0](AFE_EN_0) = 1'b0 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE); + value = (value & ~0x11) | 0x0; + txgbe_wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, value); + } + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL); + value = value & ~0x1; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, value); + } else { + if (hw->revision_id == TXGBE_SP_MPW) { + /* Disable PHY MPLLA */ + txgbe_wr32_ephy(hw, 0x4, 0x2401); + /* Reset rx lane0 clock */ + txgbe_wr32_ephy(hw, 0x1005, 0x4001); + } + /* @. Set SR PCS Control2 Register Bits[1:0] = 2'b00 PCS_TYPE_SEL: KR */ + txgbe_wr32_epcs(hw, TXGBE_SR_PCS_CTL2, 0x1); + /* Set SR PMA MMD Control1 Register Bit[13] = 1'b0 SS13: 1G speed */ + txgbe_wr32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1, 0x0000); + /* Set SR MII MMD Control Register to corresponding speed: */ + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_CTL, 0x0140); + + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_GENCTRL1); + value = (value & ~0x710) | 0x500; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GENCTRL1, value); + /* 4. Set VR_XS_PMA_Gen5_12G_MISC_CTRL0 Register Bit[12:8](RX_VREF_CTRL) + * = 5'hF + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0xCF00); + /* 5. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL0 Register Bit[13:8](TX_EQ_MAIN) + * = 6'd30, Bit[5:0](TX_EQ_PRE) = 6'd4 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0); + value = (value & ~0x3F3F) | (24 << 8) | 4; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + /* 6. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL1 Register Bit[6](TX_EQ_OVR_RIDE) + * = 1'b1, Bit[5:0](TX_EQ_POST) = 6'd36 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1); + value = (value & ~0x7F) | 16 | (1 << 6); + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) { + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, 0x774F); + } else { + /* 7. 
Set VR_XS_PMA_Gen5_12G_RX_EQ_CTRL0 Register Bit[15:8] + * (VGA1/2_GAIN_0) = 8'h00, Bit[7:5](CTLE_POLE_0) = 3'h2, + * Bit[4:0](CTLE_BOOST_0) = 4'hA + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0); + value = (value & ~0xFFFF) | 0x7706; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, value); + } + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0); + value = (value & ~0x7) | 0x0; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0, value); + /* 8. Set VR_XS_PMA_Gen5_12G_DFE_TAP_CTRL0 Register Bit[7:0](DFE_TAP1_0) + * = 8'd00 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0x0); + /* Set VR_XS_PMA_Gen5_12G_RX_GENCTRL3 Register Bit[2:0] LOS_TRSHLD_0 = 4 */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3); + value = (value & ~0x7) | 0x4; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3, value); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY + * MPLLA Control 0 Register Bit[7:0] = 8'd32 MPLLA_MULTIPLIER + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL0, 0x0020); + /* Set VR XS, PMA or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control + * 3 Register Bit[10:0] = 11'd70 MPLLA_BANDWIDTH + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3, 0x0046); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO + * Calibration Load 0 Register Bit[12:0] = 13'd1344 VCO_LD_VAL_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD0, 0x0540); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO + * Calibration Reference 0 Register Bit[5:0] = 6'd42 VCO_REF_LD_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF0, 0x002A); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY AFE-DFE + * Enable Register Bit[4], Bit[0] = 1'b0 AFE_EN_0, DFE_EN_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, 0x0); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx + * Equalization Control 4 Register Bit[0] = 1'b0 CONT_ADAPT_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, 0x0010); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx Rate + * Control Register Bit[2:0] = 3'b011 TX0_RATE + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_RATE_CTL, 0x0003); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx Rate + * Control Register Bit[2:0] = 3'b011 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_RATE_CTL, 0x0003); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx General + * Control 2 Register Bit[9:8] = 2'b01 TX0_WIDTH: 10bits + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GEN_CTL2, 0x0100); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx General + * Control 2 Register Bit[9:8] = 2'b01 RX0_WIDTH: 10bits + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL2, 0x0100); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA + * Control 2 Register Bit[10:8] = 3'b010 MPLLA_DIV16P5_CLK_EN=0, + * MPLLA_DIV10_CLK_EN=1, MPLLA_DIV8_CLK_EN=0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL2, 0x0200); + /* VR MII MMD AN Control Register Bit[8] = 1'b1 MII_CTRL */ + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_AN_CTL, 0x0100); + } + /* 10. 
Initialize the mode by setting VR XS or PCS MMD Digital Control1 + * Register Bit[15](VR_RST) + */ + txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, 0xA000); + /* wait phy initialization done */ + for (i = 0; i < TXGBE_PHY_INIT_DONE_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0) + break; + msleep(100); + } + if (i == TXGBE_PHY_INIT_DONE_POLLING_TIME) { + status = TXGBE_ERR_PHY_INIT_NOT_DONE; + goto out; + } + + if ((SFI_SET == 1) || (adapter->ffe_set == TXGBE_BP_M_SFI)) { + e_dev_info("Set SFI TX_EQ MAIN:%d PRE:%d POST:%d\n", + adapter->ffe_main,adapter->ffe_pre,adapter->ffe_post); + /* 5. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL0 Register Bit[13:8](TX_EQ_MAIN) + * = 6'd30, Bit[5:0](TX_EQ_PRE) = 6'd4 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0); + value = (value & ~0x3F3F) | (adapter->ffe_main << 8) | adapter->ffe_pre; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + /* 6. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL1 Register Bit[6](TX_EQ_OVR_RIDE) + * = 1'b1, Bit[5:0](TX_EQ_POST) = 6'd36 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1); + value = (value & ~0x7F) | adapter->ffe_post | (1 << 6); + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } +out: + TCALL(hw, mac.ops.enable_sec_tx_path); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, + TXGBE_MAC_RX_CFG_RE); + + return status; +} + + +/** + * txgbe_setup_mac_link - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +s32 txgbe_setup_mac_link_sp(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + bool autoneg = false; + s32 status = 0; + u32 link_capabilities = TXGBE_LINK_SPEED_UNKNOWN; + struct txgbe_adapter *adapter = hw->back; + u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN; + bool link_up = false; + u32 curr_autoneg = 2; + + /* Check to see if speed passed in is supported. */ + status = TCALL(hw, mac.ops.get_link_capabilities, + &link_capabilities, &autoneg); + if (status) + goto out; + + speed &= link_capabilities; + + if (speed == TXGBE_LINK_SPEED_UNKNOWN) { + status = TXGBE_ERR_LINK_SETUP; + goto out; + } + + if ( ! 
(((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_KR_KX_KX4) || + ((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_MAC_XAUI) || + ((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_MAC_SGMII) || + hw->dac_sfp)) { + status = TCALL(hw, mac.ops.check_link, + &link_speed, &link_up, false); + + if (link_speed == TXGBE_LINK_SPEED_1GB_FULL) { + curr_autoneg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); + curr_autoneg = !!(curr_autoneg & (0x1 << 12)); + } + + if (status != 0) + goto out; + + if ((link_speed == speed) && link_up && + !(speed == TXGBE_LINK_SPEED_1GB_FULL && + (adapter->autoneg != curr_autoneg))) + goto out; + } + + if ((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_KR_KX_KX4 || + hw->dac_sfp) { + txgbe_set_link_to_kr(hw, autoneg); +#if 0 + if (!autoneg) { + switch (hw->phy.link_mode) { + case TXGBE_PHYSICAL_LAYER_10GBASE_KR: + txgbe_set_link_to_kr(hw, autoneg); + break; + case TXGBE_PHYSICAL_LAYER_10GBASE_KX4: + txgbe_set_link_to_kx4(hw, autoneg); + break; + case TXGBE_PHYSICAL_LAYER_1000BASE_KX: + txgbe_set_link_to_kx(hw, speed, autoneg); + break; + default: + status = TXGBE_ERR_PHY; + goto out; + } + } else { + txgbe_set_link_to_kr(hw, autoneg); + } +#endif + } else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI || + ((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_MAC_XAUI) || + (hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_SGMII || + ((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_MAC_SGMII) || + (TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_copper && + (hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_SFI_XAUI)) { + if (speed == TXGBE_LINK_SPEED_10GB_FULL) { + txgbe_set_link_to_kx4(hw, 0); + } else { + txgbe_set_link_to_kx(hw, speed, 0); + txgbe_set_sgmii_an37_ability(hw); + hw->phy.autoneg_advertised |= speed; + } + } else if (TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_fiber) { + if (!((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP && + (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1))) { + txgbe_set_link_to_sfi(hw, speed); + if (speed == TXGBE_LINK_SPEED_1GB_FULL) { + txgbe_setup_fc(hw); + txgbe_set_sgmii_an37_ability(hw); + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_1GB_FULL; + } + } + } +out: + return status; +} + + +/** + * txgbe_setup_copper_link - Set the PHY autoneg advertised field + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true if waiting is needed to complete + * + * Restarts link on PHY and MAC based on settings passed in. 
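+ * The PHY is programmed first through phy.ops.setup_link_speed; the MAC is
+ * then configured via txgbe_setup_mac_link_sp() to the speed the PHY
+ * reported back.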
+ **/ +STATIC s32 txgbe_setup_copper_link(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + s32 status; + u32 link_speed; + + /* Setup the PHY according to input speed */ + link_speed = TCALL(hw, phy.ops.setup_link_speed, speed, + autoneg_wait_to_complete); + + if (link_speed != TXGBE_LINK_SPEED_UNKNOWN) + /* Set up MAC */ + status = txgbe_setup_mac_link_sp(hw, link_speed, autoneg_wait_to_complete); + else { + status = 0; + } + return status; +} + +int txgbe_reconfig_mac(struct txgbe_hw *hw) +{ + u32 mac_wdg_timeout; + u32 mac_flow_ctrl; + + mac_wdg_timeout = rd32(hw, TXGBE_MAC_WDG_TIMEOUT); + mac_flow_ctrl = rd32(hw, TXGBE_MAC_RX_FLOW_CTRL); + + if (hw->bus.lan_id == 0) + wr32(hw, TXGBE_MIS_RST, TXGBE_MIS_RST_LAN0_MAC_RST); + else if (hw->bus.lan_id == 1) + wr32(hw, TXGBE_MIS_RST, TXGBE_MIS_RST_LAN1_MAC_RST); + + /* wait for mac rst complete */ + usec_delay(1500); + wr32m(hw, TXGBE_MAC_MISC_CTL, TXGBE_MAC_MISC_LINK_STS_MOD, + TXGBE_LINK_BOTH_PCS_MAC); + + /* receive packets that size > 2048 */ + wr32m(hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_JE, TXGBE_MAC_RX_CFG_JE); + + /* clear counters on read */ + wr32m(hw, TXGBE_MMC_CONTROL, + TXGBE_MMC_CONTROL_RSTONRD, TXGBE_MMC_CONTROL_RSTONRD); + + wr32m(hw, TXGBE_MAC_RX_FLOW_CTRL, + TXGBE_MAC_RX_FLOW_CTRL_RFE, TXGBE_MAC_RX_FLOW_CTRL_RFE); + + wr32(hw, TXGBE_MAC_PKT_FLT, + TXGBE_MAC_PKT_FLT_PR); + + wr32(hw, TXGBE_MAC_WDG_TIMEOUT, mac_wdg_timeout); + wr32(hw, TXGBE_MAC_RX_FLOW_CTRL, mac_flow_ctrl); + + return 0; +} + +static int txgbe_reset_misc(struct txgbe_hw *hw) +{ + struct txgbe_adapter *adapter = hw->back; + u32 value; + u32 err; + int i; + + if (hw->mac.type == txgbe_mac_aml40) { + if (!(rd32(hw, TXGBE_EPHY_STAT) & TXGBE_EPHY_STAT_PPL_LOCK)) { + err = TCALL(hw, mac.ops.setup_link, TXGBE_LINK_SPEED_40GB_FULL, false); + if (err) { + e_dev_info("txgbe_reset_misc setup phy failed\n"); + return err; + } + } + } else if (hw->mac.type == txgbe_mac_aml) { + if ((rd32(hw, TXGBE_EPHY_STAT) & TXGBE_EPHY_STAT_PPL_LOCK) + != TXGBE_EPHY_STAT_PPL_LOCK) { + err = TCALL(hw, mac.ops.setup_link, TXGBE_LINK_SPEED_AMLITE_AUTONEG, false); + if (err) { + e_dev_info("txgbe_reset_misc setup phy failed\n"); + return err; + } + } + } else { + value = txgbe_rd32_epcs(hw, TXGBE_SR_PCS_CTL2); + if ((value & 0x3) != TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X) + hw->link_status = TXGBE_LINK_STATUS_NONE; + } + + /* receive packets that size > 2048 */ + wr32m(hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_JE, TXGBE_MAC_RX_CFG_JE); + + /* clear counters on read */ + wr32m(hw, TXGBE_MMC_CONTROL, + TXGBE_MMC_CONTROL_RSTONRD, TXGBE_MMC_CONTROL_RSTONRD); + + wr32m(hw, TXGBE_MAC_RX_FLOW_CTRL, + TXGBE_MAC_RX_FLOW_CTRL_RFE, TXGBE_MAC_RX_FLOW_CTRL_RFE); + + wr32(hw, TXGBE_MAC_PKT_FLT, + TXGBE_MAC_PKT_FLT_PR); + + wr32m(hw, TXGBE_MIS_RST_ST, + TXGBE_MIS_RST_ST_RST_INIT, 0xA00); + + wr32(hw, TXGBE_PSR_LAN_FLEX_SEL, 0); + for (i = 0; i < 16; i++) { + wr32(hw, TXGBE_PSR_LAN_FLEX_DW_L(i), 0); + wr32(hw, TXGBE_PSR_LAN_FLEX_DW_H(i), 0); + wr32(hw, TXGBE_PSR_LAN_FLEX_MSK(i), 0); + } + + /* set pause frame dst mac addr */ + wr32(hw, TXGBE_RDB_PFCMACDAL, 0xC2000001); + wr32(hw, TXGBE_RDB_PFCMACDAH, 0x0180); + + txgbe_init_thermal_sensor_thresh(hw); + + return 0; +} + +/** + * txgbe_reset_hw - Perform hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, perform a PHY reset, and perform a link (MAC) + * reset. 
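+ * If hw->force_full_reset is set, a device-level software or global reset
+ * is issued depending on hw->reset_type; otherwise only the local LAN port
+ * is reset. On sapphire parts the EPCS link settings captured before the
+ * reset are stored or restored so the link configuration survives the reset.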
+ **/ +s32 txgbe_reset_hw(struct txgbe_hw *hw) +{ + u32 sr_pcs_ctl, sr_pma_mmd_ctl1, sr_an_mmd_ctl, sr_an_mmd_adv_reg2; + u32 curr_sr_an_mmd_ctl = 0, curr_sr_an_mmd_adv_reg2 = 0; + u32 curr_sr_pcs_ctl = 0, curr_sr_pma_mmd_ctl1 = 0; + struct txgbe_adapter *adapter = hw->back; + u32 curr_vr_xs_or_pcs_mmd_digi_ctl1 = 0; + u32 vr_xs_or_pcs_mmd_digi_ctl1; + u32 reset_status = 0; + u32 rst_delay = 0; + u32 reset = 0; + s32 status; + u32 value; + u32 i; + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = TCALL(hw, mac.ops.stop_adapter); + if (status != 0) + goto reset_hw_out; + + /* Identify PHY and related function pointers */ + status = TCALL(hw, phy.ops.init); + + if (status == TXGBE_ERR_SFP_NOT_SUPPORTED) + goto reset_hw_out; + + if (hw->mac.type == txgbe_mac_sp) { + /* remember internel phy regs from before we reset */ + curr_sr_pcs_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_PCS_CTL2); + curr_sr_pma_mmd_ctl1 = txgbe_rd32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1); + curr_sr_an_mmd_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_CTL); + curr_sr_an_mmd_adv_reg2 = txgbe_rd32_epcs(hw, + TXGBE_SR_AN_MMD_ADV_REG2); + curr_vr_xs_or_pcs_mmd_digi_ctl1 = + txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1); + } + /* + * Issue global reset to the MAC. Needs to be SW reset if link is up. + * If link reset is used when link is up, it might reset the PHY when + * mng is using it. If link is down or the flag to force full link + * reset is set, then perform link reset. + */ + if (hw->force_full_reset) { + rst_delay = (rd32(hw, TXGBE_MIS_RST_ST) & + TXGBE_MIS_RST_ST_RST_INIT) >> + TXGBE_MIS_RST_ST_RST_INI_SHIFT; + if (hw->reset_type == TXGBE_SW_RESET) { + for (i = 0; i < rst_delay + 20; i++) { + reset_status = + rd32(hw, TXGBE_MIS_RST_ST); + if (!(reset_status & + TXGBE_MIS_RST_ST_DEV_RST_ST_MASK)) + break; + msleep(100); + } + + if (reset_status & TXGBE_MIS_RST_ST_DEV_RST_ST_MASK) { + status = TXGBE_ERR_RESET_FAILED; + DEBUGOUT("Global reset polling failed to " + "complete.\n"); + goto reset_hw_out; + } + status = txgbe_check_flash_load(hw, + TXGBE_SPI_ILDR_STATUS_SW_RESET); + if (status != 0) + goto reset_hw_out; + /* errata 7 */ + if (txgbe_mng_present(hw) && + hw->revision_id == TXGBE_SP_MPW) { + struct txgbe_adapter *adapter = + (struct txgbe_adapter *)hw->back; + adapter->flags2 &= + ~TXGBE_FLAG2_MNG_REG_ACCESS_DISABLED; + } + } else if (hw->reset_type == TXGBE_GLOBAL_RESET) { +#ifndef _WIN32 + struct txgbe_adapter *adapter = + (struct txgbe_adapter *)hw->back; + msleep(100 * rst_delay + 2000); + pci_restore_state(adapter->pdev); + pci_save_state(adapter->pdev); + pci_wake_from_d3(adapter->pdev, false); +#endif /*_WIN32*/ + } + } else { + if (hw->bus.lan_id == 0) { + reset = TXGBE_MIS_RST_LAN0_RST; + } else { + reset = TXGBE_MIS_RST_LAN1_RST; + } + + wr32(hw, TXGBE_MIS_RST, + reset | rd32(hw, TXGBE_MIS_RST)); + TXGBE_WRITE_FLUSH(hw); + usec_delay(10); + + if (hw->bus.lan_id == 0) { + status = txgbe_check_flash_load(hw, + TXGBE_SPI_ILDR_STATUS_LAN0_SW_RST); + } else { + status = txgbe_check_flash_load(hw, + TXGBE_SPI_ILDR_STATUS_LAN1_SW_RST); + } + if (status != 0) + goto reset_hw_out; + } + + status = txgbe_reset_misc(hw); + if (status != 0) + goto reset_hw_out; + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + wr32(hw, TXGBE_LINKUP_FILTER, TXGBE_LINKUP_FILTER_TIME); + wr32m(hw, TXGBE_MAC_MISC_CTL, TXGBE_MAC_MISC_LINK_STS_MOD, + TXGBE_LINK_BOTH_PCS_MAC); + /* amlite: bme */ + wr32(hw, PX_PF_BME, 0x1); + /* amlite: rdm_rsc_ctl_free_ctl set to 1 */ + wr32m(hw, 
TXGBE_RDM_RSC_CTL, TXGBE_RDM_RSC_CTL_FREE_CTL,
+ TXGBE_RDM_RSC_CTL_FREE_CTL);
+ adapter->an_done = false;
+ adapter->cur_fec_link = TXGBE_PHY_FEC_AUTO;
+ } else {
+ /*
+ * Store the original AUTOC/AUTOC2 values if they have not been
+ * stored off yet. Otherwise restore the stored original
+ * values since the reset operation sets back to defaults.
+ */
+ sr_pcs_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_PCS_CTL2);
+ sr_pma_mmd_ctl1 = txgbe_rd32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1);
+ sr_an_mmd_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_CTL);
+ sr_an_mmd_adv_reg2 = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG2);
+ vr_xs_or_pcs_mmd_digi_ctl1 =
+ txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1);
+
+ if (hw->mac.orig_link_settings_stored == false) {
+ hw->mac.orig_sr_pcs_ctl2 = sr_pcs_ctl;
+ hw->mac.orig_sr_pma_mmd_ctl1 = sr_pma_mmd_ctl1;
+ hw->mac.orig_sr_an_mmd_ctl = sr_an_mmd_ctl;
+ hw->mac.orig_sr_an_mmd_adv_reg2 = sr_an_mmd_adv_reg2;
+ hw->mac.orig_vr_xs_or_pcs_mmd_digi_ctl1 =
+ vr_xs_or_pcs_mmd_digi_ctl1;
+ hw->mac.orig_link_settings_stored = true;
+ } else {
+
+ /* If MNG FW is running on a multi-speed device that
+ * doesn't autoneg without driver support we need to
+ * leave LMS in the state it was in before the MAC reset.
+ * Likewise if we support WoL we don't want to change the
+ * LMS state.
+ */
+
+ hw->mac.orig_sr_pcs_ctl2 = curr_sr_pcs_ctl;
+ hw->mac.orig_sr_pma_mmd_ctl1 = curr_sr_pma_mmd_ctl1;
+ hw->mac.orig_sr_an_mmd_ctl = curr_sr_an_mmd_ctl;
+ hw->mac.orig_sr_an_mmd_adv_reg2 =
+ curr_sr_an_mmd_adv_reg2;
+ hw->mac.orig_vr_xs_or_pcs_mmd_digi_ctl1 =
+ curr_vr_xs_or_pcs_mmd_digi_ctl1;
+
+ }
+ }
+ /* make sure phy power is up */
+ msleep(100);
+
+ if (hw->mac.type == txgbe_mac_sp) {
+ /* A temporary solution for setting the link to SFI */
+ if (SFI_SET == 1 || adapter->ffe_set == TXGBE_BP_M_SFI) {
+ e_dev_info("Set SFI TX_EQ MAIN:%d PRE:%d POST:%d\n",
+ adapter->ffe_main, adapter->ffe_pre, adapter->ffe_post);
+ /* 5. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL0 Register Bit[13:8](TX_EQ_MAIN)
+ * = 6'd30, Bit[5:0](TX_EQ_PRE) = 6'd4
+ */
+ value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0);
+ value = (value & ~0x3F3F) | (adapter->ffe_main << 8) | adapter->ffe_pre;
+ txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value);
+ /* 6. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL1 Register Bit[6](TX_EQ_OVR_RIDE)
+ * = 1'b1, Bit[5:0](TX_EQ_POST) = 6'd36
+ */
+ value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1);
+ value = (value & ~0x7F) | adapter->ffe_post | (1 << 6);
+ txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value);
+ }
+
+ if (KR_SET == 1 || adapter->ffe_set == TXGBE_BP_M_KR) {
+ e_dev_info("Set KR TX_EQ MAIN:%d PRE:%d POST:%d\n",
+ adapter->ffe_main, adapter->ffe_pre, adapter->ffe_post);
+ value = (0x1804 & ~0x3F3F);
+ value |= adapter->ffe_main << 8 | adapter->ffe_pre;
+ txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value);
+
+ value = (0x50 & ~0x7F) | (1 << 6) | adapter->ffe_post;
+ txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value);
+ }
+
+ if (KX_SET == 1 || adapter->ffe_set == TXGBE_BP_M_KX) {
+ e_dev_info("Set KX TX_EQ MAIN:%d PRE:%d POST:%d\n",
+ adapter->ffe_main, adapter->ffe_pre, adapter->ffe_post);
+ /* 5. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL0 Register Bit[13:8](TX_EQ_MAIN)
+ * = 6'd30, Bit[5:0](TX_EQ_PRE) = 6'd4
+ */
+ value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0);
+ value = (value & ~0x3F3F) | (adapter->ffe_main << 8) | adapter->ffe_pre;
+ txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value);
+ /* 6.
Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL1 Register Bit[6](TX_EQ_OVR_RIDE) + * = 1'b1, Bit[5:0](TX_EQ_POST) = 6'd36 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1); + value = (value & ~0x7F) | adapter->ffe_post | (1 << 6); + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + + } + } + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = 128; + TCALL(hw, mac.ops.init_rx_addrs); + + /* Store the permanent SAN mac address */ + TCALL(hw, mac.ops.get_san_mac_addr, hw->mac.san_addr); + + /* Add the SAN MAC address to the RAR only if it's a valid address */ + if (txgbe_validate_mac_addr(hw->mac.san_addr) == 0) { + TCALL(hw, mac.ops.set_rar, hw->mac.num_rar_entries - 1, + hw->mac.san_addr, 0, TXGBE_PSR_MAC_SWC_AD_H_AV); + + /* Save the SAN MAC RAR index */ + hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; + + /* Reserve the last RAR for the SAN MAC address */ + hw->mac.num_rar_entries--; + } + + /* Store the alternative WWNN/WWPN prefix */ + TCALL(hw, mac.ops.get_wwn_prefix, &hw->mac.wwnn_prefix, + &hw->mac.wwpn_prefix); + + pci_set_master(((struct txgbe_adapter *)hw->back)->pdev); + +reset_hw_out: + return status; +} + +#if 0 +void txgbe_disable_fdir(struct txgbe_hw *hw) +{ + int val = 0; + + if (!hw->Fdir_enabled) { + wr32(hw, TXGBE_RDB_FDIR_CTL, 0x4AF07F18); + TXGBE_WRITE_FLUSH(hw); + } + val = rd32(hw, TXGBE_RDB_FDIR_CTL); +} +#endif + +/** + * txgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete + * @hw: pointer to hardware structure + * @fdircmd: current value of FDIRCMD register + */ +STATIC s32 txgbe_fdir_check_cmd_complete(struct txgbe_hw *hw, u32 *fdircmd) +{ + int i; + + for (i = 0; i < TXGBE_RDB_FDIR_CMD_CMD_POLL; i++) { + *fdircmd = rd32(hw, TXGBE_RDB_FDIR_CMD); + if (!(*fdircmd & TXGBE_RDB_FDIR_CMD_CMD_MASK)) + return 0; + usec_delay(10); + } + + return TXGBE_ERR_FDIR_CMD_INCOMPLETE; +} + +/** + * txgbe_reinit_fdir_tables - Reinitialize Flow Director tables. + * @hw: pointer to hardware structure + **/ +s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw) +{ + s32 err; + int i; + u32 fdirctrl = rd32(hw, TXGBE_RDB_FDIR_CTL); + u32 fdircmd; + fdirctrl &= ~TXGBE_RDB_FDIR_CTL_INIT_DONE; + + /* + * Before starting reinitialization process, + * FDIRCMD.CMD must be zero. + */ + err = txgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + DEBUGOUT("Flow Director previous command did not complete, " + "aborting table re-initialization.\n"); + return err; + } + + wr32(hw, TXGBE_RDB_FDIR_FREE, 0); + TXGBE_WRITE_FLUSH(hw); + /* + * sapphire/amber-lite adapters flow director init flow cannot be + * restarted, Workaround sapphire/amber-lite + * silicon errata by performing the following steps + * before re-writing the FDIRCTRL control register with the same value. + * - write 1 to bit 8 of FDIRCMD register & + * - write 0 to bit 8 of FDIRCMD register + */ + wr32m(hw, TXGBE_RDB_FDIR_CMD, + TXGBE_RDB_FDIR_CMD_CLEARHT, TXGBE_RDB_FDIR_CMD_CLEARHT); + TXGBE_WRITE_FLUSH(hw); + wr32m(hw, TXGBE_RDB_FDIR_CMD, + TXGBE_RDB_FDIR_CMD_CLEARHT, 0); + TXGBE_WRITE_FLUSH(hw); + /* + * Clear FDIR Hash register to clear any leftover hashes + * waiting to be programmed. 
+ */ + wr32(hw, TXGBE_RDB_FDIR_HASH, 0x00); + TXGBE_WRITE_FLUSH(hw); + + wr32(hw, TXGBE_RDB_FDIR_CTL, fdirctrl); + TXGBE_WRITE_FLUSH(hw); + + /* Poll init-done after we write FDIRCTRL register */ + for (i = 0; i < TXGBE_FDIR_INIT_DONE_POLL; i++) { + if (rd32(hw, TXGBE_RDB_FDIR_CTL) & + TXGBE_RDB_FDIR_CTL_INIT_DONE) + break; + msec_delay(1); + } + if (i >= TXGBE_FDIR_INIT_DONE_POLL) { + DEBUGOUT("Flow Director Signature poll time exceeded!\n"); + return TXGBE_ERR_FDIR_REINIT_FAILED; + } + + /* Clear FDIR statistics registers (read to clear) */ + rd32(hw, TXGBE_RDB_FDIR_USE_ST); + rd32(hw, TXGBE_RDB_FDIR_FAIL_ST); + rd32(hw, TXGBE_RDB_FDIR_MATCH); + rd32(hw, TXGBE_RDB_FDIR_MISS); + rd32(hw, TXGBE_RDB_FDIR_LEN); + + return 0; +} + +/** + * txgbe_fdir_enable - Initialize Flow Director control registers + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register + **/ +STATIC void txgbe_fdir_enable(struct txgbe_hw *hw, u32 fdirctrl) +{ + int i; + + /* Prime the keys for hashing */ + wr32(hw, TXGBE_RDB_FDIR_HKEY, TXGBE_ATR_BUCKET_HASH_KEY); + wr32(hw, TXGBE_RDB_FDIR_SKEY, TXGBE_ATR_SIGNATURE_HASH_KEY); + + /* + * Poll init-done after we write the register. Estimated times: + * 10G: PBALLOC = 11b, timing is 60us + * 1G: PBALLOC = 11b, timing is 600us + * 100M: PBALLOC = 11b, timing is 6ms + * + * Multiple these timings by 4 if under full Rx load + * + * So we'll poll for TXGBE_FDIR_INIT_DONE_POLL times, sleeping for + * 1 msec per poll time. If we're at line rate and drop to 100M, then + * this might not finish in our poll time, but we can live with that + * for now. + */ + wr32(hw, TXGBE_RDB_FDIR_CTL, fdirctrl); + TXGBE_WRITE_FLUSH(hw); + for (i = 0; i < TXGBE_RDB_FDIR_INIT_DONE_POLL; i++) { + if (rd32(hw, TXGBE_RDB_FDIR_CTL) & + TXGBE_RDB_FDIR_CTL_INIT_DONE) + break; + msec_delay(1); + } + + if (i >= TXGBE_RDB_FDIR_INIT_DONE_POLL) + DEBUGOUT("Flow Director poll time exceeded!\n"); +} + +/** + * txgbe_init_fdir_signature -Initialize Flow Director sig filters + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register, initially + * contains just the value of the Rx packet buffer allocation + **/ +s32 txgbe_init_fdir_signature(struct txgbe_hw *hw, u32 fdirctrl) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)hw->back; + int i = VMDQ_P(0) / 4; + int j = VMDQ_P(0) % 4; + u32 flex = rd32m(hw, TXGBE_RDB_FDIR_FLEX_CFG(i), + ~((TXGBE_RDB_FDIR_FLEX_CFG_BASE_MSK | + TXGBE_RDB_FDIR_FLEX_CFG_MSK | + TXGBE_RDB_FDIR_FLEX_CFG_OFST) << + (TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT * j))); + + UNREFERENCED_PARAMETER(adapter); + + flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC | + 0x6 << TXGBE_RDB_FDIR_FLEX_CFG_OFST_SHIFT) << + (TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT * j); + wr32(hw, TXGBE_RDB_FDIR_FLEX_CFG(i), flex); + + /* + * Continue setup of fdirctrl register bits: + * Move the flexible bytes to use the ethertype - shift 6 words + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 filters are left + */ + fdirctrl |= (0xF << TXGBE_RDB_FDIR_CTL_HASH_BITS_SHIFT) | + (0xA << TXGBE_RDB_FDIR_CTL_MAX_LENGTH_SHIFT) | + (4 << TXGBE_RDB_FDIR_CTL_FULL_THRESH_SHIFT); + + /* write hashes and fdirctrl register, poll for completion */ + txgbe_fdir_enable(hw, fdirctrl); + + if (hw->revision_id == TXGBE_SP_MPW) { + /* errata 1: disable RSC of drop ring 0 */ + wr32m(hw, TXGBE_PX_RR_CFG(0), + TXGBE_PX_RR_CFG_RSC, ~TXGBE_PX_RR_CFG_RSC); + } + return 0; +} + +/** + * txgbe_init_fdir_perfect - Initialize Flow Director 
perfect filters + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register, initially + * contains just the value of the Rx packet buffer allocation + * @cloud_mode: true - cloud mode, false - other mode + **/ +s32 txgbe_init_fdir_perfect(struct txgbe_hw *hw, u32 fdirctrl, + bool cloud_mode) +{ + /* + * Continue setup of fdirctrl register bits: + * Turn perfect match filtering on + * Report hash in RSS field of Rx wb descriptor + * Initialize the drop queue + * Move the flexible bytes to use the ethertype - shift 6 words + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 (0x4 * 16) filters are left + */ + fdirctrl |= TXGBE_RDB_FDIR_CTL_PERFECT_MATCH | + (TXGBE_RDB_FDIR_DROP_QUEUE << + TXGBE_RDB_FDIR_CTL_DROP_Q_SHIFT) | + (0xF << TXGBE_RDB_FDIR_CTL_HASH_BITS_SHIFT) | + (0xA << TXGBE_RDB_FDIR_CTL_MAX_LENGTH_SHIFT) | + (4 << TXGBE_RDB_FDIR_CTL_FULL_THRESH_SHIFT); + + /* write hashes and fdirctrl register, poll for completion */ + txgbe_fdir_enable(hw, fdirctrl); + + if (hw->revision_id == TXGBE_SP_MPW) { + if (((struct txgbe_adapter *)hw->back)->num_rx_queues > + TXGBE_RDB_FDIR_DROP_QUEUE) + /* errata 1: disable RSC of drop ring */ + wr32m(hw, + TXGBE_PX_RR_CFG(TXGBE_RDB_FDIR_DROP_QUEUE), + TXGBE_PX_RR_CFG_RSC, ~TXGBE_PX_RR_CFG_RSC); + } + return 0; +} + +/* + * These defines allow us to quickly generate all of the necessary instructions + * in the function below by simply calling out TXGBE_COMPUTE_SIG_HASH_ITERATION + * for values 0 through 15 + */ +#define TXGBE_ATR_COMMON_HASH_KEY \ + (TXGBE_ATR_BUCKET_HASH_KEY & TXGBE_ATR_SIGNATURE_HASH_KEY) +#define TXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (TXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ + common_hash ^= lo_hash_dword >> n; \ + else if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + else if (TXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ + sig_hash ^= lo_hash_dword << (16 - n); \ + if (TXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ + common_hash ^= hi_hash_dword >> n; \ + else if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ + else if (TXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ + sig_hash ^= hi_hash_dword << (16 - n); \ +} while (0) + +/** + * txgbe_atr_compute_sig_hash - Compute the signature hash + * @stream: input bitstream to compute the hash on + * + * This function is almost identical to the function above but contains + * several optimizations such as unwinding all of the loops, letting the + * compiler work out all of the conditional ifs since the keys are static + * defines, and computing two keys at once since the hashed dword stream + * will be the same for both keys. 
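+ *
+ * Editorial note (not part of the original comment): each TXGBE_COMPUTE_SIG_HASH_ITERATION(n)
+ * invocation below folds bit n and bit n + 16 of the two fixed hash keys into the bucket,
+ * signature and common hashes, so the sixteen unrolled invocations cover all 32 key bits
+ * that a loop would otherwise walk.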
+ **/ +u32 txgbe_atr_compute_sig_hash(union txgbe_atr_hash_dword input, + union txgbe_atr_hash_dword common) +{ + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; + + /* record the flow_vm_vlan bits as they are a key part to the hash */ + flow_vm_vlan = TXGBE_NTOHL(input.dword); + + /* generate common hash dword */ + hi_hash_dword = TXGBE_NTOHL(common.dword); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + + /* Process bits 0 and 16 */ + TXGBE_COMPUTE_SIG_HASH_ITERATION(0); + + /* + * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed + * so we do not add the VLAN until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + /* Process remaining 30 bit of the key */ + TXGBE_COMPUTE_SIG_HASH_ITERATION(1); + TXGBE_COMPUTE_SIG_HASH_ITERATION(2); + TXGBE_COMPUTE_SIG_HASH_ITERATION(3); + TXGBE_COMPUTE_SIG_HASH_ITERATION(4); + TXGBE_COMPUTE_SIG_HASH_ITERATION(5); + TXGBE_COMPUTE_SIG_HASH_ITERATION(6); + TXGBE_COMPUTE_SIG_HASH_ITERATION(7); + TXGBE_COMPUTE_SIG_HASH_ITERATION(8); + TXGBE_COMPUTE_SIG_HASH_ITERATION(9); + TXGBE_COMPUTE_SIG_HASH_ITERATION(10); + TXGBE_COMPUTE_SIG_HASH_ITERATION(11); + TXGBE_COMPUTE_SIG_HASH_ITERATION(12); + TXGBE_COMPUTE_SIG_HASH_ITERATION(13); + TXGBE_COMPUTE_SIG_HASH_ITERATION(14); + TXGBE_COMPUTE_SIG_HASH_ITERATION(15); + + /* combine common_hash result with signature and bucket hashes */ + bucket_hash ^= common_hash; + bucket_hash &= TXGBE_ATR_HASH_MASK; + + sig_hash ^= common_hash << 16; + sig_hash &= TXGBE_ATR_HASH_MASK << 16; + + /* return completed signature hash */ + return sig_hash ^ bucket_hash; +} + +/** + * txgbe_atr_add_signature_filter - Adds a signature hash filter + * @hw: pointer to hardware structure + * @input: unique input dword + * @common: compressed common input dword + * @queue: queue index to direct traffic to + **/ +s32 txgbe_fdir_add_signature_filter(struct txgbe_hw *hw, + union txgbe_atr_hash_dword input, + union txgbe_atr_hash_dword common, + u8 queue) +{ + u32 fdirhashcmd = 0; + u8 flow_type; + u32 fdircmd; + s32 err; + + /* + * Get the flow_type in order to program FDIRCMD properly + * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 + * fifth is FDIRCMD.TUNNEL_FILTER + */ + flow_type = input.formatted.flow_type; + switch (flow_type) { + case TXGBE_ATR_FLOW_TYPE_TCPV4: + case TXGBE_ATR_FLOW_TYPE_UDPV4: + case TXGBE_ATR_FLOW_TYPE_SCTPV4: + case TXGBE_ATR_FLOW_TYPE_TCPV6: + case TXGBE_ATR_FLOW_TYPE_UDPV6: + case TXGBE_ATR_FLOW_TYPE_SCTPV6: + break; + default: + DEBUGOUT(" Error on flow type input\n"); + return TXGBE_ERR_CONFIG; + } + + /* configure FDIRCMD register */ + fdircmd = TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW | + TXGBE_RDB_FDIR_CMD_FILTER_UPDATE | + TXGBE_RDB_FDIR_CMD_LAST | TXGBE_RDB_FDIR_CMD_QUEUE_EN; + fdircmd |= (u32)flow_type << TXGBE_RDB_FDIR_CMD_FLOW_TYPE_SHIFT; + fdircmd |= (u32)queue << TXGBE_RDB_FDIR_CMD_RX_QUEUE_SHIFT; + + fdirhashcmd |= txgbe_atr_compute_sig_hash(input, common); + fdirhashcmd |= 0x1 << TXGBE_RDB_FDIR_HASH_BUCKET_VALID_SHIFT; + wr32(hw, TXGBE_RDB_FDIR_HASH, fdirhashcmd); + + wr32(hw, TXGBE_RDB_FDIR_CMD, fdircmd); + + err = txgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + DEBUGOUT("Flow Director command did not complete!\n"); + 
return err; + } + + DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); + + return 0; +} + +#define TXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ +} while (0) + +/** + * txgbe_atr_compute_perfect_hash - Compute the perfect filter hash + * @atr_input: input bitstream to compute the hash on + * @input_mask: mask for the input bitstream + * + * This function serves two main purposes. First it applies the input_mask + * to the atr_input resulting in a cleaned up atr_input data stream. + * Secondly it computes the hash and stores it in the bkt_hash field at + * the end of the input byte stream. This way it will be available for + * future use without needing to recompute the hash. + **/ +void txgbe_atr_compute_perfect_hash(union txgbe_atr_input *input, + union txgbe_atr_input *input_mask) +{ + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 bucket_hash = 0; + u32 hi_dword = 0; + u32 i = 0; + + /* Apply masks to input data */ + for (i = 0; i < 11; i++) + input->dword_stream[i] &= input_mask->dword_stream[i]; + + /* record the flow_vm_vlan bits as they are a key part to the hash */ + flow_vm_vlan = TXGBE_NTOHL(input->dword_stream[0]); + + /* generate common hash dword */ + for (i = 1; i <= 10; i++) + hi_dword ^= input->dword_stream[i]; + hi_hash_dword = TXGBE_NTOHL(hi_dword); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + + /* Process bits 0 and 16 */ + TXGBE_COMPUTE_BKT_HASH_ITERATION(0); + + /* + * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed + * so we do not add the VLAN until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + /* Process remaining 30 bit of the key */ + for (i = 1; i <= 15; i++) + TXGBE_COMPUTE_BKT_HASH_ITERATION(i); + + /* + * Limit hash to 13 bits since max bucket count is 8K. + * Store result at the end of the input stream. + */ + input->formatted.bkt_hash = bucket_hash & 0x1FFF; +} + +/** + * txgbe_get_fdirtcpm - generate a TCP port from atr_input_masks + * @input_mask: mask to be bit swapped + * + * The source and destination port masks for flow director are bit swapped + * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to + * generate a correctly swapped value we need to bit swap the mask and that + * is what is accomplished by this function. + **/ +STATIC u32 txgbe_get_fdirtcpm(union txgbe_atr_input *input_mask) +{ + u32 mask = TXGBE_NTOHS(input_mask->formatted.dst_port); + mask <<= TXGBE_RDB_FDIR_TCP_MSK_DPORTM_SHIFT; + mask |= TXGBE_NTOHS(input_mask->formatted.src_port); + + return mask; +} + +/* + * These two macros are meant to address the fact that we have registers + * that are either all or in part big-endian. As a result on big-endian + * systems we will end up byte swapping the value to little-endian before + * it is byte swapped again and written to the hardware in the original + * big-endian format. 
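+ *
+ * Purely illustrative example (an editorial addition, not from the vendor source): on a
+ * little-endian host TXGBE_STORE_AS_BE32(0x11223344) evaluates to 0x44332211, i.e. a plain
+ * byte swap, which undoes the swap performed by TXGBE_NTOHL() inside TXGBE_WRITE_REG_BE32()
+ * so the register ends up holding the value in its original big-endian layout.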
+ */
+#define TXGBE_STORE_AS_BE32(_value) \
+ (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
+ (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
+
+#define TXGBE_WRITE_REG_BE32(a, reg, value) \
+ wr32((a), (reg), TXGBE_STORE_AS_BE32(TXGBE_NTOHL(value)))
+
+#define TXGBE_STORE_AS_BE16(_value) \
+ TXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
+
+s32 txgbe_fdir_set_input_mask(struct txgbe_hw *hw,
+ union txgbe_atr_input *input_mask,
+ bool cloud_mode)
+{
+ /* mask IPv6 since it is currently not supported */
+ u32 fdirm = 0;
+ u32 fdirtcpm;
+ u32 flex = 0;
+ int i, j;
+#ifdef CONFIG_PCI_IOV
+ struct txgbe_adapter *adapter = (struct txgbe_adapter *)hw->back;
+#endif
+
+ /*
+ * Program the relevant mask registers. If src/dst_port or src/dst_addr
+ * are zero, then assume a full mask for that field. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
+ *
+ * This also assumes IPv4 only. IPv6 masking isn't supported at this
+ * point in time.
+ */
+
+ /* verify bucket hash is cleared on hash generation */
+ if (input_mask->formatted.bkt_hash)
+ DEBUGOUT(" bucket hash should always be 0 in mask\n");
+
+ /* Program FDIRM and verify partial masks */
+ switch (input_mask->formatted.vm_pool & 0x7F) {
+ case 0x0:
+ fdirm |= TXGBE_RDB_FDIR_OTHER_MSK_POOL;
+ /* fall through */
+ case 0x7F:
+ break;
+ default:
+ DEBUGOUT(" Error on vm pool mask\n");
+ return TXGBE_ERR_CONFIG;
+ }
+
+ switch (input_mask->formatted.flow_type & TXGBE_ATR_L4TYPE_MASK) {
+ case 0x0:
+ fdirm |= TXGBE_RDB_FDIR_OTHER_MSK_L4P;
+ if (input_mask->formatted.dst_port ||
+ input_mask->formatted.src_port) {
+ DEBUGOUT(" Error on src/dst port mask\n");
+ return TXGBE_ERR_CONFIG;
+ }
+ /* fall through */
+ case TXGBE_ATR_L4TYPE_MASK:
+ break;
+ default:
+ DEBUGOUT(" Error on flow type mask\n");
+ return TXGBE_ERR_CONFIG;
+ }
+
+#if 0
+ /* need fix */
+ switch (input_mask->formatted.tunnel_type & 0xFFFFFFFF) {
+ case 0x0:
+ /* Mask tunnel type, fall through */
+ fdirm |= TXGBE_RDB_FDIR_OTHER_MSK_TUN |
+ TXGBE_RDB_FDIR_OTHER_MSK_TUN_OUTIP |
+ TXGBE_RDB_FDIR_OTHER_MSK_TUN_TYPE;
+ case 0xFFFFFFFF:
+ break;
+ default:
+ DEBUGOUT(" Error on tunnel type byte mask\n");
+ return TXGBE_ERR_CONFIG;
+ }
+#endif
+
+ /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+ wr32(hw, TXGBE_RDB_FDIR_OTHER_MSK, fdirm);
+
+ i = VMDQ_P(0) / 4;
+ j = VMDQ_P(0) % 4;
+ flex = rd32m(hw, TXGBE_RDB_FDIR_FLEX_CFG(i),
+ ~((TXGBE_RDB_FDIR_FLEX_CFG_BASE_MSK |
+ TXGBE_RDB_FDIR_FLEX_CFG_MSK |
+ TXGBE_RDB_FDIR_FLEX_CFG_OFST) <<
+ (TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT * j)));
+ flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC |
+ 0x6 << TXGBE_RDB_FDIR_FLEX_CFG_OFST_SHIFT) <<
+ (TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT * j);
+
+ switch (input_mask->formatted.flex_bytes & 0xFFFF) {
+ case 0x0000:
+ /* Mask Flex Bytes, fall through */
+ flex |= TXGBE_RDB_FDIR_FLEX_CFG_MSK <<
+ (TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT * j);
+ case 0xFFFF:
+ break;
+ default:
+ DEBUGOUT(" Error on flexible byte mask\n");
+ return TXGBE_ERR_CONFIG;
+ }
+ wr32(hw, TXGBE_RDB_FDIR_FLEX_CFG(i), flex);
+
+ /* store the TCP/UDP port masks, bit reversed from port
+ * layout */
+ fdirtcpm = txgbe_get_fdirtcpm(input_mask);
+
+ /* write both the same so that UDP and TCP use the same mask */
+ wr32(hw, TXGBE_RDB_FDIR_TCP_MSK, ~fdirtcpm);
+ wr32(hw, TXGBE_RDB_FDIR_UDP_MSK, ~fdirtcpm);
+ wr32(hw, TXGBE_RDB_FDIR_SCTP_MSK, ~fdirtcpm);
+
+ /* store source and destination IP masks (little-endian) */
+ wr32(hw, TXGBE_RDB_FDIR_SA4_MSK,
+
TXGBE_NTOHL(~input_mask->formatted.src_ip[0])); + wr32(hw, TXGBE_RDB_FDIR_DA4_MSK, + TXGBE_NTOHL(~input_mask->formatted.dst_ip[0])); + return 0; +} + +s32 txgbe_fdir_write_perfect_filter(struct txgbe_hw *hw, + union txgbe_atr_input *input, + u16 soft_id, u8 queue, + bool cloud_mode) +{ + u32 fdirport, fdirvlan, fdirhash, fdircmd; + s32 err; + + if (!cloud_mode) { + /* currently IPv6 is not supported, must be programmed with 0 */ + wr32(hw, TXGBE_RDB_FDIR_IP6(2), + TXGBE_NTOHL(input->formatted.src_ip[0])); + wr32(hw, TXGBE_RDB_FDIR_IP6(1), + TXGBE_NTOHL(input->formatted.src_ip[1])); + wr32(hw, TXGBE_RDB_FDIR_IP6(0), + TXGBE_NTOHL(input->formatted.src_ip[2])); + + /* record the source address (little-endian) */ + wr32(hw, TXGBE_RDB_FDIR_SA, + TXGBE_NTOHL(input->formatted.src_ip[0])); + + /* record the first 32 bits of the destination address + * (little-endian) */ + wr32(hw, TXGBE_RDB_FDIR_DA, + TXGBE_NTOHL(input->formatted.dst_ip[0])); + + /* record source and destination port (little-endian)*/ + fdirport = TXGBE_NTOHS(input->formatted.dst_port); + fdirport <<= TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT; + fdirport |= TXGBE_NTOHS(input->formatted.src_port); + wr32(hw, TXGBE_RDB_FDIR_PORT, fdirport); + } + + /* record packet type and flex_bytes(little-endian) */ + fdirvlan = TXGBE_NTOHS(input->formatted.flex_bytes); + fdirvlan <<= TXGBE_RDB_FDIR_FLEX_FLEX_SHIFT; + + fdirvlan |= TXGBE_NTOHS(input->formatted.vlan_id); + wr32(hw, TXGBE_RDB_FDIR_FLEX, fdirvlan); + + + /* configure FDIRHASH register */ + fdirhash = input->formatted.bkt_hash | + 0x1 << TXGBE_RDB_FDIR_HASH_BUCKET_VALID_SHIFT; + fdirhash |= soft_id << TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX_SHIFT; + wr32(hw, TXGBE_RDB_FDIR_HASH, fdirhash); + + /* + * flush all previous writes to make certain registers are + * programmed prior to issuing the command + */ + TXGBE_WRITE_FLUSH(hw); + + /* configure FDIRCMD register */ + fdircmd = TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW | + TXGBE_RDB_FDIR_CMD_FILTER_UPDATE | + TXGBE_RDB_FDIR_CMD_LAST | TXGBE_RDB_FDIR_CMD_QUEUE_EN; + if (queue == TXGBE_RDB_FDIR_DROP_QUEUE) + fdircmd |= TXGBE_RDB_FDIR_CMD_DROP; + fdircmd |= input->formatted.flow_type << + TXGBE_RDB_FDIR_CMD_FLOW_TYPE_SHIFT; + fdircmd |= (u32)queue << TXGBE_RDB_FDIR_CMD_RX_QUEUE_SHIFT; + fdircmd |= (u32)input->formatted.vm_pool << + TXGBE_RDB_FDIR_CMD_VT_POOL_SHIFT; + + wr32(hw, TXGBE_RDB_FDIR_CMD, fdircmd); + err = txgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + DEBUGOUT("Flow Director command did not complete!\n"); + return err; + } + + return 0; +} + +s32 txgbe_fdir_erase_perfect_filter(struct txgbe_hw *hw, + union txgbe_atr_input *input, + u16 soft_id) +{ + u32 fdirhash; + u32 fdircmd; + s32 err; + + /* configure FDIRHASH register */ + fdirhash = input->formatted.bkt_hash; + fdirhash |= soft_id << TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX_SHIFT; + wr32(hw, TXGBE_RDB_FDIR_HASH, fdirhash); + + /* flush hash to HW */ + TXGBE_WRITE_FLUSH(hw); + + /* Query if filter is present */ + wr32(hw, TXGBE_RDB_FDIR_CMD, + TXGBE_RDB_FDIR_CMD_CMD_QUERY_REM_FILT); + + err = txgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + DEBUGOUT("Flow Director command did not complete!\n"); + return err; + } + + /* if filter exists in hardware then remove it */ + if (fdircmd & TXGBE_RDB_FDIR_CMD_FILTER_VALID) { + wr32(hw, TXGBE_RDB_FDIR_HASH, fdirhash); + TXGBE_WRITE_FLUSH(hw); + wr32(hw, TXGBE_RDB_FDIR_CMD, + TXGBE_RDB_FDIR_CMD_CMD_REMOVE_FLOW); + } + + return 0; +} + + +/** + * txgbe_start_hw - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * 
Starts the hardware using the generic start_hw function + * and the generation start_hw function. + * Then performs revision-specific operations, if any. + **/ +s32 txgbe_start_hw(struct txgbe_hw *hw) +{ + int ret_val = 0; + u32 i; + + /* Set the media type */ + hw->phy.media_type = TCALL(hw, mac.ops.get_media_type); + + /* PHY ops initialization must be done in reset_hw() */ + + /* Clear the VLAN filter table */ + TCALL(hw, mac.ops.clear_vfta); + + /* Clear statistics registers */ + TCALL(hw, mac.ops.clear_hw_cntrs); + + TXGBE_WRITE_FLUSH(hw); + /* Setup flow control */ + ret_val = TCALL(hw, mac.ops.setup_fc); + + if (hw->mac.type == txgbe_mac_sp) { + /* Clear the rate limiters */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + wr32(hw, TXGBE_TDM_RP_IDX, i); + wr32(hw, TXGBE_TDM_RP_RATE, 0); + } + } + TXGBE_WRITE_FLUSH(hw); + + /* Clear adapter stopped flag */ + hw->adapter_stopped = false; + + /* We need to run link autotry after the driver loads */ + hw->mac.autotry_restart = true; + + return ret_val; +} + +/** + * txgbe_identify_phy - Get physical layer module + * @hw: pointer to hardware structure + * + * Determines the physical layer module found on the current adapter. + * If PHY already detected, maintains current PHY type in hw struct, + * otherwise executes the PHY detection routine. + **/ +s32 txgbe_identify_phy(struct txgbe_hw *hw) +{ + /* Detect PHY if not unknown - returns success if already detected. */ + s32 status = TXGBE_ERR_PHY_ADDR_INVALID; + enum txgbe_media_type media_type; + + /* avoid fw access phy */ + if (((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) && + ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP)) { + /* Let firmware know the driver has taken over */ + wr32m(hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_DRV_LOAD, TXGBE_CFG_PORT_CTL_DRV_LOAD); + mdelay(100); + } + + if (!hw->phy.phy_semaphore_mask) { + hw->phy.phy_semaphore_mask = TXGBE_MNG_SWFW_SYNC_SW_PHY; + } + + media_type = TCALL(hw, mac.ops.get_media_type); + if (media_type == txgbe_media_type_copper) { + status = txgbe_init_external_phy(hw); + if (status != 0) { + return status; + } + txgbe_get_phy_id(hw); + hw->phy.type = txgbe_get_phy_type_from_id(hw); + status = 0; + } else if (media_type == txgbe_media_type_fiber || + media_type == txgbe_media_type_fiber_qsfp) { + status = txgbe_identify_module(hw); + } else { + hw->phy.type = txgbe_phy_none; + status = 0; + } + + /* Let firmware take over control of h/w */ + if (((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) && + ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP)) + wr32m(hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_DRV_LOAD, 0); + + /* Return error if SFP module has been detected but is not supported */ + if (hw->phy.type == txgbe_phy_sfp_unsupported) + return TXGBE_ERR_SFP_NOT_SUPPORTED; + + return status; +} + +int txgbe_set_pps(struct txgbe_hw *hw, bool enable, u64 nsec, u64 cycles) +{ + int status; + struct txgbe_hic_set_pps pps_cmd; + int i; + + pps_cmd.hdr.cmd = FW_PPS_SET_CMD; + pps_cmd.hdr.buf_len = FW_PPS_SET_LEN; + pps_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + pps_cmd.lan_id = hw->bus.lan_id; + pps_cmd.enable = enable; + pps_cmd.nsec = nsec; + pps_cmd.cycles = cycles; + pps_cmd.hdr.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; + + /* send reset request to FW and wait for response */ + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + status = txgbe_host_interface_command(hw, (u32 *)&pps_cmd, + sizeof(pps_cmd), + TXGBE_HI_COMMAND_TIMEOUT, + true); + msleep(1); + if (status != 0) + 
continue; + + if (pps_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + break; + } + + return status; + +} + +/** + * txgbe_enable_rx_dma - Enable the Rx DMA unit on sapphire/amber-lite + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL + * + * Enables the Rx DMA unit for sapphire/amber-lite + **/ +s32 txgbe_enable_rx_dma(struct txgbe_hw *hw, u32 regval) +{ + /* + * Workaround for sapphire/amber-lite silicon errata when enabling the + * Rx datapath. If traffic is incoming before we enable the Rx unit, it + * could hang the Rx DMA unit. Therefore, make sure the security engine + * is completely disabled prior to enabling the Rx unit. + */ + + TCALL(hw, mac.ops.disable_sec_rx_path); + + if (regval & TXGBE_RDB_PB_CTL_RXEN) + TCALL(hw, mac.ops.enable_rx); + else + TCALL(hw, mac.ops.disable_rx); + + TCALL(hw, mac.ops.enable_sec_rx_path); + + return 0; +} + +/** + * txgbe_init_flash_params - Initialize flash params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters txgbe_eeprom_info within the + * txgbe_hw struct in order to set up EEPROM access. + **/ +s32 txgbe_init_flash_params(struct txgbe_hw *hw) +{ + struct txgbe_flash_info *flash = &hw->flash; + u32 eec; + + eec = 0x1000000; + flash->semaphore_delay = 10; + flash->dword_size = (eec >> 2); + flash->address_bits = 24; + DEBUGOUT3("FLASH params: size = %d, address bits: %d\n", + flash->dword_size, + flash->address_bits); + + return 0; +} + +/** + * txgbe_read_flash_buffer - Read FLASH dword(s) using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of dword in EEPROM to read + * @dwords: number of dwords + * @data: dword(s) read from the EEPROM + * + * Retrieves 32 bit dword(s) read from EEPROM + **/ +s32 txgbe_read_flash_buffer(struct txgbe_hw *hw, u32 offset, + u32 dwords, u32 *data) +{ + s32 status = 0; + u32 i; + + TCALL(hw, eeprom.ops.init_params); + + if (!dwords || offset + dwords >= hw->flash.dword_size) { + status = TXGBE_ERR_INVALID_ARGUMENT; + ERROR_REPORT1(TXGBE_ERROR_ARGUMENT, "Invalid FLASH arguments"); + return status; + } + + for (i = 0; i < dwords; i++) { + wr32(hw, TXGBE_SPI_DATA, data[i]); + wr32(hw, TXGBE_SPI_CMD, + TXGBE_SPI_CMD_ADDR(offset + i) | + TXGBE_SPI_CMD_CMD(0x0)); + + status = po32m(hw, TXGBE_SPI_STATUS, + TXGBE_SPI_STATUS_OPDONE, TXGBE_SPI_STATUS_OPDONE, + TXGBE_SPI_TIMEOUT, 0); + if (status) { + DEBUGOUT("FLASH read timed out\n"); + break; + } + } + + return status; +} + +/** + * txgbe_write_flash_buffer - Write FLASH dword(s) using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of dword in EEPROM to write + * @dwords: number of dwords + * @data: dword(s) write from to EEPROM + * + **/ +s32 txgbe_write_flash_buffer(struct txgbe_hw *hw, u32 offset, + u32 dwords, u32 *data) +{ + s32 status = 0; + u32 i; + + TCALL(hw, eeprom.ops.init_params); + + if (!dwords || offset + dwords >= hw->flash.dword_size) { + status = TXGBE_ERR_INVALID_ARGUMENT; + ERROR_REPORT1(TXGBE_ERROR_ARGUMENT, "Invalid FLASH arguments"); + return status; + } + + for (i = 0; i < dwords; i++) { + wr32(hw, TXGBE_SPI_CMD, + TXGBE_SPI_CMD_ADDR(offset + i) | + TXGBE_SPI_CMD_CMD(0x1)); + + status = po32m(hw, TXGBE_SPI_STATUS, + TXGBE_SPI_STATUS_OPDONE, TXGBE_SPI_STATUS_OPDONE, + TXGBE_SPI_TIMEOUT, 0); + if (status != 0) { + DEBUGOUT("FLASH write timed out\n"); + break; + } + data[i] = rd32(hw, TXGBE_SPI_DATA); + } + + 
return status; +} + +/** + * txgbe_init_eeprom_params - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters txgbe_eeprom_info within the + * txgbe_hw struct in order to set up EEPROM access. + **/ +s32 txgbe_init_eeprom_params(struct txgbe_hw *hw) +{ + struct txgbe_eeprom_info *eeprom = &hw->eeprom; + u16 eeprom_size; + s32 status = 0; + u16 data; + + if (eeprom->type == txgbe_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = txgbe_eeprom_none; + + if (!(rd32(hw, TXGBE_SPI_STATUS) & + TXGBE_SPI_STATUS_FLASH_BYPASS)) { + eeprom->type = txgbe_flash; + + eeprom_size = 4096; + eeprom->word_size = eeprom_size >> 1; + + DEBUGOUT2("Eeprom params: type = %d, size = %d\n", + eeprom->type, eeprom->word_size); + } + } + + status = TCALL(hw, eeprom.ops.read, TXGBE_SW_REGION_PTR, + &data); + if (status) { + DEBUGOUT("NVM Read Error\n"); + return status; + } + eeprom->sw_region_offset = data >> 1; + + return status; +} + +/** + * txgbe_read_ee_hostif - Read EEPROM word using a host interface cmd + * assuming that the semaphore is already obtained. + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. + **/ +static s32 txgbe_read_ee_hostif_data(struct txgbe_hw *hw, u16 offset, + u16 *data) +{ + s32 status; + struct txgbe_hic_read_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + + if (hw->mac.type == txgbe_mac_sp) + buffer.hdr.req.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = TXGBE_CPU_TO_BE32(offset * 2); + /* one word */ + buffer.length = TXGBE_CPU_TO_BE16(sizeof(u16)); + + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + TXGBE_HI_COMMAND_TIMEOUT, false); + + if (status) + return status; + if (txgbe_check_mng_access(hw)) { + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + *data = (u16)rd32a(hw, TXGBE_AML_MNG_MBOX_FW2SW, + FW_NVM_DATA_OFFSET); + else if (hw->mac.type == txgbe_mac_sp) + *data = (u16)rd32a(hw, TXGBE_MNG_MBOX, + FW_NVM_DATA_OFFSET); + } else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + return status; + } + + + return 0; +} + +/** + * txgbe_read_ee_hostif - Read EEPROM word using a host interface cmd + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. + **/ +s32 txgbe_read_ee_hostif(struct txgbe_hw *hw, u16 offset, + u16 *data) +{ + s32 status = 0; + + if (TCALL(hw, mac.ops.acquire_swfw_sync, + TXGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) { + status = txgbe_read_ee_hostif_data(hw, offset, data); + TCALL(hw, mac.ops.release_swfw_sync, + TXGBE_MNG_SWFW_SYNC_SW_FLASH); + } else { + status = TXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * txgbe_read_ee_hostif_buffer- Read EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the hostif. 
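+ *
+ * Illustrative example (an editorial addition; the offset is hypothetical): a call such as
+ * txgbe_read_ee_hostif_buffer(hw, 0x40, 8, buf) issues host interface reads starting at
+ * byte address 0x80, because word offsets are converted to byte addresses by multiplying
+ * by two, with at most FW_MAX_READ_BUFFER_SIZE / 2 words fetched per command.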
+ **/
+s32 txgbe_read_ee_hostif_buffer(struct txgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ struct txgbe_hic_read_shadow_ram buffer;
+ u32 current_word = 0;
+ u16 words_to_read;
+ s32 status;
+ u32 reg;
+ u32 i;
+ u32 value = 0;
+
+ /* Take semaphore for the entire operation. */
+ status = TCALL(hw, mac.ops.acquire_swfw_sync,
+ TXGBE_MNG_SWFW_SYNC_SW_FLASH);
+ if (status) {
+ DEBUGOUT("EEPROM read buffer - semaphore failed\n");
+ return status;
+ }
+ while (words) {
+ if (words > FW_MAX_READ_BUFFER_SIZE / 2)
+ words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
+ else
+ words_to_read = words;
+
+ buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
+ buffer.hdr.req.buf_lenh = 0;
+ buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
+
+ if (hw->mac.type == txgbe_mac_sp)
+ buffer.hdr.req.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM;
+
+ /* convert offset from words to bytes */
+ buffer.address = TXGBE_CPU_TO_BE32((offset + current_word) * 2);
+ buffer.length = TXGBE_CPU_TO_BE16(words_to_read * 2);
+
+ status = txgbe_host_interface_command(hw, (u32 *)&buffer,
+ sizeof(buffer),
+ TXGBE_HI_COMMAND_TIMEOUT,
+ false);
+
+ if (status) {
+ DEBUGOUT("Host interface command failed\n");
+ goto out;
+ }
+
+ if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40)
+ reg = TXGBE_AML_MNG_MBOX_FW2SW;
+ else
+ reg = TXGBE_MNG_MBOX;
+
+ for (i = 0; i < words_to_read; i++) {
+ if (txgbe_check_mng_access(hw))
+ value = rd32(hw, reg + (FW_NVM_DATA_OFFSET << 2) + 2 * i);
+ else {
+ /* release the semaphore on the error path as well */
+ status = TXGBE_ERR_MNG_ACCESS_FAILED;
+ goto out;
+ }
+ data[current_word] = (u16)(value & 0xffff);
+ current_word++;
+ i++;
+ if (i < words_to_read) {
+ value >>= 16;
+ data[current_word] = (u16)(value & 0xffff);
+ current_word++;
+ }
+ }
+ words -= words_to_read;
+ }
+
+out:
+ TCALL(hw, mac.ops.release_swfw_sync,
+ TXGBE_MNG_SWFW_SYNC_SW_FLASH);
+ return status;
+}
+
+/**
+ * txgbe_write_ee_hostif_data - Write EEPROM word using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the hostif.
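+ *
+ * Editorial note (not part of the original comment): callers normally go through
+ * txgbe_write_ee_hostif(), defined further below, which wraps this helper and holds the
+ * TXGBE_MNG_SWFW_SYNC_SW_FLASH semaphore around the write.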
+ **/ +static s32 txgbe_write_ee_hostif_data(struct txgbe_hw *hw, u16 offset, + u16 data) +{ + s32 status; + struct txgbe_hic_write_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; +#ifndef TXGBE_SWFW_MBOX_AML + buffer.hdr.req.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; +#endif + + /* one word */ + buffer.length = TXGBE_CPU_TO_BE16(sizeof(u16)); + buffer.data = data; + buffer.address = TXGBE_CPU_TO_BE32(offset * 2); + + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + TXGBE_HI_COMMAND_TIMEOUT, false); + + return status; +} + +s32 txgbe_close_notify(struct txgbe_hw *hw) +{ + int tmp; + s32 status; + struct txgbe_hic_write_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_DW_CLOSE_NOTIFY; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = 0; + + if (hw->mac.type == txgbe_mac_sp) + buffer.hdr.req.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; + + /* one word */ + buffer.length = 0; + buffer.address = 0; + + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + 500, false); + if (status) + return status; + + if (txgbe_check_mng_access(hw)){ + tmp = (u32)rd32a(hw, TXGBE_MNG_MBOX, 1); + if (tmp == TXGBE_CHECKSUM_CAP_ST_PASS) + status = 0; + else + status = TXGBE_ERR_EEPROM_CHECKSUM; + } else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + return status; + } + + return status; +} + +s32 txgbe_open_notify(struct txgbe_hw *hw) +{ + int tmp; + s32 status; + struct txgbe_hic_write_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_DW_OPEN_NOTIFY; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = 0; + + if (hw->mac.type == txgbe_mac_sp) + buffer.hdr.req.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; + + /* one word */ + buffer.length = 0; + buffer.address = 0; + + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + 500, false); + if (status) + return status; + + if (txgbe_check_mng_access(hw)){ + tmp = (u32)rd32a(hw, TXGBE_MNG_MBOX, 1); + if (tmp == TXGBE_CHECKSUM_CAP_ST_PASS) + status = 0; + else + status = TXGBE_ERR_EEPROM_CHECKSUM; + + } else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + return status; + } + + return status; +} + +/** + * txgbe_write_ee_hostif - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. + **/ +s32 txgbe_write_ee_hostif(struct txgbe_hw *hw, u16 offset, + u16 data) +{ + s32 status = 0; + + if (TCALL(hw, mac.ops.acquire_swfw_sync, + TXGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) { + status = txgbe_write_ee_hostif_data(hw, offset, data); + TCALL(hw, mac.ops.release_swfw_sync, + TXGBE_MNG_SWFW_SYNC_SW_FLASH); + } else { + DEBUGOUT("write ee hostif failed to get semaphore"); + status = TXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * txgbe_write_ee_hostif_buffer - Write EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) write to the EEPROM + * + * Write a 16 bit word(s) to the EEPROM using the hostif. + **/ +s32 txgbe_write_ee_hostif_buffer(struct txgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + s32 status = 0; + u16 i = 0; + + /* Take semaphore for the entire operation. 
*/ + status = TCALL(hw, mac.ops.acquire_swfw_sync, + TXGBE_MNG_SWFW_SYNC_SW_FLASH); + if (status != 0) { + DEBUGOUT("EEPROM write buffer - semaphore failed\n"); + goto out; + } + + for (i = 0; i < words; i++) { + status = txgbe_write_ee_hostif_data(hw, offset + i, + data[i]); + + if (status != 0) { + DEBUGOUT("Eeprom buffered write failed\n"); + break; + } + } + + TCALL(hw, mac.ops.release_swfw_sync, TXGBE_MNG_SWFW_SYNC_SW_FLASH); +out: + + return status; +} + + + +/** + * txgbe_calc_eeprom_checksum - Calculates and returns the checksum + * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +s32 txgbe_calc_eeprom_checksum(struct txgbe_hw *hw) +{ + u16 *buffer = NULL; + u32 buffer_size = 0; + + u16 *eeprom_ptrs = NULL; + u16 *local_buffer; + s32 status; + u16 checksum = 0; + u16 i; + + TCALL(hw, eeprom.ops.init_params); + + if (!buffer) { + eeprom_ptrs = (u16 *)vmalloc(TXGBE_EEPROM_LAST_WORD * + sizeof(u16)); + if (!eeprom_ptrs) + return TXGBE_ERR_NO_SPACE; + /* Read pointer area */ + status = txgbe_read_ee_hostif_buffer(hw, 0, + TXGBE_EEPROM_LAST_WORD, + eeprom_ptrs); + if (status) { + DEBUGOUT("Failed to read EEPROM image\n"); + return status; + } + local_buffer = eeprom_ptrs; + } else { + if (buffer_size < TXGBE_EEPROM_LAST_WORD) + return TXGBE_ERR_PARAM; + local_buffer = buffer; + } + + for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++) { + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + if ((i > (TXGBE_SHOWROM_I2C_PTR / 2)) && (i < (TXGBE_SHOWROM_I2C_END / 2))) + local_buffer[i] = 0xffff; + if (i != hw->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM) + checksum += local_buffer[i]; + } + + checksum = (u16)TXGBE_EEPROM_SUM - checksum; + if (eeprom_ptrs) + vfree(eeprom_ptrs); + + return (s32)checksum; +} + +/** + * txgbe_update_eeprom_checksum - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. + **/ +s32 txgbe_update_eeprom_checksum(struct txgbe_hw *hw) +{ + s32 status; + u16 checksum = 0; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = txgbe_read_ee_hostif(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + status = txgbe_calc_eeprom_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = txgbe_write_ee_hostif(hw, TXGBE_EEPROM_CHECKSUM, + checksum); + + return status; +} + +/** + * txgbe_validate_eeprom_checksum - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +s32 txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, + u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + /* Read the first word from the EEPROM. 
If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = TCALL(hw, eeprom.ops.read, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + status = TCALL(hw, eeprom.ops.calc_checksum); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = txgbe_read_ee_hostif(hw, hw->eeprom.sw_region_offset + + TXGBE_EEPROM_CHECKSUM, + &read_checksum); + if (status) + return status; + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + status = TXGBE_ERR_EEPROM_CHECKSUM; + ERROR_REPORT1(TXGBE_ERROR_INVALID_STATE, + "Invalid EEPROM checksum\n"); + } + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + + return status; +} + +/** + * txgbe_update_flash - Instruct HW to copy EEPROM to Flash device + * @hw: pointer to hardware structure + * + * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash. + **/ +s32 txgbe_update_flash(struct txgbe_hw *hw) +{ + s32 status = 0; + union txgbe_hic_hdr2 buffer; + + buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD; + buffer.req.buf_lenh = 0; + buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN; + + if (hw->mac.type == txgbe_mac_sp) + buffer.req.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; + + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + TXGBE_HI_COMMAND_TIMEOUT, false); + + return status; +} + + +/** + * txgbe_check_mac_link - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +s32 txgbe_check_mac_link_sp(struct txgbe_hw *hw, u32 *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + u32 links_reg = 0; + u16 value = 0; + u32 i; + + if (link_up_wait_to_complete) { + for (i = 0; i < TXGBE_LINK_UP_TIME; i++) { + if (TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_copper && + ((hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI)) { + /* read ext phy link status */ + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 0x03, 0x8008, &value); + if (value & 0x400) + *link_up = true; + else + *link_up = false; + } else { + *link_up = true; + } + + if (*link_up) { + links_reg = rd32(hw, + TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) { + *link_up = true; + break; + } else { + *link_up = false; + } + } + msleep(100); + } + } else { + if (TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_copper && + ((hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI)) { + /* read ext phy link status */ + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 0x03, 0x8008, &value); + if (value & 0x400) + *link_up = true; + else + *link_up = false; + } else { + *link_up = true; + } + if (*link_up) { + links_reg = rd32(hw, TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) + *link_up = true; + else + *link_up = false; + } + } + + /* sync link status to fw for ocp card */ + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) + wr32(hw, TXGBE_TSC_LSEC_PKTNUM0, value); + + if ((hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0) || + (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1) || + (hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core0) || + (hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core1)) { + *link_up = hw->f2c_mod_status; 
+ + if (*link_up) + /* recover led configure when link up */ + wr32(hw, TXGBE_CFG_LED_CTL, 0); + else + /* over write led when link down */ + TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_UP | TXGBE_LED_LINK_10G | + TXGBE_LED_LINK_1G | TXGBE_LED_LINK_ACTIVE); + } + + if (*link_up) { + if (TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_copper && + ((hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI)) { + if ((value & 0xc000) == 0xc000) + *speed = TXGBE_LINK_SPEED_10GB_FULL; + else if ((value & 0xc000) == 0x8000) + *speed = TXGBE_LINK_SPEED_1GB_FULL; + else if ((value & 0xc000) == 0x4000) + *speed = TXGBE_LINK_SPEED_100_FULL; + else if ((value & 0xc000) == 0x0000) + *speed = TXGBE_LINK_SPEED_10_FULL; + } else { + if ((links_reg & TXGBE_CFG_PORT_ST_LINK_10G) == + TXGBE_CFG_PORT_ST_LINK_10G) + *speed = TXGBE_LINK_SPEED_10GB_FULL; + else if ((links_reg & TXGBE_CFG_PORT_ST_LINK_1G) == + TXGBE_CFG_PORT_ST_LINK_1G) + *speed = TXGBE_LINK_SPEED_1GB_FULL; + else if ((links_reg & TXGBE_CFG_PORT_ST_LINK_100M) == + TXGBE_CFG_PORT_ST_LINK_100M) + *speed = TXGBE_LINK_SPEED_100_FULL; + else + *speed = TXGBE_LINK_SPEED_10_FULL; + } + } else { + *speed = TXGBE_LINK_SPEED_UNKNOWN; + } + + return 0; +} + +/** + * txgbe_setup_eee - Enable/disable EEE support + * @hw: pointer to the HW structure + * @enable_eee: boolean flag to enable EEE + * + * Enable/disable EEE based on enable_eee flag. + * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C + * are modified. + * + **/ +s32 txgbe_setup_eee(struct txgbe_hw *hw, bool enable_eee) +{ + /* fix eee */ + + return 0; +} + +s32 txgbe_hic_write_lldp(struct txgbe_hw *hw,u32 open) +{ + int status; + struct txgbe_adapter *adapter = hw->back; + struct pci_dev *pdev = adapter->pdev; + struct txgbe_hic_write_lldp buffer; + + buffer.hdr.cmd = 0xf1 - open; + buffer.hdr.buf_len = 0x1; + buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + + if (hw->mac.type == txgbe_mac_sp) + buffer.hdr.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; + + buffer.func = PCI_FUNC(pdev->devfn); + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), 5000, false); + return status; + +} + +static int txgbe_hic_get_lldp(struct txgbe_hw *hw) +{ + int status; + struct txgbe_hic_write_lldp buffer; + + buffer.hdr.cmd = 0xf2; + buffer.hdr.buf_len = 0x1; + buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + + if (hw->mac.type == txgbe_mac_sp) + buffer.hdr.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; + + buffer.func = hw->bus.lan_id; + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), 5000, true); + if (buffer.hdr.cmd_or_resp.ret_status != FW_CEM_RESP_STATUS_SUCCESS) + return -1; + else + return (int)buffer.func; +} + +int txgbe_is_lldp(struct txgbe_hw *hw) +{ + u32 tmp = 0, lldp_flash_data = 0, i = 0; + struct txgbe_adapter *adapter = hw->back; + s32 status = 0; + + + status = txgbe_hic_get_lldp(hw); + if (status != -1) { + if (status) + adapter->eth_priv_flags |= TXGBE_ETH_PRIV_FLAG_LLDP; + else + adapter->eth_priv_flags &= ~TXGBE_ETH_PRIV_FLAG_LLDP; + return 0; + } else { + for (; i < 0x1000 / sizeof(u32); i++) { + status = txgbe_flash_read_dword(hw, TXGBE_LLDP_REG + i * 4, &tmp); + if(status) + return status; + if (tmp == U32_MAX) + break; + lldp_flash_data = tmp; + + } + if (lldp_flash_data & BIT(hw->bus.lan_id)) + adapter->eth_priv_flags |= TXGBE_ETH_PRIV_FLAG_LLDP; + else + adapter->eth_priv_flags &= ~TXGBE_ETH_PRIV_FLAG_LLDP; + } + return 0; +} + +void txgbe_hic_write_autoneg_status(struct txgbe_hw *hw, bool 
autoneg) +{ + struct txgbe_adapter *adapter = hw->back; + struct txgbe_hic_write_autoneg buffer; + + /* only support sp temporarily */ + if (hw->mac.type != txgbe_mac_sp) + return; + + /* only 0x64e20011 and above 0x20011 support */ + if (adapter->etrack_id != 0x64e20011 && + (adapter->etrack_id & 0xfffff) < 0x20012) + return; + + buffer.hdr.cmd = FW_AN_STA_CMD; + buffer.hdr.buf_len = FW_AN_STA_LEN; + buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + buffer.lan_id = hw->bus.lan_id; + buffer.autoneg = autoneg; + buffer.hdr.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; + + txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), 5000, false); +} + +void txgbe_set_queue_rate_limit(struct txgbe_hw *hw, int queue, u16 max_tx_rate) +{ + struct txgbe_adapter *adapter = hw->back; + int factor_int; + int factor_fra; + int link_speed; + int bcnrc_val; + + /* + * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported + * and 0x004 otherwise. + */ + + wr32(hw, TXGBE_TDM_MMW, 0x14); + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + if (max_tx_rate) { + u16 frac; + + link_speed = txgbe_link_mbps(adapter); + max_tx_rate = max_tx_rate * 105 / 100; //necessary offset by test + /* Calculate the rate factor values to set */ + factor_int = link_speed / max_tx_rate; + frac = (link_speed % max_tx_rate) * 10000 / max_tx_rate; + factor_fra = txgbe_frac_to_bi(frac, 10000, 14); + if (max_tx_rate > link_speed) { + factor_int = 1; + factor_fra = 0; + } + + wr32(hw, TXGBE_TDM_RL_QUEUE_IDX, queue); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_FACTOR_INT_MASK, factor_int << TXGBE_TDM_FACTOR_INT_SHIFT); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_FACTOR_FRA_MASK, factor_fra << TXGBE_TDM_FACTOR_FRA_SHIFT); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_RL_EN, TXGBE_TDM_RL_EN); + } else { + wr32(hw, TXGBE_TDM_RL_QUEUE_IDX, queue); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_RL_EN, 0); + } + } else { + bcnrc_val = TXGBE_TDM_RP_RATE_MAX(max_tx_rate); + + wr32(hw, TXGBE_TDM_RP_IDX, queue); + wr32(hw, TXGBE_TDM_RP_RATE, bcnrc_val); + if (max_tx_rate) + wr32m(hw, TXGBE_TDM_RP_CTL, + TXGBE_TDM_RP_CTL_RLEN, TXGBE_TDM_RP_CTL_RLEN); + else + wr32m(hw, TXGBE_TDM_RP_CTL, + TXGBE_TDM_RP_CTL_RLEN, 0); + } + +} + +int txgbe_hic_notify_led_active(struct txgbe_hw *hw, int active_flag) +{ + int status; + struct txgbe_led_active_set buffer; + + buffer.hdr.cmd = 0xf8; + buffer.hdr.buf_len = 0x1; + buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + buffer.active_flag = active_flag; + + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), 5000, true); + + return 0; +} + +bool txgbe_is_backplane(struct txgbe_hw *hw) +{ + + return hw->mac.ops.get_media_type(hw) == txgbe_media_type_backplane ? + true : false; +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h index abc729eb187ae8da87d25cd3f2d3c881199b5192..3250a90fc55b246cf4be1209b8bc8d4e7b6d078e 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h @@ -1,13 +1,369 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ +/* + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. 
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ */
 
 #ifndef _TXGBE_HW_H_
 #define _TXGBE_HW_H_
 
-int txgbe_disable_sec_tx_path(struct wx *wx);
-void txgbe_enable_sec_tx_path(struct wx *wx);
-int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size);
-int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val);
-int txgbe_reset_hw(struct wx *wx);
+#define TXGBE_EMC_INTERNAL_DATA		0x00
+#define TXGBE_EMC_INTERNAL_THERM_LIMIT	0x20
+#define TXGBE_EMC_DIODE1_DATA		0x01
+#define TXGBE_EMC_DIODE1_THERM_LIMIT	0x19
+#define TXGBE_EMC_DIODE2_DATA		0x23
+#define TXGBE_EMC_DIODE2_THERM_LIMIT	0x1A
+#define TXGBE_EMC_DIODE3_DATA		0x2A
+#define TXGBE_EMC_DIODE3_THERM_LIMIT	0x30
+
+#define SPI_CLK_DIV			2
+
+#define SPI_CMD_ERASE_CHIP		4 /* SPI erase chip command */
+#define SPI_CMD_ERASE_SECTOR		3 /* SPI erase sector command */
+#define SPI_CMD_WRITE_DWORD		0 /* SPI write a dword command */
+#define SPI_CMD_READ_DWORD		1 /* SPI read a dword command */
+#define SPI_CMD_USER_CMD		5 /* SPI user command */
+
+#define SPI_CLK_CMD_OFFSET		28 /* SPI command field offset in Command register */
+#define SPI_CLK_DIV_OFFSET		25 /* SPI clock divide field offset in Command register */
+
+#define SPI_TIME_OUT_VALUE		10000
+#define SPI_SECTOR_SIZE			(4 * 1024) /* FLASH sector size is 4KB */
+#define SPI_H_CMD_REG_ADDR		0x10104 /* SPI Command register address */
+#define SPI_H_DAT_REG_ADDR		0x10108 /* SPI Data register address */
+#define SPI_H_STA_REG_ADDR		0x1010c /* SPI Status register address */
+#define SPI_H_USR_CMD_REG_ADDR		0x10110 /* SPI User Command register address */
+#define SPI_CMD_CFG1_ADDR		0x10118 /* Flash command configuration register 1 */
+#define MISC_RST_REG_ADDR		0x1000c /* Misc reset register address */
+#define MGR_FLASH_RELOAD_REG_ADDR	0x101a0 /* MGR reload flash read */
+#define PRB_CTL				0x10200 /* used to check whether the image has been upgraded */
+#define PRB_SCRATCH			0x10230 /* used to check whether the image has been upgraded */
+
+#define MAC_ADDR0_WORD0_OFFSET_1G	0x006000c /* MAC Address for LAN0, stored in external FLASH */
+#define MAC_ADDR0_WORD1_OFFSET_1G	0x0060014
+#define MAC_ADDR1_WORD0_OFFSET_1G	0x007000c /* MAC Address for LAN1, stored in external FLASH */
+#define MAC_ADDR1_WORD1_OFFSET_1G	0x0070014
+
+#define AMLITE_MAC_ADDR0_WORD0_OFFSET	0x00f010c /* MAC Address for LAN0, stored in external FLASH */
+#define AMLITE_MAC_ADDR0_WORD1_OFFSET	0x00f0114
+#define AMLITE_MAC_ADDR1_WORD0_OFFSET	0x00f020c /* MAC Address for LAN1, stored in external FLASH */
+#define AMLITE_MAC_ADDR1_WORD1_OFFSET	0x00f0214
+
+#define PRODUCT_SERIAL_NUM_OFFSET_1G	0x00f0000 /* Product Serial Number, stored in the last external FLASH sector */
+#define TXGBE_VPD_OFFSET		0x500
+#define TXGBE_VPD_END			0x600
+
+struct txgbe_hic_read_cab {
+	union txgbe_hic_hdr2 hdr;
+	union {
+		u8 d8[252];
+		u16 d16[126];
+		u32 d32[63];
+	} dbuf;
+};
+
+#ifndef read_poll_timeout
+#define read_poll_timeout(op, val, cond, sleep_us, timeout_us, \
+			  sleep_before_read, args...)
\ +({ \ + u64 __timeout_us = (timeout_us); \ + unsigned long __sleep_us = (sleep_us); \ + ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ + might_sleep_if((__sleep_us) != 0); \ + if (sleep_before_read && __sleep_us) \ + usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ + for (;;) { \ + (val) = op(args); \ + if (cond) \ + break; \ + if (__timeout_us && \ + ktime_compare(ktime_get(), __timeout) > 0) { \ + (val) = op(args); \ + break; \ + } \ + if (__sleep_us) \ + usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ + cpu_relax(); \ + } \ + (cond) ? 0 : -ETIMEDOUT; \ +}) +#endif + +/** + * Packet Type decoding + **/ +/* txgbe_dec_ptype.mac: outer mac */ +enum txgbe_dec_ptype_mac { + TXGBE_DEC_PTYPE_MAC_IP = 0, + TXGBE_DEC_PTYPE_MAC_L2 = 2, + TXGBE_DEC_PTYPE_MAC_FCOE = 3, +}; + +/* txgbe_dec_ptype.[e]ip: outer&encaped ip */ +#define TXGBE_DEC_PTYPE_IP_FRAG (0x4) +enum txgbe_dec_ptype_ip { + TXGBE_DEC_PTYPE_IP_NONE = 0, + TXGBE_DEC_PTYPE_IP_IPV4 = 1, + TXGBE_DEC_PTYPE_IP_IPV6 = 2, + TXGBE_DEC_PTYPE_IP_FGV4 = + (TXGBE_DEC_PTYPE_IP_FRAG | TXGBE_DEC_PTYPE_IP_IPV4), + TXGBE_DEC_PTYPE_IP_FGV6 = + (TXGBE_DEC_PTYPE_IP_FRAG | TXGBE_DEC_PTYPE_IP_IPV6), +}; + +/* txgbe_dec_ptype.etype: encaped type */ +enum txgbe_dec_ptype_etype { + TXGBE_DEC_PTYPE_ETYPE_NONE = 0, + TXGBE_DEC_PTYPE_ETYPE_IPIP = 1, /* IP+IP */ + TXGBE_DEC_PTYPE_ETYPE_IG = 2, /* IP+GRE */ + TXGBE_DEC_PTYPE_ETYPE_IGM = 3, /* IP+GRE+MAC */ + TXGBE_DEC_PTYPE_ETYPE_IGMV = 4, /* IP+GRE+MAC+VLAN */ +}; + +/* txgbe_dec_ptype.proto: payload proto */ +enum txgbe_dec_ptype_prot { + TXGBE_DEC_PTYPE_PROT_NONE = 0, + TXGBE_DEC_PTYPE_PROT_UDP = 1, + TXGBE_DEC_PTYPE_PROT_TCP = 2, + TXGBE_DEC_PTYPE_PROT_SCTP = 3, + TXGBE_DEC_PTYPE_PROT_ICMP = 4, + TXGBE_DEC_PTYPE_PROT_TS = 5, /* time sync */ +}; + +/* txgbe_dec_ptype.layer: payload layer */ +enum txgbe_dec_ptype_layer { + TXGBE_DEC_PTYPE_LAYER_NONE = 0, + TXGBE_DEC_PTYPE_LAYER_PAY2 = 1, + TXGBE_DEC_PTYPE_LAYER_PAY3 = 2, + TXGBE_DEC_PTYPE_LAYER_PAY4 = 3, +}; + +struct txgbe_dec_ptype { + u32 ptype:8; + u32 known:1; + u32 mac:2; /* outer mac */ + u32 ip:3; /* outer ip*/ + u32 etype:3; /* encaped type */ + u32 eip:3; /* encaped ip */ + u32 prot:4; /* payload proto */ + u32 layer:3; /* payload layer */ +}; +typedef struct txgbe_dec_ptype txgbe_dptype; + +u32 rd32_ephy(struct txgbe_hw *hw, u32 addr); +u32 txgbe_rd32_epcs(struct txgbe_hw *hw, u32 addr); +void txgbe_wr32_ephy(struct txgbe_hw *hw, u32 addr, u32 data); +void txgbe_wr32_epcs(struct txgbe_hw *hw, u32 addr, u32 data); + +void txgbe_dcb_get_rtrup2tc(struct txgbe_hw *hw, u8 *map); +u16 txgbe_get_pcie_msix_count(struct txgbe_hw *hw); +s32 txgbe_init_hw(struct txgbe_hw *hw); +s32 txgbe_start_hw(struct txgbe_hw *hw); +s32 txgbe_clear_hw_cntrs(struct txgbe_hw *hw); +s32 txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, + u32 pba_num_size); +s32 txgbe_get_mac_addr(struct txgbe_hw *hw, u8 *mac_addr); +s32 txgbe_get_bus_info(struct txgbe_hw *hw); +void txgbe_set_pci_config_data(struct txgbe_hw *hw, u16 link_status); +void txgbe_set_lan_id_multi_port_pcie(struct txgbe_hw *hw); +s32 txgbe_stop_adapter(struct txgbe_hw *hw); + +s32 txgbe_led_on(struct txgbe_hw *hw, u32 index); +s32 txgbe_led_off(struct txgbe_hw *hw, u32 index); + +s32 txgbe_set_rar(struct txgbe_hw *hw, u32 index, u8 *addr, u64 pools, + u32 enable_addr); +s32 txgbe_clear_rar(struct txgbe_hw *hw, u32 index); +s32 txgbe_init_rx_addrs(struct txgbe_hw *hw); +s32 txgbe_update_mc_addr_list(struct txgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, + txgbe_mc_addr_itr func, bool 
clear); +s32 txgbe_update_uc_addr_list(struct txgbe_hw *hw, u8 *addr_list, + u32 addr_count, txgbe_mc_addr_itr func); +s32 txgbe_enable_mc(struct txgbe_hw *hw); +s32 txgbe_disable_mc(struct txgbe_hw *hw); +s32 txgbe_disable_sec_rx_path(struct txgbe_hw *hw); +s32 txgbe_enable_sec_rx_path(struct txgbe_hw *hw); +s32 txgbe_disable_sec_tx_path(struct txgbe_hw *hw); +s32 txgbe_enable_sec_tx_path(struct txgbe_hw *hw); + + +s32 txgbe_fc_enable(struct txgbe_hw *hw); +bool txgbe_device_supports_autoneg_fc(struct txgbe_hw *hw); +void txgbe_fc_autoneg(struct txgbe_hw *hw); +s32 txgbe_setup_fc(struct txgbe_hw *hw); + +s32 txgbe_validate_mac_addr(u8 *mac_addr); +s32 txgbe_acquire_swfw_sync(struct txgbe_hw *hw, u32 mask); +void txgbe_release_swfw_sync(struct txgbe_hw *hw, u32 mask); +s32 txgbe_disable_pcie_master(struct txgbe_hw *hw); + + +s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr); +s32 txgbe_set_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr); + +s32 txgbe_set_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq); +s32 txgbe_set_vmdq_san_mac(struct txgbe_hw *hw, u32 vmdq); +s32 txgbe_clear_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq); +s32 txgbe_insert_mac_addr(struct txgbe_hw *hw, u8 *addr, u32 vmdq); +s32 txgbe_init_uta_tables(struct txgbe_hw *hw); +s32 txgbe_set_vfta(struct txgbe_hw *hw, u32 vlan, + u32 vind, bool vlan_on); +s32 txgbe_set_vlvf(struct txgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool *vfta_changed); +s32 txgbe_clear_vfta(struct txgbe_hw *hw); +s32 txgbe_find_vlvf_slot(struct txgbe_hw *hw, u32 vlan); + +s32 txgbe_get_wwn_prefix(struct txgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix); + +void txgbe_set_mac_anti_spoofing(struct txgbe_hw *hw, bool enable, int pf); +void txgbe_set_vlan_anti_spoofing(struct txgbe_hw *hw, bool enable, int vf); +void txgbe_set_ethertype_anti_spoofing(struct txgbe_hw *hw, + bool enable, int vf); +s32 txgbe_get_device_caps(struct txgbe_hw *hw, u16 *device_caps); +void txgbe_set_rxpba(struct txgbe_hw *hw, int num_pb, u32 headroom, + int strategy); +s32 txgbe_set_fw_drv_ver(struct txgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 ver); +s32 txgbe_reset_hostif(struct txgbe_hw *hw); +u8 txgbe_calculate_checksum(u8 *buffer, u32 length); +s32 txgbe_host_interface_command(struct txgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data); + +void txgbe_clear_tx_pending(struct txgbe_hw *hw); +void txgbe_stop_mac_link_on_d3(struct txgbe_hw *hw); +bool txgbe_mng_present(struct txgbe_hw *hw); +bool txgbe_check_mng_access(struct txgbe_hw *hw); + +s32 txgbe_get_thermal_sensor_data(struct txgbe_hw *hw); +s32 txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw); +void txgbe_enable_rx(struct txgbe_hw *hw); +void txgbe_disable_rx(struct txgbe_hw *hw); +s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete); +int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit); + +#if 0 +void txgbe_disable_fdir(struct txgbe_hw *hw); +#endif + +/* @txgbe_api.h */ +s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw); +s32 txgbe_init_fdir_signature(struct txgbe_hw *hw, u32 fdirctrl); +s32 txgbe_init_fdir_perfect(struct txgbe_hw *hw, u32 fdirctrl, + bool cloud_mode); +s32 txgbe_fdir_add_signature_filter(struct txgbe_hw *hw, + union txgbe_atr_hash_dword input, + union txgbe_atr_hash_dword common, + u8 queue); +s32 txgbe_fdir_set_input_mask(struct txgbe_hw *hw, + union txgbe_atr_input *input_mask, bool cloud_mode); +s32 txgbe_fdir_write_perfect_filter(struct txgbe_hw *hw, + union txgbe_atr_input *input, + 
u16 soft_id, u8 queue, bool cloud_mode); +s32 txgbe_fdir_erase_perfect_filter(struct txgbe_hw *hw, + union txgbe_atr_input *input, + u16 soft_id); +s32 txgbe_fdir_add_perfect_filter(struct txgbe_hw *hw, + union txgbe_atr_input *input, + union txgbe_atr_input *mask, + u16 soft_id, + u8 queue, + bool cloud_mode); +void txgbe_atr_compute_perfect_hash(union txgbe_atr_input *input, + union txgbe_atr_input *mask); +u32 txgbe_atr_compute_sig_hash(union txgbe_atr_hash_dword input, + union txgbe_atr_hash_dword common); + +s32 txgbe_get_link_capabilities_sp(struct txgbe_hw *hw, + u32 *speed, bool *autoneg); +enum txgbe_media_type txgbe_get_media_type_sp(struct txgbe_hw *hw); +void txgbe_disable_tx_laser_multispeed_fiber(struct txgbe_hw *hw); +void txgbe_enable_tx_laser_multispeed_fiber(struct txgbe_hw *hw); +void txgbe_flap_tx_laser_multispeed_fiber(struct txgbe_hw *hw); +void txgbe_set_hard_rate_select_speed(struct txgbe_hw *hw, + u32 speed); +int txgbe_init_shared_code(struct txgbe_hw *hw); +s32 txgbe_setup_mac_link_sp(struct txgbe_hw *hw, u32 speed, + bool autoneg_wait_to_complete); +void txgbe_init_mac_link_ops_sp(struct txgbe_hw *hw); +s32 txgbe_reset_hw(struct txgbe_hw *hw); +s32 txgbe_identify_phy(struct txgbe_hw *hw); +s32 txgbe_init_phy_ops_sp(struct txgbe_hw *hw); +s32 txgbe_enable_rx_dma(struct txgbe_hw *hw, u32 regval); +s32 txgbe_init_ops_generic(struct txgbe_hw *hw); +s32 txgbe_setup_eee(struct txgbe_hw *hw, bool enable_eee); +int txgbe_reconfig_mac(struct txgbe_hw *hw); + +s32 txgbe_init_flash_params(struct txgbe_hw *hw); +s32 txgbe_read_flash_buffer(struct txgbe_hw *hw, u32 offset, + u32 dwords, u32 *data); +s32 txgbe_write_flash_buffer(struct txgbe_hw *hw, u32 offset, + u32 dwords, u32 *data); + +s32 txgbe_read_eeprom(struct txgbe_hw *hw, + u16 offset, u16 *data); +s32 txgbe_read_eeprom_buffer(struct txgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 txgbe_init_eeprom_params(struct txgbe_hw *hw); +s32 txgbe_update_eeprom_checksum(struct txgbe_hw *hw); +s32 txgbe_calc_eeprom_checksum(struct txgbe_hw *hw); +s32 txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, + u16 *checksum_val); +s32 txgbe_update_flash(struct txgbe_hw *hw); +int txgbe_upgrade_flash(struct txgbe_hw *hw, u32 region, + const u8 *data, u32 size); +s32 txgbe_write_ee_hostif_buffer(struct txgbe_hw *hw, + u16 offset, u16 words, u16 *data); +s32 txgbe_write_ee_hostif(struct txgbe_hw *hw, u16 offset, + u16 data); +s32 txgbe_read_ee_hostif_buffer(struct txgbe_hw *hw, + u16 offset, u16 words, u16 *data); +s32 txgbe_read_ee_hostif(struct txgbe_hw *hw, u16 offset, u16 *data); +u32 txgbe_rd32_epcs(struct txgbe_hw *hw, u32 addr); +void txgbe_wr32_epcs(struct txgbe_hw *hw, u32 addr, u32 data); +void txgbe_wr32_ephy(struct txgbe_hw *hw, u32 addr, u32 data); +u32 rd32_ephy(struct txgbe_hw *hw, u32 addr); + +s32 txgbe_upgrade_flash_hostif(struct txgbe_hw *hw, u32 region, + const u8 *data, u32 size); + +s32 txgbe_close_notify(struct txgbe_hw *hw); +s32 txgbe_open_notify(struct txgbe_hw *hw); + +s32 txgbe_set_link_to_kr(struct txgbe_hw *hw, bool autoneg); +s32 txgbe_set_link_to_kx4(struct txgbe_hw *hw, bool autoneg); + +s32 txgbe_set_link_to_kx(struct txgbe_hw *hw, + u32 speed, + bool autoneg); +int txgbe_flash_read_dword(struct txgbe_hw *hw, u32 addr, u32 *data); +s32 txgbe_hic_write_lldp(struct txgbe_hw *hw,u32 open); +int txgbe_is_lldp(struct txgbe_hw *hw); +s32 txgbe_set_sgmii_an37_ability(struct txgbe_hw *hw); +int txgbe_set_pps(struct txgbe_hw *hw, bool enable, u64 nsec, u64 cycles); +void 
txgbe_hic_write_autoneg_status(struct txgbe_hw *hw,bool autoneg); +int txgbe_enable_rx_adapter(struct txgbe_hw *hw); + +extern s32 txgbe_init_ops_aml(struct txgbe_hw *hw); +extern s32 txgbe_init_ops_aml40(struct txgbe_hw *hw); + +void txgbe_set_queue_rate_limit(struct txgbe_hw *hw, int queue, u16 max_tx_rate); + +int txgbe_hic_notify_led_active(struct txgbe_hw *hw, int active_flag); +bool txgbe_is_backplane(struct txgbe_hw *hw); #endif /* _TXGBE_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_kcompat.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_kcompat.c new file mode 100644 index 0000000000000000000000000000000000000000..dd3b6e66bc49dcfc24bde557d0c5891e5f1989ab --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_kcompat.c @@ -0,0 +1,3015 @@ +/* + * WangXun 10 Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on kcompat.c, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "txgbe.h" +#include "txgbe_kcompat.h" + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 8)) || defined __VMKLNX__ +/* From lib/vsprintf.c */ +#include + +static int skip_atoi(const char **s) +{ + int i = 0; + + while (isdigit(**s)) + i = i*10 + *((*s)++) - '0'; + return i; +} + +#define _kc_ZEROPAD 1 /* pad with zero */ +#define _kc_SIGN 2 /* unsigned/signed long */ +#define _kc_PLUS 4 /* show plus */ +#define _kc_SPACE 8 /* space if plus */ +#define _kc_LEFT 16 /* left justified */ +#define _kc_SPECIAL 32 /* 0x */ +#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ + +static char *number(char *buf, char *end, long long num, int base, int size, int precision, int type) +{ + char c, sign, tmp[66]; + const char *digits; + const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; + const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; + int i; + + digits = (type & _kc_LARGE) ? large_digits : small_digits; + if (type & _kc_LEFT) + type &= ~_kc_ZEROPAD; + if (base < 2 || base > 36) + return 0; + c = (type & _kc_ZEROPAD) ? 
'0' : ' '; + sign = 0; + if (type & _kc_SIGN) { + if (num < 0) { + sign = '-'; + num = -num; + size--; + } else if (type & _kc_PLUS) { + sign = '+'; + size--; + } else if (type & _kc_SPACE) { + sign = ' '; + size--; + } + } + if (type & _kc_SPECIAL) { + if (base == 16) + size -= 2; + else if (base == 8) + size--; + } + i = 0; + if (num == 0) + tmp[i++]='0'; + else while (num != 0) + tmp[i++] = digits[do_div(num,base)]; + if (i > precision) + precision = i; + size -= precision; + if (!(type&(_kc_ZEROPAD+_kc_LEFT))) { + while (size-- > 0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + } + if (sign) { + if (buf <= end) + *buf = sign; + ++buf; + } + if (type & _kc_SPECIAL) { + if (base == 8) { + if (buf <= end) + *buf = '0'; + ++buf; + } else if (base == 16) { + if (buf <= end) + *buf = '0'; + ++buf; + if (buf <= end) + *buf = digits[33]; + ++buf; + } + } + if (!(type & _kc_LEFT)) { + while (size-- > 0) { + if (buf <= end) + *buf = c; + ++buf; + } + } + while (i < precision--) { + if (buf <= end) + *buf = '0'; + ++buf; + } + while (i-- > 0) { + if (buf <= end) + *buf = tmp[i]; + ++buf; + } + while (size-- > 0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + return buf; +} + +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args) +{ + int len; + unsigned long long num; + int i, base; + char *str, *end, c; + const char *s; + + int flags; /* flags to number() */ + + int field_width; /* width of output field */ + int precision; /* min. # of digits for integers; max + number of chars for from string */ + int qualifier; /* 'h', 'l', or 'L' for integer fields */ + /* 'z' support added 23/7/1999 S.H. */ + /* 'z' changed to 'Z' --davidm 1/25/99 */ + + str = buf; + end = buf + size - 1; + + if (end < buf - 1) { + end = ((void *) -1); + size = end - buf + 1; + } + + for (; *fmt ; ++fmt) { + if (*fmt != '%') { + if (str <= end) + *str = *fmt; + ++str; + continue; + } + + /* process flags */ + flags = 0; + repeat: + ++fmt; /* this also skips first '%' */ + switch (*fmt) { + case '-': flags |= _kc_LEFT; goto repeat; + case '+': flags |= _kc_PLUS; goto repeat; + case ' ': flags |= _kc_SPACE; goto repeat; + case '#': flags |= _kc_SPECIAL; goto repeat; + case '0': flags |= _kc_ZEROPAD; goto repeat; + } + + /* get field width */ + field_width = -1; + if (isdigit(*fmt)) + field_width = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + field_width = va_arg(args, int); + if (field_width < 0) { + field_width = -field_width; + flags |= _kc_LEFT; + } + } + + /* get the precision */ + precision = -1; + if (*fmt == '.') { + ++fmt; + if (isdigit(*fmt)) + precision = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + precision = va_arg(args, int); + } + if (precision < 0) + precision = 0; + } + + /* get the conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt == 'Z') { + qualifier = *fmt; + ++fmt; + } + + /* default base */ + base = 10; + + switch (*fmt) { + case 'c': + if (!(flags & _kc_LEFT)) { + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + } + c = (unsigned char) va_arg(args, int); + if (str <= end) + *str = c; + ++str; + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 's': + s = va_arg(args, char *); + if (!s) + s = ""; + + len = strnlen(s, precision); + + if (!(flags & _kc_LEFT)) { + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + } + for (i = 0; i < len; ++i) { + if (str <= end) 
+ *str = *s; + ++str; ++s; + } + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 'p': + if ('M' == *(fmt+1)) { + str = get_mac(str, end, va_arg(args, unsigned char *)); + fmt++; + } else { + if (field_width == -1) { + field_width = 2*sizeof(void *); + flags |= _kc_ZEROPAD; + } + str = number(str, end, + (unsigned long) va_arg(args, void *), + 16, field_width, precision, flags); + } + continue; + + case 'n': + /* FIXME: + * What does C99 say about the overflow case here? */ + if (qualifier == 'l') { + long *ip = va_arg(args, long *); + *ip = (str - buf); + } else if (qualifier == 'Z') { + size_t *ip = va_arg(args, size_t *); + *ip = (str - buf); + } else { + int *ip = va_arg(args, int *); + *ip = (str - buf); + } + continue; + + case '%': + if (str <= end) + *str = '%'; + ++str; + continue; + + /* integer number formats - set up the flags and "break" */ + case 'o': + base = 8; + break; + + case 'X': + flags |= _kc_LARGE; + case 'x': + base = 16; + break; + + case 'd': + case 'i': + flags |= _kc_SIGN; + case 'u': + break; + + default: + if (str <= end) + *str = '%'; + ++str; + if (*fmt) { + if (str <= end) + *str = *fmt; + ++str; + } else { + --fmt; + } + continue; + } + if (qualifier == 'L') + num = va_arg(args, long long); + else if (qualifier == 'l') { + num = va_arg(args, unsigned long); + if (flags & _kc_SIGN) + num = (signed long) num; + } else if (qualifier == 'Z') { + num = va_arg(args, size_t); + } else if (qualifier == 'h') { + num = (unsigned short) va_arg(args, int); + if (flags & _kc_SIGN) + num = (signed short) num; + } else { + num = va_arg(args, unsigned int); + if (flags & _kc_SIGN) + num = (signed int) num; + } + str = number(str, end, num, base, + field_width, precision, flags); + } + if (str <= end) + *str = '\0'; + else if (size > 0) + /* don't write out a null byte if the buf size is zero */ + *end = '\0'; + /* the trailing null byte doesn't count towards the total + * ++str; + */ + return str-buf; +} + +int _kc_snprintf(char *buf, size_t size, const char *fmt, ...) 
+{ + va_list args; + int i; + + va_start(args, fmt); + i = _kc_vsnprintf(buf, size, fmt, args); + va_end(args); + return i; +} +#endif /* < 2.4.8 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 13)) + +/**************************************/ +/* PCI DMA MAPPING */ + +#if defined(CONFIG_HIGHMEM) + +#ifndef PCI_DRAM_OFFSET +#define PCI_DRAM_OFFSET 0 +#endif + +u64 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, + size_t size, int direction) +{ + return (((u64) (page - mem_map) << PAGE_SHIFT) + offset + + PCI_DRAM_OFFSET); +} + +#else /* CONFIG_HIGHMEM */ + +u64 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, + size_t size, int direction) +{ + return pci_map_single(dev, (void *)page_address(page) + offset, size, + direction); +} + +#endif /* CONFIG_HIGHMEM */ + +void +_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, + int direction) +{ + return pci_unmap_single(dev, dma_addr, size, direction); +} + +#endif /* 2.4.13 => 2.4.3 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3)) + +/**************************************/ +/* PCI DRIVER API */ + +int +_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask) +{ + if (!pci_dma_supported(dev, mask)) + return -EIO; + dev->dma_mask = mask; + return 0; +} + +int +_kc_pci_request_regions(struct pci_dev *dev, char *res_name) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) { + if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { + pci_release_regions(dev); + return -EBUSY; + } + } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) { + if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { + pci_release_regions(dev); + return -EBUSY; + } + } + } + return 0; +} + +void +_kc_pci_release_regions(struct pci_dev *dev) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) + release_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + + else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) + release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + } +} + +/**************************************/ +/* NETWORK DRIVER API */ + +struct net_device * +_kc_alloc_etherdev(int sizeof_priv) +{ + struct net_device *dev; + int alloc_size; + + alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31; + dev = kzalloc(alloc_size, GFP_KERNEL); + if (!dev) + return NULL; + + if (sizeof_priv) + dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31); + dev->name[0] = '\0'; + ether_setup(dev); + + return dev; +} + +int +_kc_is_valid_ether_addr(u8 *addr) +{ + const char zaddr[6] = { 0, }; + + return !(addr[0] & 1) && memcmp(addr, zaddr, 6); +} + +#endif /* 2.4.3 => 2.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6)) + +int +_kc_pci_set_power_state(struct pci_dev *dev, int state) +{ + return 0; +} + +int +_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable) +{ + return 0; +} + +#endif /* 2.4.6 => 2.4.3 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +void 
_kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, + int off, int size) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + frag->page = page; + frag->page_offset = off; + frag->size = size; + skb_shinfo(skb)->nr_frags = i + 1; +} + +/* + * Original Copyright: + * find_next_bit.c: fallback find next bit implementation + * + * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + ffs(tmp); +} + +size_t _kc_strlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = strlen(src); + + if (size) { + size_t len = (ret >= size) ? size - 1 : ret; + memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; +} + +#ifndef do_div +#if BITS_PER_LONG == 32 +uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base) +{ + uint64_t rem = *n; + uint64_t b = base; + uint64_t res, d = 1; + uint32_t high = rem >> 32; + + /* Reduce the thing a bit first */ + res = 0; + if (high >= base) { + high /= base; + res = (uint64_t) high << 32; + rem -= (uint64_t) (high*base) << 32; + } + + while ((int64_t)b > 0 && b < rem) { + b = b+b; + d = d+d; + } + + do { + if (rem >= b) { + rem -= b; + res += d; + } + b >>= 1; + d >>= 1; + } while (d); + + *n = res; + return rem; +} +#endif /* BITS_PER_LONG == 32 */ +#endif /* do_div */ +#endif /* 2.6.0 => 2.4.6 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 4)) +int _kc_scnprintf(char *buf, size_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i = vsnprintf(buf, size, fmt, args); + va_end(args); + return (i >= size) ? 
(size - 1) : i; +} +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10)) +DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1}; +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 13)) +char *_kc_kstrdup(const char *s, unsigned int gfp) +{ + size_t len; + char *buf; + + if (!s) + return NULL; + + len = strlen(s) + 1; + buf = kmalloc(len, gfp); + if (buf) + memcpy(buf, s, len); + return buf; +} +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)) +void *_kc_kzalloc(size_t size, int flags) +{ + void *ret = kmalloc(size, flags); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif /* <= 2.6.13 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)) +int _kc_skb_pad(struct sk_buff *skb, int pad) +{ + int ntail; + + /* If the skbuff is non linear tailroom is always zero.. */ + if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { + memset(skb->data+skb->len, 0, pad); + return 0; + } + + ntail = skb->data_len + pad - (skb->end - skb->tail); + if (likely(skb_cloned(skb) || ntail > 0)) { + if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC)) + goto free_skb; + } + +#ifdef MAX_SKB_FRAGS + if (skb_is_nonlinear(skb) && + !__pskb_pull_tail(skb, skb->data_len)) + goto free_skb; + +#endif + memset(skb->data + skb->len, 0, pad); + return 0; + +free_skb: + kfree_skb(skb); + return -ENOMEM; +} + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 4))) +int _kc_pci_save_state(struct pci_dev *pdev) +{ + struct adapter_struct *adapter = pci_get_drvdata(pdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset, pcie_link_status; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + /* no ->dev for 2.4 kernels */ + WARN_ON(pdev->dev.driver_data == NULL); +#endif + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset) { + if (!pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = PCIE_CONFIG_SPACE_LEN; + } + pci_config_space_ich8lan(); +#ifdef HAVE_PCI_ERS + if (adapter->config_space == NULL) +#else + WARN_ON(adapter->config_space != NULL); +#endif + adapter->config_space = kmalloc(size, GFP_KERNEL); + if (!adapter->config_space) { + printk(KERN_ERR "Out of memory in pci_save_state\n"); + return -ENOMEM; + } + for (i = 0; i < (size / 4); i++) + pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]); + return 0; +} + +void _kc_pci_restore_state(struct pci_dev *pdev) +{ + struct adapter_struct *adapter = pci_get_drvdata(pdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset; + u16 pcie_link_status; + + if (adapter->config_space != NULL) { + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset && + !pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = PCIE_CONFIG_SPACE_LEN; + + pci_config_space_ich8lan(); + for (i = 0; i < (size / 4); i++) + pci_write_config_dword(pdev, i * 4, adapter->config_space[i]); +#ifndef HAVE_PCI_ERS + kfree(adapter->config_space); + adapter->config_space = NULL; +#endif + } +} +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +void _kc_free_netdev(struct net_device *netdev) +{ + 
struct adapter_struct *adapter = netdev_priv(netdev); + + kfree(adapter->config_space); +#ifdef CONFIG_SYSFS + if (netdev->reg_state == NETREG_UNINITIALIZED) { + kfree((char *)netdev - netdev->padded); + } else { + BUG_ON(netdev->reg_state != NETREG_UNREGISTERED); + netdev->reg_state = NETREG_RELEASED; + class_device_put(&netdev->class_dev); + } +#else + kfree((char *)netdev - netdev->padded); +#endif +} +#endif + +void *_kc_kmemdup(const void *src, size_t len, unsigned gfp) +{ + void *p; + + p = kzalloc(len, gfp); + if (p) + memcpy(p, src, len); + return p; +} +#endif /* <= 2.6.19 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21)) +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev) +{ + return ((struct adapter_struct *)netdev_priv(netdev))->pdev; +} +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22)) +/* hexdump code taken from lib/hexdump.c */ +static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize, + int groupsize, unsigned char *linebuf, + size_t linebuflen, bool ascii) +{ + const u8 *ptr = buf; + u8 ch; + int j, lx = 0; + int ascii_column; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + if (!len) + goto nil; + if (len > rowsize) /* limit to one line at a time */ + len = rowsize; + if ((len % groupsize) != 0) /* no mixed size output */ + groupsize = 1; + + switch (groupsize) { + case 8: { + const u64 *ptr8 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%16.16llx", j ? " " : "", + (unsigned long long)*(ptr8 + j)); + ascii_column = 17 * ngroups + 2; + break; + } + + case 4: { + const u32 *ptr4 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%8.8x", j ? " " : "", *(ptr4 + j)); + ascii_column = 9 * ngroups + 2; + break; + } + + case 2: { + const u16 *ptr2 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%4.4x", j ? " " : "", *(ptr2 + j)); + ascii_column = 5 * ngroups + 2; + break; + } + + default: + for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { + ch = ptr[j]; + linebuf[lx++] = hex_asc(ch >> 4); + linebuf[lx++] = hex_asc(ch & 0x0f); + linebuf[lx++] = ' '; + } + if (j) + lx--; + + ascii_column = 3 * rowsize + 2; + break; + } + if (!ascii) + goto nil; + + while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) + linebuf[lx++] = ' '; + for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) + linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? 
ptr[j] + : '.'; +nil: + linebuf[lx++] = '\0'; +} + +void _kc_print_hex_dump(const char *level, + const char *prefix_str, int prefix_type, + int rowsize, int groupsize, + const void *buf, size_t len, bool ascii) +{ + const u8 *ptr = buf; + int i, linelen, remaining = len; + unsigned char linebuf[200]; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + for (i = 0; i < len; i += rowsize) { + linelen = min(remaining, rowsize); + remaining -= rowsize; + _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, + linebuf, sizeof(linebuf), ascii); + + switch (prefix_type) { + case DUMP_PREFIX_ADDRESS: + printk("%s%s%*p: %s\n", level, prefix_str, + (int)(2 * sizeof(void *)), ptr + i, linebuf); + break; + case DUMP_PREFIX_OFFSET: + printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); + break; + default: + printk("%s%s%s\n", level, prefix_str, linebuf); + break; + } + } +} + +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)) +int txgbe_dcb_netlink_register(void) +{ + return 0; +} + +int txgbe_dcb_netlink_unregister(void) +{ + return 0; +} + +int txgbe_copy_dcb_cfg(struct txgbe_adapter __always_unused *adapter, int __always_unused tc_max) +{ + return 0; +} +#endif /* < 2.6.23 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) +#ifdef NAPI +struct net_device *napi_to_poll_dev(const struct napi_struct *napi) +{ + struct adapter_q_vector *q_vector = container_of(napi, + struct adapter_q_vector, + napi); + return &q_vector->poll_dev; +} + +int __kc_adapter_clean(struct net_device *netdev, int *budget) +{ + int work_done; + int work_to_do = min(*budget, netdev->quota); + /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */ + struct napi_struct *napi = netdev->priv; + work_done = napi->poll(napi, work_to_do); + *budget -= work_done; + netdev->quota -= work_done; + return (work_done >= work_to_do) ? 
1 : 0; +} +#endif /* NAPI */ +#endif /* <= 2.6.24 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)) +void _kc_pci_disable_link_state(struct pci_dev *pdev, int state) +{ + struct pci_dev *parent = pdev->bus->self; + u16 link_state; + int pos; + + if (!parent) + return; + + pos = pci_find_capability(parent, PCI_CAP_ID_EXP); + if (pos) { + pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state); + link_state &= ~state; + pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state); + } +} +#endif /* < 2.6.26 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)) +#ifdef HAVE_TX_MQ +void _kc_netif_tx_stop_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_stop_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_stop_subqueue(netdev, i); +} +void _kc_netif_tx_wake_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_wake_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_wake_subqueue(netdev, i); +} +void _kc_netif_tx_start_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_start_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_start_subqueue(netdev, i); +} +#endif /* HAVE_TX_MQ */ + +void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...) +{ + va_list args; + + printk(KERN_WARNING "------------[ cut here ]------------\n"); + printk(KERN_WARNING "WARNING: at %s:%d \n", file, line); + va_start(args, fmt); + vprintk(fmt, args); + va_end(args); + + dump_stack(); +} +#endif /* __VMKLNX__ */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)) + +int +_kc_pci_prepare_to_sleep(struct pci_dev *dev) +{ + pci_power_t target_state; + int error; + + target_state = pci_choose_state(dev, PMSG_SUSPEND); + + pci_enable_wake(dev, target_state, true); + + error = pci_set_power_state(dev, target_state); + + if (error) + pci_enable_wake(dev, target_state, false); + + return error; +} + +int +_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable) +{ + int err; + + err = pci_enable_wake(dev, PCI_D3cold, enable); + if (err) + goto out; + + err = pci_enable_wake(dev, PCI_D3hot, enable); + +out: + return err; +} +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)) +static void __kc_pci_set_master(struct pci_dev *pdev, bool enable) +{ + u16 old_cmd, cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &old_cmd); + if (enable) + cmd = old_cmd | PCI_COMMAND_MASTER; + else + cmd = old_cmd & ~PCI_COMMAND_MASTER; + if (cmd != old_cmd) { + dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n", + enable ? 
"enabling" : "disabling"); + pci_write_config_word(pdev, PCI_COMMAND, cmd); + } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 7)) + pdev->is_busmaster = enable; +#endif +} + +void _kc_pci_clear_master(struct pci_dev *dev) +{ + __kc_pci_set_master(dev, false); +} +#endif /* < 2.6.29 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34)) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 0)) +int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev) +{ + int num_vf = 0; +#ifdef CONFIG_PCI_IOV + struct pci_dev *vfdev; + + /* loop through all ethernet devices starting at PF dev */ + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL); + while (vfdev) { + if (vfdev->is_virtfn && vfdev->physfn == dev) + num_vf++; + + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev); + } + +#endif + return num_vf; +} +#endif /* RHEL_RELEASE_CODE */ +#endif /* < 2.6.34 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)) +#ifdef HAVE_TX_MQ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 0))) +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +int _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) +{ + unsigned int real_num = dev->real_num_tx_queues; + struct Qdisc *qdisc; + int i; + + if (txq < 1 || txq > dev->num_tx_queues) + return -EINVAL; + + else if (txq > real_num) + dev->real_num_tx_queues = txq; + else if (txq < real_num) { + dev->real_num_tx_queues = txq; + for (i = txq; i < dev->num_tx_queues; i++) { + qdisc = netdev_get_tx_queue(dev, i)->qdisc; + if (qdisc) { + spin_lock_bh(qdisc_lock(qdisc)); + qdisc_reset(qdisc); + spin_unlock_bh(qdisc_lock(qdisc)); + } + } + } + + return 0; +} +#endif /* CONFIG_NETDEVICES_MULTIQUEUE */ +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ +#endif /* HAVE_TX_MQ */ + +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count) +{ + loff_t pos = *ppos; + size_t res; + + if (pos < 0) + return -EINVAL; + if (pos >= available || !count) + return 0; + if (count > available - pos) + count = available - pos; + res = copy_from_user(to + pos, from, count); + if (res == count) + return -EFAULT; + count -= res; + *ppos = pos + count; + return count; +} + +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) +static const u32 _kc_flags_dup_features = + (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH); + +u32 _kc_ethtool_op_get_flags(struct net_device *dev) +{ + return dev->features & _kc_flags_dup_features; +} + +int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) +{ + if (data & ~supported) + return -EINVAL; + + dev->features = ((dev->features & ~_kc_flags_dup_features) | + (data & _kc_flags_dup_features)); + return 0; +} +#endif /* < 2.6.36 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 0))) +#ifdef HAVE_NETDEV_SELECT_QUEUE +#include +#include + +u16 ___kc_skb_tx_hash(struct net_device *dev, const struct sk_buff *skb, + u16 num_tx_queues) +{ + u32 hash; + u16 qoffset = 0; + u16 qcount = num_tx_queues; + + if (skb_rx_queue_recorded(skb)) { + hash = skb_get_rx_queue(skb); + while (unlikely(hash >= num_tx_queues)) + hash -= num_tx_queues; + return hash; + } + + if (netdev_get_num_tc(dev)) { + struct adapter_struct *kc_adapter = netdev_priv(dev); + + if 
(skb->priority == TC_PRIO_CONTROL) { + qoffset = kc_adapter->dcb_tc - 1; + } else { + qoffset = skb->vlan_tci; + qoffset &= TXGBE_TX_FLAGS_VLAN_PRIO_MASK; + qoffset >>= 13; + } + + qcount = kc_adapter->ring_feature[RING_F_RSS].indices; + qoffset *= qcount; + } + + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else +#ifdef NETIF_F_RXHASH + hash = (__force u16) skb->protocol ^ skb->rxhash; +#else + hash = skb->protocol; +#endif + + hash = jhash_1word(hash, _kc_hashrnd); + + return (u16) (((u64) hash * qcount) >> 32) + qoffset; +} +#endif /* HAVE_NETDEV_SELECT_QUEUE */ + +u8 _kc_netdev_get_num_tc(struct net_device *dev) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + if (kc_adapter->flags & TXGBE_FLAG_DCB_ENABLED) + return kc_adapter->dcb_tc; + else + return 0; +} + +int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + + if (num_tc > TXGBE_DCB_MAX_TRAFFIC_CLASS) + return -EINVAL; + + kc_adapter->dcb_tc = num_tc; + + return 0; +} + +u8 _kc_netdev_get_prio_tc_map(struct net_device __maybe_unused *dev, u8 __maybe_unused up) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + + return txgbe_dcb_get_tc_from_up(&kc_adapter->dcb_cfg, 0, up); +} + +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ +#endif /* < 2.6.39 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) +void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, + int off, int size, unsigned int truesize) +{ + skb_fill_page_desc(skb, i, page, off, size); + skb->len += size; + skb->data_len += size; + skb->truesize += truesize; +} + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0)) +int _kc_simple_open(struct inode *inode, struct file *file) +{ + if (inode->i_private) + file->private_data = inode->i_private; + + return 0; +} +#endif /* SLE_VERSION < 11,3,0 */ + +#endif /* < 3.4.0 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) +static inline int __kc_pcie_cap_version(struct pci_dev *dev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return 0; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); + return reg16 & PCI_EXP_FLAGS_VERS; +} + +static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev) +{ + return true; +} + +static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_ENDPOINT || + type == PCI_EXP_TYPE_LEG_END; +} + +static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + int pos; + u16 pcie_flags_reg; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return false; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + (type == PCI_EXP_TYPE_DOWNSTREAM && + pcie_flags_reg & PCI_EXP_FLAGS_SLOT); +} + +static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_RC_EC; +} + +static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos) +{ + if (!pci_is_pcie(dev)) + return 
false; + + switch (pos) { + case PCI_EXP_FLAGS_TYPE: + return true; + case PCI_EXP_DEVCAP: + case PCI_EXP_DEVCTL: + case PCI_EXP_DEVSTA: + return __kc_pcie_cap_has_devctl(dev); + case PCI_EXP_LNKCAP: + case PCI_EXP_LNKCTL: + case PCI_EXP_LNKSTA: + return __kc_pcie_cap_has_lnkctl(dev); + case PCI_EXP_SLTCAP: + case PCI_EXP_SLTCTL: + case PCI_EXP_SLTSTA: + return __kc_pcie_cap_has_sltctl(dev); + case PCI_EXP_RTCTL: + case PCI_EXP_RTCAP: + case PCI_EXP_RTSTA: + return __kc_pcie_cap_has_rtctl(dev); + case PCI_EXP_DEVCAP2: + case PCI_EXP_DEVCTL2: + case PCI_EXP_LNKCAP2: + case PCI_EXP_LNKCTL2: + case PCI_EXP_LNKSTA2: + return __kc_pcie_cap_version(dev) > 1; + default: + return false; + } +} + +/* + * Note that these accessor functions are only for the "PCI Express + * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the + * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) + */ +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) +{ + int ret; + + *val = 0; + if (pos & 1) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_word() fails, it may + * have been written as 0xFFFF if hardware error happens + * during pci_read_config_word(). + */ + if (ret) + *val = 0; + return ret; + } + + /* + * For Functions that do not implement the Slot Capabilities, + * Slot Status, and Slot Control registers, these spaces must + * be hardwired to 0b, with the exception of the Presence Detect + * State bit in the Slot Status register of Downstream Ports, + * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) + */ + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + +int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) +{ + int ret; + + *val = 0; + if (pos & 3) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_dword() fails, it may + * have been written as 0xFFFFFFFF if hardware error happens + * during pci_read_config_dword(). + */ + if (ret) + *val = 0; + return ret; + } + + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + + +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) +{ + if (pos & 1) + return -EINVAL; + + if (!__kc_pcie_capability_reg_implemented(dev, pos)) + return 0; + + return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); +} + +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set) +{ + int ret; + u16 val; + + ret = __kc_pcie_capability_read_word(dev, pos, &val); + if (!ret) { + val &= ~clear; + val |= set; + ret = __kc_pcie_capability_write_word(dev, pos, val); + } + + return ret; +} + +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear) +{ + return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0); +} +#endif /* < 3.7.0 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) +#ifdef CONFIG_XPS +#if NR_CPUS < 64 +#define _KC_MAX_XPS_CPUS NR_CPUS +#else +#define _KC_MAX_XPS_CPUS 64 +#endif + +/* + * netdev_queue sysfs structures and functions. 
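+ *
+ * __kc_netif_set_xps_queue() below emulates netif_set_xps_queue() on
+ * pre-3.9 kernels: it locates the Tx queue's "xps_cpus" sysfs attribute
+ * among the kobject's default attributes and writes the CPU mask string
+ * through that attribute's store() callback.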
+ */ +struct _kc_netdev_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_queue *queue, + struct _kc_netdev_queue_attribute *attr, char *buf); + ssize_t (*store)(struct netdev_queue *queue, + struct _kc_netdev_queue_attribute *attr, const char *buf, size_t len); +}; + +#define to_kc_netdev_queue_attr(_attr) container_of(_attr, \ + struct _kc_netdev_queue_attribute, attr) + +int __kc_netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, + u16 index) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, index); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38)) + /* Redhat requires some odd extended netdev structures */ + struct netdev_tx_queue_extended *txq_ext = + netdev_extended(dev)->_tx_ext + index; + struct kobj_type *ktype = txq_ext->kobj.ktype; +#else + struct kobj_type *ktype = txq->kobj.ktype; +#endif + struct _kc_netdev_queue_attribute *xps_attr; + struct attribute *attr = NULL; + int i, len, err; +#define _KC_XPS_BUFLEN (DIV_ROUND_UP(_KC_MAX_XPS_CPUS, 32) * 9) + char buf[_KC_XPS_BUFLEN]; + + if (!ktype) + return -ENOMEM; + + /* attempt to locate the XPS attribute in the Tx queue */ + for (i = 0; (attr = ktype->default_attrs[i]); i++) { + if (!strcmp("xps_cpus", attr->name)) + break; + } + + /* if we did not find it return an error */ + if (!attr) + return -EINVAL; + + /* copy the mask into a string */ + len = bitmap_scnprintf(buf, _KC_XPS_BUFLEN, + cpumask_bits(mask), _KC_MAX_XPS_CPUS); + if (!len) + return -ENOMEM; + + xps_attr = to_kc_netdev_queue_attr(attr); + + /* Store the XPS value using the SYSFS store call */ + err = xps_attr->store(txq, xps_attr, buf, len); + + /* we only had an error on err < 0 */ + return (err < 0) ? err : 0; +} +#endif /* CONFIG_XPS */ +#ifdef HAVE_NETDEV_SELECT_QUEUE +static inline int kc_get_xps_queue(struct net_device *dev, struct sk_buff *skb) +{ +#ifdef CONFIG_XPS + struct xps_dev_maps *dev_maps; + struct xps_map *map; + int queue_index = -1; + + rcu_read_lock(); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38)) + /* Redhat requires some odd extended netdev structures */ + dev_maps = rcu_dereference(netdev_extended(dev)->xps_maps); +#else + dev_maps = rcu_dereference(dev->xps_maps); +#endif + if (dev_maps) { + map = rcu_dereference( + dev_maps->cpu_map[raw_smp_processor_id()]); + if (map) { + if (map->len == 1) + queue_index = map->queues[0]; + else { + u32 hash; + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else + hash = (__force u16) skb->protocol ^ + skb->rxhash; + hash = jhash_1word(hash, _kc_hashrnd); + queue_index = map->queues[ + ((u64)hash * map->len) >> 32]; + } + if (unlikely(queue_index >= dev->real_num_tx_queues)) + queue_index = -1; + } + } + rcu_read_unlock(); + + return queue_index; +#else + struct adapter_struct *kc_adapter = netdev_priv(dev); + int queue_index = -1; + + if (kc_adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE) { + queue_index = skb_rx_queue_recorded(skb) ? 
+ skb_get_rx_queue(skb) : + smp_processor_id(); + while (unlikely(queue_index >= dev->real_num_tx_queues)) + queue_index -= dev->real_num_tx_queues; + return queue_index; + } + + return -1; +#endif +} + +u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + int queue_index = sk_tx_queue_get(sk); + int new_index; + + if (queue_index >= 0 && queue_index < dev->real_num_tx_queues) { +#ifdef CONFIG_XPS + if (!skb->ooo_okay) +#endif + return queue_index; + } + + new_index = kc_get_xps_queue(dev, skb); + if (new_index < 0) + new_index = skb_tx_hash(dev, skb); + + if (queue_index != new_index && sk) { + struct dst_entry *dst = + rcu_dereference(sk->sk_dst_cache); + + if (dst && skb_dst(skb) == dst) + sk_tx_queue_set(sk, new_index); + + } + + return new_index; +} + +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#endif /* 3.9.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) +#ifdef HAVE_FDB_OPS +#ifdef USE_CONST_DEV_UC_CHAR +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, + u16 flags) +#else +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 flags) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. + */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_add_excl(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(dev, addr); + + /* Only return duplicate errors if NLM_F_EXCL is set */ + if (err == -EEXIST && !(flags & NLM_F_EXCL)) + err = 0; + + return err; +} + +#ifdef USE_CONST_DEV_UC_CHAR +#ifdef HAVE_FDB_DEL_NLATTR +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr) +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr) +#endif +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. 
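	 * Only static (NUD_PERMANENT) entries are handled here; the address
	 * is simply removed from the device's unicast or multicast list.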
+ */ + if (!(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_del(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_del(dev, addr); + + return err; +} + +#endif /* HAVE_FDB_OPS */ +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev) +{ + unsigned int vfs_assigned = 0; +#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED + int pos; + struct pci_dev *vfdev; + unsigned short dev_id; + + /* only search if we are a PF */ + if (!dev->is_physfn) + return 0; + + /* find SR-IOV capability */ + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return 0; + + /* + * determine the device ID for the VFs, the vendor ID will be the + * same as the PF so there is no need to check for that one + */ + pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id); + + /* loop through all the VFs to see if we own any that are assigned */ + vfdev = pci_get_device(dev->vendor, dev_id, NULL); + while (vfdev) { + /* + * It is considered assigned if it is a virtual function with + * our dev as the physical function and the assigned bit is set + */ + if (vfdev->is_virtfn && (vfdev->physfn == dev) && + (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) + vfs_assigned++; + + vfdev = pci_get_device(dev->vendor, dev_id, vfdev); + } + +#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */ + return vfs_assigned; +} + +#endif /* CONFIG_PCI_IOV */ +#endif /* 3.10.0 */ + +static const unsigned char __maybe_unused pcie_link_speed[] = { + PCI_SPEED_UNKNOWN, /* 0 */ + PCIE_SPEED_2_5GT, /* 1 */ + PCIE_SPEED_5_0GT, /* 2 */ + PCIE_SPEED_8_0GT, /* 3 */ + PCIE_SPEED_16_0GT, /* 4 */ + PCI_SPEED_UNKNOWN, /* 5 */ + PCI_SPEED_UNKNOWN, /* 6 */ + PCI_SPEED_UNKNOWN, /* 7 */ + PCI_SPEED_UNKNOWN, /* 8 */ + PCI_SPEED_UNKNOWN, /* 9 */ + PCI_SPEED_UNKNOWN, /* A */ + PCI_SPEED_UNKNOWN, /* B */ + PCI_SPEED_UNKNOWN, /* C */ + PCI_SPEED_UNKNOWN, /* D */ + PCI_SPEED_UNKNOWN, /* E */ + PCI_SPEED_UNKNOWN /* F */ +}; + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) +int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + int ret; + + *speed = PCI_SPEED_UNKNOWN; + *width = PCIE_LNK_WIDTH_UNKNOWN; + + while (dev) { + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + + ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + if (ret) + return ret; + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + if (next_speed < *speed) + *speed = next_speed; + + if (next_width < *width) + *width = next_width; + + dev = dev->bus->self; + } + + return 0; +} + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7)) +int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev) +{ + int i; + u16 status; + + /* Wait for Transaction Pending bit clean */ + for (i = 0; i < 4; i++) { + if (i) + msleep((1 << (i - 1)) * 100); + + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); + if (!(status & PCI_EXP_DEVSTA_TRPND)) + return 1; + } + + return 0; +} +#endif /* crs_timeout) { + printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not " + "responding\n", pci_domain_nr(bus), + bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn)); + return false; + } + } + + return true; +} + +bool _kc_pci_device_is_present(struct pci_dev *pdev) +{ 
+ u32 v; + + return _kc_pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); +} +#endif /* nexthdr; + unsigned int len; + bool found; + +#define __KC_IP6_FH_F_FRAG BIT(0) +#define __KC_IP6_FH_F_AUTH BIT(1) +#define __KC_IP6_FH_F_SKIP_RH BIT(2) + + if (fragoff) + *fragoff = 0; + + if (*offset) { + struct ipv6hdr _ip6, *ip6; + + ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6); + if (!ip6 || (ip6->version != 6)) { + printk(KERN_ERR "IPv6 header not found\n"); + return -EBADMSG; + } + start = *offset + sizeof(struct ipv6hdr); + nexthdr = ip6->nexthdr; + } + len = skb->len - start; + + do { + struct ipv6_opt_hdr _hdr, *hp; + unsigned int hdrlen; + found = (nexthdr == target); + + if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { + if (target < 0 || found) + break; + return -ENOENT; + } + + hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); + if (!hp) + return -EBADMSG; + + if (nexthdr == NEXTHDR_ROUTING) { + struct ipv6_rt_hdr _rh, *rh; + + rh = skb_header_pointer(skb, start, sizeof(_rh), + &_rh); + if (!rh) + return -EBADMSG; + + if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) && + rh->segments_left == 0) + found = false; + } + + if (nexthdr == NEXTHDR_FRAGMENT) { + unsigned short _frag_off; + __be16 *fp; + + if (flags) /* Indicate that this is a fragment */ + *flags |= __KC_IP6_FH_F_FRAG; + fp = skb_header_pointer(skb, + start+offsetof(struct frag_hdr, + frag_off), + sizeof(_frag_off), + &_frag_off); + if (!fp) + return -EBADMSG; + + _frag_off = ntohs(*fp) & ~0x7; + if (_frag_off) { + if (target < 0 && + ((!ipv6_ext_hdr(hp->nexthdr)) || + hp->nexthdr == NEXTHDR_NONE)) { + if (fragoff) + *fragoff = _frag_off; + return hp->nexthdr; + } + return -ENOENT; + } + hdrlen = 8; + } else if (nexthdr == NEXTHDR_AUTH) { + if (flags && (*flags & __KC_IP6_FH_F_AUTH) && (target < 0)) + break; + hdrlen = (hp->hdrlen + 2) << 2; + } else + hdrlen = ipv6_optlen(hp); + + if (!found) { + nexthdr = hp->nexthdr; + len -= hdrlen; + start += hdrlen; + } + } while (!found); + + *offset = start; + return nexthdr; +} + +int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msix(dev, entries, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +#endif /* 3.14.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)) +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + /* Set of random keys generated using kernel random number generator */ + static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62, + 0x95, 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F, + 0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95, + 0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC, + 0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41, + 0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A, + 0xD7, 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20}; + + BUG_ON(len > NETDEV_RSS_KEY_LEN); + memcpy(buffer, seed, len); +} +#endif /* 3.15.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)) +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + int err; + + /* first go through and flush out any stale entries */ + 
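+	/* "Stale" means an address that was synced to the device earlier but
+	 * is no longer referenced by anyone else (refcount of 1); such
+	 * entries are unsynced and freed before new ones are added.
+	 */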
list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) + if (!ha->synced || ha->refcount != 1) +#else + if (!ha->sync_cnt || ha->refcount != 1) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } + + /* go through and sync new entries to the list */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) + if (ha->synced) +#else + if (ha->sync_cnt) +#endif + continue; + + err = sync(dev, ha->addr); + if (err) + return err; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) + ha->synced = true; +#else + ha->sync_cnt++; +#endif + ha->refcount++; + } + + return 0; +} + +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) + if (!ha->synced) +#else + if (!ha->sync_cnt) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) + ha->synced = false; +#else + ha->sync_cnt--; +#endif + if (--ha->refcount) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } +} + +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da, **next = list; + int err; + + /* first go through and flush out any stale entries */ + while ((da = *next) != NULL) { + if (da->da_synced && da->da_users == 1) { + if (!unsync || !unsync(dev, da->da_addr)) { + *next = da->next; + kfree(da); + (*count)--; + continue; + } + } + next = &da->next; + } + + /* go through and sync new entries to the list */ + for (da = *list; da != NULL; da = da->next) { + if (da->da_synced) + continue; + + err = sync(dev, da->da_addr); + if (err) + return err; + + da->da_synced++; + da->da_users++; + } + + return 0; +} + +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da; + + while ((da = *list) != NULL) { + if (da->da_synced) { + if (!unsync || !unsync(dev, da->da_addr)) { + da->da_synced--; + if (--da->da_users == 0) { + *list = da->next; + kfree(da); + (*count)--; + continue; + } + } + } + list = &da->next; + } +} +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp) +{ + void *p; + + p = devm_kzalloc(dev, len, gfp); + if (p) + memcpy(p, src, len); + + return p; +} +#endif /* 3.16.0 */ + +/******************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +#endif /* <3.17.0 && RHEL_RELEASE_CODE < RHEL7.5 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) +#ifndef NO_PTP_SUPPORT +static void __kc_sock_efree(struct sk_buff *skb) +{ + sock_put(skb->sk); +} + +struct sk_buff *__kc_skb_clone_sk(struct 
sk_buff *skb) +{ + struct sock *sk = skb->sk; + struct sk_buff *clone; + + if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) + return NULL; + + clone = skb_clone(skb, GFP_ATOMIC); + if (!clone) { + sock_put(sk); + return NULL; + } + + clone->sk = sk; + clone->destructor = __kc_sock_efree; + + return clone; +} + +void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps) +{ + struct sock_exterr_skb *serr; + struct sock *sk = skb->sk; + int err; + + sock_hold(sk); + + *skb_hwtstamps(skb) = *hwtstamps; + + serr = SKB_EXT_ERR(skb); + memset(serr, 0, sizeof(*serr)); + serr->ee.ee_errno = ENOMSG; + serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; + + err = sock_queue_err_skb(sk, skb); + if (err) + kfree_skb(skb); + + sock_put(sk); +} +#endif + +/* include headers needed for get_headlen function */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#include +#endif +#ifdef HAVE_SCTP +#include +#endif + +u32 __kc_eth_get_headlen(const struct net_device __always_unused *dev, + unsigned char *data, unsigned int max_len) +{ + union { + unsigned char *network; + /* l2 headers */ + struct ethhdr *eth; + struct vlan_hdr *vlan; + /* l3 headers */ + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + __be16 proto; + u8 nexthdr = 0; /* default to not TCP */ + u8 hlen; + + /* this should never happen, but better safe than sorry */ + if (max_len < ETH_HLEN) + return max_len; + + /* initialize network frame pointer */ + hdr.network = data; + + /* set first protocol and move network header forward */ + proto = hdr.eth->h_proto; + hdr.network += ETH_HLEN; + +again: + switch (proto) { + /* handle any vlan tag if present */ + case __constant_htons(ETH_P_8021AD): + case __constant_htons(ETH_P_8021Q): + if ((hdr.network - data) > (max_len - VLAN_HLEN)) + return max_len; + + proto = hdr.vlan->h_vlan_encapsulated_proto; + hdr.network += VLAN_HLEN; + goto again; + /* handle L3 protocols */ + case __constant_htons(ETH_P_IP): + if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) + return max_len; + + /* access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct iphdr)) + return hdr.network - data; + + /* record next protocol if header is present */ + if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) + nexthdr = hdr.ipv4->protocol; + + hdr.network += hlen; + break; +#ifdef NETIF_F_TSO6 + case __constant_htons(ETH_P_IPV6): + if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) + return max_len; + + /* record next protocol */ + nexthdr = hdr.ipv6->nexthdr; + hdr.network += sizeof(struct ipv6hdr); + break; +#endif /* NETIF_F_TSO6 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) + case __constant_htons(ETH_P_FCOE): + hdr.network += FCOE_HEADER_LEN; + break; +#endif + default: + return hdr.network - data; + } + + /* finally sort out L4 */ + switch (nexthdr) { + case IPPROTO_TCP: + if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) + return max_len; + + /* access doff as a u8 to avoid unaligned access on ia64 */ + hdr.network += max_t(u8, sizeof(struct tcphdr), + (hdr.network[12] & 0xF0) >> 2); + + break; + case IPPROTO_UDP: + case IPPROTO_UDPLITE: + hdr.network += sizeof(struct udphdr); + break; +#ifdef HAVE_SCTP + case IPPROTO_SCTP: + hdr.network += sizeof(struct sctphdr); + break; +#endif + } + + /* + * If everything has gone correctly hdr.network should be the + * data section of the packet and will be the end of the header. 
+ * If not then it probably represents the end of the last recognized + * header. + */ + return min_t(unsigned int, hdr.network - data, max_len); +} + +#endif /* < 3.18.0 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) +#ifdef HAVE_NET_GET_RANDOM_ONCE +static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN]; + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + BUG_ON(len > sizeof(__kc_netdev_rss_key)); + net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key)); + memcpy(buffer, __kc_netdev_rss_key, len); +} +#endif + +int _kc_bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, + int nmaskbits) +{ + ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf - 2; + int n = 0; + + if (len > 1) { + n = list ? bitmap_scnlistprintf(buf, len, maskp, nmaskbits) : + bitmap_scnprintf(buf, len, maskp, nmaskbits); + buf[n++] = '\n'; + buf[n] = '\0'; + } + return n; +} +#endif + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) +#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + (SLE_VERSION_CODE > SLE_VERSION(12,1,0))) +unsigned int _kc_cpumask_local_spread(unsigned int i, int node) +{ + int cpu; + + /* Wrap: we always want a cpu. */ + i %= num_online_cpus(); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)) + /* Kernels prior to 2.6.28 do not have for_each_cpu or + * cpumask_of_node, so just use for_each_online_cpu() + */ + for_each_online_cpu(cpu) + if (i-- == 0) + return cpu; + + return 0; +#else + if (node == -1) { + for_each_cpu(cpu, cpu_online_mask) + if (i-- == 0) + return cpu; + } else { + /* NUMA first. */ + for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask) + if (i-- == 0) + return cpu; + + for_each_cpu(cpu, cpu_online_mask) { + /* Skip NUMA nodes, done above. */ + if (cpumask_test_cpu(cpu, cpumask_of_node(node))) + continue; + + if (i-- == 0) + return cpu; + } + } +#endif /* KERNEL_VERSION >= 2.6.28 */ + BUG(); +} +#endif +#endif + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +/** + * _kc_skb_flow_dissect_flow_keys - parse SKB to fill _kc_flow_keys + * @skb: SKB used to fille _kc_flow_keys + * @flow: _kc_flow_keys to set with SKB fields + * @flags: currently unused flags + * + * The purpose of using kcompat for this function is so the caller doesn't have + * to care about which kernel version they are on, which prevents a larger than + * normal #ifdef mess created by using a HAVE_* flag for this case. This is also + * done for 4.2 kernels to simplify calling skb_flow_dissect_flow_keys() + * because in 4.2 kernels skb_flow_dissect_flow_keys() exists, but only has 2 + * arguments. Recent kernels have skb_flow_dissect_flow_keys() that has 3 + * arguments. + * + * The caller needs to understand that this function was only implemented as a + * bare-minimum replacement for recent versions of skb_flow_dissect_flow_keys() + * and this function is in no way similar to skb_flow_dissect_flow_keys(). An + * example use can be found in the ice driver, specifically ice_arfs.c. + * + * This function is treated as a whitelist of supported fields the SKB can + * parse. 
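+ * For a plain TCPv4 packet, for example, only basic.n_proto,
+ * basic.ip_proto, addrs.v4addrs and ports end up populated; every other
+ * field stays zeroed.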
If new functionality is added make sure to keep this format (i.e. only + * check for fields that are explicity wanted). + * + * Current whitelist: + * + * TCPv4, TCPv6, UDPv4, UDPv6 + * + * If any unexpected protocol or other field is found this function memsets the + * flow passed in back to 0 and returns false. Otherwise the flow is populated + * and returns true. + */ +bool +_kc_skb_flow_dissect_flow_keys(const struct sk_buff *skb, + struct _kc_flow_keys *flow, + unsigned int __always_unused flags) +{ + memset(flow, 0, sizeof(*flow)); + + flow->basic.n_proto = skb->protocol; + switch (flow->basic.n_proto) { + case htons(ETH_P_IP): + flow->basic.ip_proto = ip_hdr(skb)->protocol; + flow->addrs.v4addrs.src = ip_hdr(skb)->saddr; + flow->addrs.v4addrs.dst = ip_hdr(skb)->daddr; + break; + case htons(ETH_P_IPV6): + flow->basic.ip_proto = ipv6_hdr(skb)->nexthdr; + memcpy(&flow->addrs.v6addrs.src, &ipv6_hdr(skb)->saddr, + sizeof(struct in6_addr)); + memcpy(&flow->addrs.v6addrs.dst, &ipv6_hdr(skb)->daddr, + sizeof(struct in6_addr)); + break; + default: + netdev_dbg(skb->dev, "%s: Unsupported/unimplemented layer 3 protocol %04x\n", __func__, htons(flow->basic.n_proto)); + goto unsupported; + } + + switch (flow->basic.ip_proto) { + case IPPROTO_TCP: + { + struct tcphdr *tcph; + + tcph = tcp_hdr(skb); + flow->ports.src = tcph->source; + flow->ports.dst = tcph->dest; + break; + } + case IPPROTO_UDP: + { + struct udphdr *udph; + + udph = udp_hdr(skb); + flow->ports.src = udph->source; + flow->ports.dst = udph->dest; + break; + } + default: + netdev_dbg(skb->dev, "%s: Unsupported/unimplemented layer 4 protocol %02x\n", __func__, flow->basic.ip_proto); + return false; + } + + return true; + +unsupported: + memset(flow, 0, sizeof(*flow)); + return false; +} +#endif /* ! >= RHEL7.4 && ! >= SLES12.2 */ +#endif /* 4.3.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) ) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) +#ifdef CONFIG_SPARC +#include +#include +#endif +int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, + u8 *mac_addr __maybe_unused) +{ +#if (((LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)) && defined(CONFIG_OF) && \ + !defined(HAVE_STRUCT_DEVICE_OF_NODE) || !defined(CONFIG_OF)) && \ + !defined(CONFIG_SPARC)) + return -ENODEV; +#else + const unsigned char *addr; + struct device_node *dp; + + if (dev_is_pci(dev)) + dp = pci_device_to_OF_node(to_pci_dev(dev)); + else +#if defined(HAVE_STRUCT_DEVICE_OF_NODE) && defined(CONFIG_OF) + dp = dev->of_node; +#else + dp = NULL; +#endif + + addr = NULL; + if (dp) + addr = of_get_mac_address(dp); +#ifdef CONFIG_SPARC + /* Kernel hasn't implemented arch_get_platform_mac_address, but we + * should handle the SPARC case here since it was supported + * originally. This is replaced by arch_get_platform_mac_address() + * upstream. 
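+	 * On SPARC the firmware IDPROM carries the machine's factory MAC
+	 * address (idprom->id_ethaddr), which is used below as a last resort
+	 * when no OF node supplies one.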
+ */ + if (!addr) + addr = idprom->id_ethaddr; +#endif + if (!addr) + return -ENODEV; + + ether_addr_copy(mac_addr, addr); + return 0; +#endif +} +#endif /* !(RHEL_RELEASE >= 7.3) */ +#endif /* < 4.5.0 */ + +/*****************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)))) +const char *_kc_phy_speed_to_str(int speed) +{ + switch (speed) { + case SPEED_10: + return "10Mbps"; + case SPEED_100: + return "100Mbps"; + case SPEED_1000: + return "1Gbps"; + case SPEED_2500: + return "2.5Gbps"; + case SPEED_5000: + return "5Gbps"; + case SPEED_10000: + return "10Gbps"; + case SPEED_14000: + return "14Gbps"; + case SPEED_20000: + return "20Gbps"; + case SPEED_25000: + return "25Gbps"; + case SPEED_40000: + return "40Gbps"; + case SPEED_50000: + return "50Gbps"; + case SPEED_56000: + return "56Gbps"; +#ifdef SPEED_100000 + case SPEED_100000: + return "100Gbps"; +#endif + case SPEED_UNKNOWN: + return "Unknown"; + default: + return "Unsupported (update phy-core.c)"; + } +} +#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0) ) +void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, + struct ethtool_link_ksettings *src) +{ + unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); + unsigned int idx = 0; + + for (; idx < size; idx++) { + dst->link_modes.supported[idx] &= + src->link_modes.supported[idx]; + dst->link_modes.advertising[idx] &= + src->link_modes.advertising[idx]; + } +} +#endif /* 4.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0)) +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,5,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0) || \ + SLE_VERSION_CODE >= SLE_VERSION(15,1,0)) +#if BITS_PER_LONG == 64 +/** + * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap + * @bitmap: array of unsigned longs, the destination bitmap + * @buf: array of u32 (in host byte order), the source bitmap + * @nbits: number of bits in @bitmap + */ +void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits) +{ + unsigned int i, halfwords; + + halfwords = DIV_ROUND_UP(nbits, 32); + for (i = 0; i < halfwords; i++) { + bitmap[i/2] = (unsigned long) buf[i]; + if (++i < halfwords) + bitmap[i/2] |= ((unsigned long) buf[i]) << 32; + } + + /* Clear tail bits in last word beyond nbits. */ + if (nbits % BITS_PER_LONG) + bitmap[(halfwords - 1) / 2] &= BITMAP_LAST_WORD_MASK(nbits); +} +#endif /* BITS_PER_LONG == 64 */ +#endif /* !(RHEL >= 8.0) && !(SLES >= 12.5 && SLES < 15.0 || SLES >= 15.1) */ +#endif /* 4.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0)) +/* PCIe link information */ +#define PCIE_SPEED2STR(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? "16 GT/s" : \ + (speed) == PCIE_SPEED_8_0GT ? "8 GT/s" : \ + (speed) == PCIE_SPEED_5_0GT ? "5 GT/s" : \ + (speed) == PCIE_SPEED_2_5GT ? "2.5 GT/s" : \ + "Unknown speed") + +/* PCIe speed to Mb/s reduced by encoding overhead */ +#define PCIE_SPEED2MBS_ENC(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? 
16000*128/130 : \ + (speed) == PCIE_SPEED_8_0GT ? 8000*128/130 : \ + (speed) == PCIE_SPEED_5_0GT ? 5000*8/10 : \ + (speed) == PCIE_SPEED_2_5GT ? 2500*8/10 : \ + 0) + +static u32 +_kc_pcie_bandwidth_available(struct pci_dev *dev, + struct pci_dev **limiting_dev, + enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + u32 bw, next_bw; + + if (speed) + *speed = PCI_SPEED_UNKNOWN; + if (width) + *width = PCIE_LNK_WIDTH_UNKNOWN; + + bw = 0; + + while (dev) { + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed); + + /* Check if current device limits the total bandwidth */ + if (!bw || next_bw <= bw) { + bw = next_bw; + + if (limiting_dev) + *limiting_dev = dev; + if (speed) + *speed = next_speed; + if (width) + *width = next_width; + } + + dev = pci_upstream_bridge(dev); + } + + return bw; +} + +static enum pci_bus_speed _kc_pcie_get_speed_cap(struct pci_dev *dev) +{ + u32 lnkcap2, lnkcap; + + /* + * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link + * Speeds Vector in Link Capabilities 2 when supported, falling + * back to Max Link Speed in Link Capabilities otherwise. + */ + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); + if (lnkcap2) { /* PCIe r3.0-compliant */ + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + return PCI_SPEED_UNKNOWN; + } + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) { + if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + } + + return PCI_SPEED_UNKNOWN; +} + +static enum pcie_link_width _kc_pcie_get_width_cap(struct pci_dev *dev) +{ + u32 lnkcap; + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) + return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4; + + return PCIE_LNK_WIDTH_UNKNOWN; +} + +static u32 +_kc_pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + *speed = _kc_pcie_get_speed_cap(dev); + *width = _kc_pcie_get_width_cap(dev); + + if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) + return 0; + + return *width * PCIE_SPEED2MBS_ENC(*speed); +} + +void _kc_pcie_print_link_status(struct pci_dev *dev) { + enum pcie_link_width width, width_cap; + enum pci_bus_speed speed, speed_cap; + struct pci_dev *limiting_dev = NULL; + u32 bw_avail, bw_cap; + + bw_cap = _kc_pcie_bandwidth_capable(dev, &speed_cap, &width_cap); + bw_avail = _kc_pcie_bandwidth_available(dev, &limiting_dev, &speed, + &width); + + if (bw_avail >= bw_cap) + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); + else + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", + bw_avail / 1000, bw_avail % 1000, + PCIE_SPEED2STR(speed), width, + 
limiting_dev ? pci_name(limiting_dev) : "", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); +} +#endif /* 4.17.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_NDO_FDB_ADD_EXTACK +#else /* !RHEL || RHEL < 8.1 */ +#ifdef HAVE_TC_SETUP_CLSFLOWER +#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \ + const struct flow_match *__m = &(__rule)->match; \ + struct flow_dissector *__d = (__m)->dissector; \ + \ + (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \ + (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask); \ + +void txgbe_flow_rule_match_basic(const struct flow_rule *rule, + struct flow_match_basic *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out); +} + +void txgbe_flow_rule_match_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out); +} + +void txgbe_flow_rule_match_eth_addrs(const struct flow_rule *rule, + struct flow_match_eth_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out); +} + +#ifdef HAVE_TC_FLOWER_ENC +void txgbe_flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out); +} + +void txgbe_flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out); +} + +void txgbe_flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out); +} + +void txgbe_flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out); +} + +void txgbe_flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out); +} +#endif + +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +void txgbe_flow_rule_match_vlan(const struct flow_rule *rule, + struct flow_match_vlan *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out); +} +#endif + +void txgbe_flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out); +} + +void txgbe_flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out); +} + +void txgbe_flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out); +} +#endif /* HAVE_TC_SETUP_CLSFLOWER */ +#endif /* !RHEL || RHEL < 8.1 */ +#endif /* 5.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#if 0 +int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head __always_unused *driver_list, + tc_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + bool ingress_only) +{ + if (ingress_only && + f->binder_type != 
TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + /* Note: Upstream has driver_block_list, but older kernels do not */ + switch (f->command) { + case TC_BLOCK_BIND: +#ifdef HAVE_TCF_BLOCK_CB_REGISTER_EXTACK + return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv, + f->extack); +#else + return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv); +#endif + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, cb, cb_ident); + return 0; + default: + return -EOPNOTSUPP; + } +} +#endif +#endif +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#endif /* 5.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)) +u64 _kc_pci_get_dsn(struct pci_dev *dev) +{ + u32 dword; + u64 dsn; + int pos; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN); + if (!pos) + return 0; + + /* + * The Device Serial Number is two dwords offset 4 bytes from the + * capability position. The specification says that the first dword is + * the lower half, and the second dword is the upper half. + */ + pos += 4; + pci_read_config_dword(dev, pos, &dword); + dsn = (u64)dword; + pci_read_config_dword(dev, pos + 4, &dword); + dsn |= ((u64)dword) << 32; + + return dsn; +} +#endif /* 5.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,17,0)) +#ifndef ETH_HW_ADDR_SET +void _kc_eth_hw_addr_set_txgbe(struct net_device *dev, const void *addr) +{ + ether_addr_copy(dev->dev_addr, addr); +} +#endif /* ETH_HW_ADDR_SET */ +#endif + diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_kcompat.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_kcompat.h new file mode 100644 index 0000000000000000000000000000000000000000..321ada0ac0215a8c61e1f367627f9c115f9bed8a --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_kcompat.h @@ -0,0 +1,7859 @@ +#ifndef _TXGBE_KYLIN_H__ +#define _TXGBE_KYLIN_H__ + +#ifndef LINUX_VERSION_CODE +#include +#else +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef GCC_VERSION +#define GCC_VERSION (__GNUC__ * 10000 \ + + __GNUC_MINOR__ * 100 \ + + __GNUC_PATCHLEVEL__) +#endif /* GCC_VERSION */ + +/* Backport macros for controlling GCC diagnostics */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0) ) + +/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */ +#if GCC_VERSION >= 40600 +#define __diag_str1(s) #s +#define __diag_str(s) __diag_str1(s) +#define __diag(s) _Pragma(__diag_str(GCC diagnostic s)) +#else +#define __diag(s) +#endif /* GCC_VERSION >= 4.6 */ +#define __diag_push() __diag(push) +#define __diag_pop() __diag(pop) +#endif /* LINUX_VERSION < 4.18.0 */ + +#ifndef NSEC_PER_MSEC +#define NSEC_PER_MSEC 1000000L +#endif +#include +/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */ +#ifndef UTS_RELEASE +/* utsrelease.h changed locations in 2.6.33 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) +#include +#else +#include +#endif +#endif + +/**************************selection************************************/ + +/*#define TXGBE_SUPPORT_KYLIN_FT*/ /*kylinft to open*/ +/*#define TXGBE_SUPPORT_KYLIN_LX*/ /*kylinlx to open*/ + +#ifndef AUTO +#define AUTO 1 +#endif + +#ifndef KR_FEC 
+#define KR_FEC 1 +#endif + +/**************************performance************************************/ +#ifndef DEFAULT_FCPAUSE +#define DEFAULT_FCPAUSE 0xFFFF /* kylinft/kylinlx : 0x3FFF default to 0xFFFF*/ +#endif + +#ifndef MAX_REQUEST_SIZE +#define MAX_REQUEST_SIZE 256 /* kylinft : 512 default to 256*/ +#endif + +#ifndef DEFAULT_TXD +#define DEFAULT_TXD 512 /*deepinsw : 1024 default to 512*/ +#endif + +#ifndef DEFAULT_TX_WORK +#define DEFAULT_TX_WORK 256 /*deepinsw : 512 default to 256*/ +#endif + +#ifndef CL72_KRTR_PRBS_MODE_EN +#define CL72_KRTR_PRBS_MODE_EN 0xffff /*open kr prbs check */ +#endif + +#ifndef TXGBE_STATIC_ITR +#define TXGBE_STATIC_ITR 1 /* static itr configure */ +#endif + +#ifndef TXGBE_PCIE_RECOVER +#define TXGBE_PCIE_RECOVER 1 /* pcie recovery, defalut to open */ +#endif + +#ifndef TXGBE_RECOVER_CHECK +#define TXGBE_RECOVER_CHECK 1 /* recover check log , default 1 */ +#endif + +#ifndef TXGBE_DIS_COMP_TIMEOUT +#define TXGBE_DIS_COMP_TIMEOUT 1 /* dis completion timeout, default 1 to dis */ +#endif + +#ifndef AN73_TRAINNING_MODE +#define AN73_TRAINNING_MODE 1 /* 0 : kd5886 1: centc 2: wx2wx */ +#endif + +#ifndef CL72_KRTR_PRBS31_EN +#define CL72_KRTR_PRBS31_EN 0 +#endif + +#ifndef TXGBE_SWFW_MBOX_AML +#define TXGBE_SWFW_MBOX_AML +#endif + +#ifndef TXGBE_DMA_RESET +#define TXGBE_DMA_RESET 1 +#endif + +#ifndef TXGBE_1588_PPS_LEVEL +#define TXGBE_1588_PPS_LEVEL 1 +#endif + +#ifndef TXGBE_1588_PPS_WIDTH +#define TXGBE_1588_PPS_WIDTH 100 +#endif + +#ifndef TXGBE_1588_TOD_ENABLE +#define TXGBE_1588_TOD_ENABLE 1 +#endif + +#ifndef CL72_KRTR_PRBS_MODE_EN +#define CL72_KRTR_PRBS_MODE_EN 0xffff /*open kr prbs check */ +#endif + +#ifndef CL74_KRTR_TRAINNING_TIMEOUT +#define CL74_KRTR_TRAINNING_TIMEOUT 3000 +#endif +#ifndef AN_TRAINNING_MODE +#define AN_TRAINNING_MODE 0 +#endif + +/**************************performance************************************/ + +/**************************sfi************************************/ +#ifndef SFI_SET +#define SFI_SET 0 +#define SFI_MAIN 24 +#define SFI_PRE 4 +#define SFI_POST 16 +#endif +#ifndef SFI_TXRX_PIN +#define SFI_TXRX_PIN 0 /*rx : 0xf tx : 0xf0 */ +#endif +/**************************sfi************************************/ + +/**************************kr************************************/ +#ifndef KR_MODE +#define KR_MODE 0 +#endif + +#ifndef KR_AN73_PRESET +#define KR_AN73_PRESET 0 +#endif + +#ifndef KR_POLLING +#define KR_POLLING 0 +#endif + +#ifndef KR_RESTART_T_MODE +#define KR_RESTART_T_MODE 0 +#endif + +#ifndef KR_SET +#define KR_SET 0 +#define KR_MAIN 27 +#define KR_PRE 8 +#define KR_POST 44 +#endif +#ifndef KR_TXRX_PIN +#define KR_TXRX_PIN 0 /*rx : 0xf tx : 0xf0 */ +#endif +/**************************kr************************************/ + +/**************************kx4************************************/ +#ifndef KX4_SET +#define KX4_SET 0 +#define KX4_MAIN 40 +#define KX4_PRE 0 +#define KX4_POST 0 +#endif +#ifndef KX4_TXRX_PIN +#define KX4_TXRX_PIN 0 /*rx : 0xf tx : 0xf0 */ +#endif +/**************************kx4************************************/ + +/**************************kx************************************/ +#ifndef KX_SET +#define KX_SET 0 +#define KX_MAIN 24 +#define KX_PRE 4 +#define KX_POST 16 +#endif + +#ifndef KX_SGMII +#define KX_SGMII 0 /* 1 0x18090 :0xcf00 */ +#endif +/**************************kx************************************/ + +#ifndef BOND_CHECK_LINK_MODE +#define BOND_CHECK_LINK_MODE 0 +#endif + +/**************************selection************************************/ + +/* For 
Kylin: + * support Kylin-4.0.2-SP2-17122218.j1-arm64 */ +#if defined(TXGBE_SUPPORT_KYLIN_FT) || defined(TXGBE_SUPPORT_KYLIN_LX) +#ifdef UTS_UBUNTU_RELEASE_ABI +#undef UTS_UBUNTU_RELEASE_ABI +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0) +#define UTS_UBUNTU_RELEASE_ABI 21 +#else +#define UTS_UBUNTU_RELEASE_ABI 21 +#endif +#endif + +#ifdef UTS_KYLINOS_RELEASE_ABI +#ifndef UTS_UBUNTU_RELEASE_ABI +#if UTS_KYLINOS_RELEASE_ABI <= 20171215 +#define UTS_UBUNTU_RELEASE_ABI 21 +#else +#define UTS_UBUNTU_RELEASE_ABI 21 +#endif +#endif /* !UTS_UBUNTU_RELEASE_ABI */ +#endif /* UTS_KYLINOS_RELEASE_ABI */ + +/* NAPI enable/disable flags here */ +#define NAPI + +#define adapter_struct txgbe_adapter +#define adapter_q_vector txgbe_q_vector + +/* and finally set defines so that the code sees the changes */ +#ifdef NAPI +#else +#endif /* NAPI */ + +/* Dynamic LTR and deeper C-State support disable/enable */ + +/* packet split disable/enable */ +#ifdef DISABLE_PACKET_SPLIT +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT +#define CONFIG_TXGBE_DISABLE_PACKET_SPLIT +#endif +#endif /* DISABLE_PACKET_SPLIT */ + +/* MSI compatibility code for all kernels and drivers */ +#ifdef DISABLE_PCI_MSI +#undef CONFIG_PCI_MSI +#endif +#ifndef CONFIG_PCI_MSI +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) +struct msix_entry { + u16 vector; /* kernel uses to write allocated vector */ + u16 entry; /* driver uses to specify entry, OS writes */ +}; +#endif +#undef pci_enable_msi +#define pci_enable_msi(a) -ENOTSUPP +#undef pci_disable_msi +#define pci_disable_msi(a) do {} while (0) +#undef pci_enable_msix +#define pci_enable_msix(a, b, c) -ENOTSUPP +#undef pci_disable_msix +#define pci_disable_msix(a) do {} while (0) +#define msi_remove_pci_irq_vectors(a) do {} while (0) +#endif /* CONFIG_PCI_MSI */ +#ifdef DISABLE_PM +#undef CONFIG_PM +#endif + +#ifdef DISABLE_NET_POLL_CONTROLLER +#undef CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef PMSG_SUSPEND +#define PMSG_SUSPEND 3 +#endif + +/* generic boolean compatibility */ +#undef TRUE +#undef FALSE +#define TRUE true +#define FALSE false +#ifdef GCC_VERSION +#if ( GCC_VERSION < 3000 ) +#define _Bool char +#endif +#else +#define _Bool char +#endif + +#ifndef ipv6_authlen +#define ipv6_authlen(p) (((p)->hdrlen+2) << 2) +#endif + +#ifndef BIT +#define BIT(nr) (1UL << (nr)) +#endif + +#undef __always_unused +#define __always_unused __attribute__((__unused__)) + +#undef __maybe_unused +#define __maybe_unused __attribute__((__unused__)) + +/* kernels less than 2.4.14 don't have this */ +#ifndef ETH_P_8021Q +#define ETH_P_8021Q 0x8100 +#endif + +#ifndef ETH_P_8021AD +#define ETH_P_8021D 0x88A8 +#endif + +#ifndef module_param +#define module_param(v,t,p) MODULE_PARM(v, "i"); +#endif + +#ifndef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffffffffffffULL +#endif + +#ifndef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0x00000000ffffffffULL +#endif + +#ifndef PCI_CAP_ID_EXP +#define PCI_CAP_ID_EXP 0x10 +#endif + +#ifndef uninitialized_var +#define uninitialized_var(x) x = x +#endif + +#ifndef PCIE_LINK_STATE_L0S +#define PCIE_LINK_STATE_L0S 1 +#endif +#ifndef PCIE_LINK_STATE_L1 +#define PCIE_LINK_STATE_L1 2 +#endif + +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(net, pdev) +#endif + +#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#define free_netdev(x) kfree(x) +#endif + +#ifdef HAVE_POLL_CONTROLLER +#define CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef SKB_DATAREF_SHIFT +/* if we do not have the infrastructure to detect if skb_header is cloned + just return 
false in all cases */ +#define skb_header_cloned(x) 0 +#endif + +#ifndef NETIF_F_GSO +#define gso_size tso_size +#define gso_segs tso_segs +#endif + +#ifndef NETIF_F_GRO +#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \ + vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan) +#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb) +#endif + +#ifndef NETIF_F_SCTP_CSUM +#define NETIF_F_SCTP_CSUM 0 +#endif + +#ifndef NETIF_F_LRO +#define NETIF_F_LRO BIT(15) +#endif + +#ifndef NETIF_F_NTUPLE +#define NETIF_F_NTUPLE BIT(27) +#endif + +#ifndef NETIF_F_ALL_FCOE +#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ + NETIF_F_FSO) +#endif + +#ifndef IPPROTO_SCTP +#define IPPROTO_SCTP 132 +#endif + +#ifndef IPPROTO_UDPLITE +#define IPPROTO_UDPLITE 136 +#endif + +#ifndef CHECKSUM_PARTIAL +#define CHECKSUM_PARTIAL CHECKSUM_HW +#define CHECKSUM_COMPLETE CHECKSUM_HW +#endif + +#ifndef __read_mostly +#define __read_mostly +#endif + +#ifndef MII_RESV1 +#define MII_RESV1 0x17 /* Reserved... */ +#endif + +#ifndef unlikely +#define unlikely(_x) _x +#define likely(_x) _x +#endif + +#ifndef WARN_ON +#define WARN_ON(x) +#endif + +#ifndef PCI_DEVICE +#define PCI_DEVICE(vend,dev) \ + .vendor = (vend), .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID +#endif + +#ifndef node_online +#define node_online(node) ((node) == 0) +#endif + +#ifndef _LINUX_RANDOM_H +#include +#endif + +#ifndef BITS_PER_TYPE +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) +#endif + +#ifndef BITS_TO_LONGS +#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) +#endif + +#ifndef DECLARE_BITMAP +#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)] +#endif + +#ifndef VLAN_HLEN +#define VLAN_HLEN 4 +#endif + +#ifndef VLAN_ETH_HLEN +#define VLAN_ETH_HLEN 18 +#endif + +#ifndef VLAN_ETH_FRAME_LEN +#define VLAN_ETH_FRAME_LEN 1518 +#endif + +#ifndef DCA_GET_TAG_TWO_ARGS +#define dca3_get_tag(a,b) dca_get_tag(b) +#endif + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#if defined(__i386__) || defined(__x86_64__) +#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#endif +#endif + +/* taken from 2.6.24 definition in linux/kernel.h */ +#ifndef IS_ALIGNED +#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0) +#endif + +#ifdef IS_ENABLED +#undef IS_ENABLED +#undef __ARG_PLACEHOLDER_1 +#undef config_enabled +#undef _config_enabled +#undef __config_enabled +#undef ___config_enabled +#endif + +#define __ARG_PLACEHOLDER_1 0, +#define config_enabled(cfg) _config_enabled(cfg) +#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) +#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) +#define ___config_enabled(__ignored, val, ...) 
val + +#define IS_ENABLED(option) \ + (config_enabled(option) || config_enabled(option##_MODULE)) + +#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX) +struct _kc_vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_ethhdr _kc_vlan_ethhdr +struct _kc_vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_hdr _kc_vlan_hdr +#define vlan_tx_tag_present(_skb) 0 +#define vlan_tx_tag_get(_skb) 0 +#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */ + +#ifndef VLAN_PRIO_SHIFT +#define VLAN_PRIO_SHIFT 13 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_2_5GB +#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_5_0GB +#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_8_0GB +#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X1 +#define PCI_EXP_LNKSTA_NLW_X1 0x0010 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X2 +#define PCI_EXP_LNKSTA_NLW_X2 0x0020 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X4 +#define PCI_EXP_LNKSTA_NLW_X4 0x0040 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X8 +#define PCI_EXP_LNKSTA_NLW_X8 0x0080 +#endif + +#ifndef __GFP_COLD +#define __GFP_COLD 0 +#endif + +#ifndef __GFP_COMP +#define __GFP_COMP 0 +#endif + +#ifndef IP_OFFSET +#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */ +#endif + +/*****************************************************************************/ +/* Installations with ethtool version without eeprom, adapter id, or statistics + * support */ + +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif + +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x1d +#undef ethtool_drvinfo +#define ethtool_drvinfo k_ethtool_drvinfo +struct k_ethtool_drvinfo { + u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char reserved1[32]; + char reserved2[16]; + u32 n_stats; + u32 testinfo_len; + u32 eedump_len; + u32 regdump_len; +}; + +struct ethtool_stats { + u32 cmd; + u32 n_stats; + u64 data[0]; +}; +#endif /* ETHTOOL_GSTATS */ + +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x1c +#endif /* ETHTOOL_PHYS_ID */ + +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x1b +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS, +}; +struct ethtool_gstrings { + u32 cmd; /* ETHTOOL_GSTRINGS */ + u32 string_set; /* string set id e.c. 
ETH_SS_TEST, etc*/ + u32 len; /* number of strings in the string set */ + u8 data[0]; +}; +#endif /* ETHTOOL_GSTRINGS */ + +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x1a +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = BIT(0), + ETH_TEST_FL_FAILED = BIT(1), +}; +struct ethtool_test { + u32 cmd; + u32 flags; + u32 reserved; + u32 len; + u64 data[0]; +}; +#endif /* ETHTOOL_TEST */ + +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0xb +#undef ETHTOOL_GREGS +struct ethtool_eeprom { + u32 cmd; + u32 magic; + u32 offset; + u32 len; + u8 data[0]; +}; + +struct ethtool_value { + u32 cmd; + u32 data; +}; +#endif /* ETHTOOL_GEEPROM */ + +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0xa +#endif /* ETHTOOL_GLINK */ + +#ifndef ETHTOOL_GWOL +#define ETHTOOL_GWOL 0x5 +#define ETHTOOL_SWOL 0x6 +#define SOPASS_MAX 6 +struct ethtool_wolinfo { + u32 cmd; + u32 supported; + u32 wolopts; + u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ +}; +#endif /* ETHTOOL_GWOL */ + +#ifndef ETHTOOL_GREGS +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ +#define ethtool_regs _kc_ethtool_regs +/* for passing big chunks of data */ +struct _kc_ethtool_regs { + u32 cmd; + u32 version; /* driver-specific, indicates different chips/revs */ + u32 len; /* bytes */ + u8 data[0]; +}; +#endif /* ETHTOOL_GREGS */ + +#ifndef ETHTOOL_GMSGLVL +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#endif +#ifndef ETHTOOL_SMSGLVL +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */ +#endif +#ifndef ETHTOOL_NWAY_RST +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ +#endif +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0x0000000a /* Get link status */ +#endif +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#endif +#ifndef ETHTOOL_SEEPROM +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ +#endif +#ifndef ETHTOOL_GCOALESCE +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ +/* for configuring coalescing parameters of chip */ +#define ethtool_coalesce _kc_ethtool_coalesce +struct _kc_ethtool_coalesce { + u32 cmd; /* ETHTOOL_{G,S}COALESCE */ + + /* How many usecs to delay an RX interrupt after + * a packet arrives. If 0, only rx_max_coalesced_frames + * is used. + */ + u32 rx_coalesce_usecs; + + /* How many packets to delay an RX interrupt after + * a packet arrives. If 0, only rx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause RX interrupts to never be + * generated. + */ + u32 rx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. + */ + u32 rx_coalesce_usecs_irq; + u32 rx_max_coalesced_frames_irq; + + /* How many usecs to delay a TX interrupt after + * a packet is sent. If 0, only tx_max_coalesced_frames + * is used. + */ + u32 tx_coalesce_usecs; + + /* How many packets to delay a TX interrupt after + * a packet is sent. If 0, only tx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause TX interrupts to never be + * generated. + */ + u32 tx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. 
+ */ + u32 tx_coalesce_usecs_irq; + u32 tx_max_coalesced_frames_irq; + + /* How many usecs to delay in-memory statistics + * block updates. Some drivers do not have an in-memory + * statistic block, and in such cases this value is ignored. + * This value must not be zero. + */ + u32 stats_block_coalesce_usecs; + + /* Adaptive RX/TX coalescing is an algorithm implemented by + * some drivers to improve latency under low packet rates and + * improve throughput under high packet rates. Some drivers + * only implement one of RX or TX adaptive coalescing. Anything + * not implemented by the driver causes these values to be + * silently ignored. + */ + u32 use_adaptive_rx_coalesce; + u32 use_adaptive_tx_coalesce; + + /* When the packet rate (measured in packets per second) + * is below pkt_rate_low, the {rx,tx}_*_low parameters are + * used. + */ + u32 pkt_rate_low; + u32 rx_coalesce_usecs_low; + u32 rx_max_coalesced_frames_low; + u32 tx_coalesce_usecs_low; + u32 tx_max_coalesced_frames_low; + + /* When the packet rate is below pkt_rate_high but above + * pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used. + */ + + /* When the packet rate is (measured in packets per second) + * is above pkt_rate_high, the {rx,tx}_*_high parameters are + * used. + */ + u32 pkt_rate_high; + u32 rx_coalesce_usecs_high; + u32 rx_max_coalesced_frames_high; + u32 tx_coalesce_usecs_high; + u32 tx_max_coalesced_frames_high; + + /* How often to do adaptive coalescing packet rate sampling, + * measured in seconds. Must not be zero. + */ + u32 rate_sample_interval; +}; +#endif /* ETHTOOL_GCOALESCE */ + +#ifndef ETHTOOL_SCOALESCE +#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ +#endif +#ifndef ETHTOOL_GRINGPARAM +#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ +/* for configuring RX/TX ring parameters */ +#define ethtool_ringparam _kc_ethtool_ringparam +struct _kc_ethtool_ringparam { + u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ + + /* Read only attributes. These indicate the maximum number + * of pending RX/TX ring entries the driver will allow the + * user to set. + */ + u32 rx_max_pending; + u32 rx_mini_max_pending; + u32 rx_jumbo_max_pending; + u32 tx_max_pending; + + /* Values changeable by the user. The valid values are + * in the range 1 to the "*_max_pending" counterpart above. + */ + u32 rx_pending; + u32 rx_mini_pending; + u32 rx_jumbo_pending; + u32 tx_pending; +}; +#endif /* ETHTOOL_GRINGPARAM */ + +#ifndef ETHTOOL_SRINGPARAM +#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */ +#endif +#ifndef ETHTOOL_GPAUSEPARAM +#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ +/* for configuring link flow control parameters */ +#define ethtool_pauseparam _kc_ethtool_pauseparam +struct _kc_ethtool_pauseparam { + u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ + + /* If the link is being auto-negotiated (via ethtool_cmd.autoneg + * being true) the user may set 'autoneg' here non-zero to have the + * pause parameters be auto-negotiated too. In such a case, the + * {rx,tx}_pause values below determine what capabilities are + * advertised. + * + * If 'autoneg' is zero or the link is not being auto-negotiated, + * then {rx,tx}_pause force the driver to use/not-use pause + * flow control. + */ + u32 autoneg; + u32 rx_pause; + u32 tx_pause; +}; +#endif /* ETHTOOL_GPAUSEPARAM */ + +#ifndef ETHTOOL_SPAUSEPARAM +#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. 
*/ +#endif +#ifndef ETHTOOL_GRXCSUM +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_SRXCSUM +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GTXCSUM +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STXCSUM +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GSG +#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable + * (ethtool_value) */ +#endif +#ifndef ETHTOOL_SSG +#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable + * (ethtool_value). */ +#endif +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */ +#endif +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ +#endif +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ +#endif +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ +#endif +#ifndef ETHTOOL_GTSO +#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STSO +#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ +#endif + +#ifndef ETHTOOL_BUSINFO_LEN +#define ETHTOOL_BUSINFO_LEN 32 +#endif + +#ifndef WAKE_FILTER +#define WAKE_FILTER BIT(7) +#endif + +#ifndef SPEED_2500 +#define SPEED_2500 2500 +#endif +#ifndef SPEED_5000 +#define SPEED_5000 5000 +#endif +#ifndef SPEED_14000 +#define SPEED_14000 14000 +#endif +#ifndef SPEED_25000 +#define SPEED_25000 25000 +#endif +#ifndef SPEED_50000 +#define SPEED_50000 50000 +#endif +#ifndef SPEED_56000 +#define SPEED_56000 56000 +#endif +#ifndef SPEED_100000 +#define SPEED_100000 100000 +#endif + +#ifndef RHEL_RELEASE_VERSION +#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif +#ifndef AX_RELEASE_VERSION +#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif + +#ifndef AX_RELEASE_CODE +#define AX_RELEASE_CODE 0 +#endif + +#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3) +#endif + +#ifndef RHEL_RELEASE_CODE +/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */ +#define RHEL_RELEASE_CODE 0 +#endif + +/* RHEL 7 didn't backport the parameter change in + * create_singlethread_workqueue. + * If/when RH corrects this we will want to tighten up the version check. + */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) +#undef create_singlethread_workqueue +#define create_singlethread_workqueue(name) \ + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) +#endif + +/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find + * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new + * enough versions of Ubuntu. Otherwise you can simply see it in the output of + * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in + * the linux-source package, but in the linux-headers package. It begins to + * appear in later releases of 14.04 and 14.10. 
+ * + * Ex: + * + * $uname -r + * 3.13.0-45-generic + * ABI is 45 + * + * + * $uname -r + * 3.16.0-23-generic + * ABI is 23 + */ +#ifndef UTS_UBUNTU_RELEASE_ABI +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#else +/* Ubuntu does not provide actual release version macro, so we use the kernel + * version plus the ABI to generate a unique version code specific to Ubuntu. + * In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in order to + * ignore differences in sublevel which are not important since we have the + * ABI value. Otherwise, it becomes impossible to correlate ABI to version for + * ordering checks. + */ +#define UBUNTU_VERSION_CODE (((~0xFF & LINUX_VERSION_CODE) << 8) + \ + UTS_UBUNTU_RELEASE_ABI) + +#if UTS_UBUNTU_RELEASE_ABI > 255 +#error UTS_UBUNTU_RELEASE_ABI is too large... +#endif /* UTS_UBUNTU_RELEASE_ABI > 255 */ + +#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,0) ) +/* Our version code scheme does not make sense for non 3.x or newer kernels, + * and we have no support in kcompat for this scenario. Thus, treat this as a + * non-Ubuntu kernel. Possibly might be better to error here. + */ +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#endif + +#endif + +/* Note that the 3rd digit is always zero, and will be ignored. This is + * because Ubuntu kernels are based on x.y.0-ABI values, and while their linux + * version codes are 3 digit, this 3rd digit is superseded by the ABI value. + */ +#define UBUNTU_VERSION(a,b,c,d) ((KERNEL_VERSION(a,b,0) << 8) + (d)) + +/* SuSE version macros are the same as Linux kernel version macro */ +#ifndef SLE_VERSION +#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c) +#endif +#define SLE_LOCALVERSION(a,b,c) KERNEL_VERSION(a,b,c) + +#ifdef CONFIG_SUSE_KERNEL +/* Starting since at least SLE 12sp4 and SLE 15, the SUSE kernels have + * provided CONFIG_SUSE_VERSION, CONFIG_SUSE_PATCHLEVEL and + * CONFIG_SUSE_AUXRELEASE. Use these to generate SLE_VERSION if available. + * Only fall back to the manual table otherwise. We expect all future versions + * of SLE kernels to include these values, so the table will remain only for + * the older releases. + */ +#ifdef CONFIG_SUSE_VERSION +#ifndef CONFIG_SUSE_PATCHLEVEL +#error "CONFIG_SUSE_VERSION exists but CONFIG_SUSE_PATCHLEVEL is missing" +#endif +#ifndef CONFIG_SUSE_AUXRELEASE +#error "CONFIG_SUSE_VERSION exists but CONFIG_SUSE_AUXRELEASE is missing" +#endif +#define SLE_VERSION_CODE SLE_VERSION(CONFIG_SUSE_VERSION, CONFIG_SUSE_PATCHLEVEL, CONFIG_SUSE_AUXRELEASE) +#else +/* If we do not have the CONFIG_SUSE_VERSION configuration values, fall back + * to the following table for older releases. 
+ */ +#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) ) +/* SLES11 GA is 2.6.27 based */ +#define SLE_VERSION_CODE SLE_VERSION(11,0,0) +#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) ) +/* SLES11 SP1 is 2.6.32 based */ +#define SLE_VERSION_CODE SLE_VERSION(11,1,0) +#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(3,0,13) ) +/* SLES11 SP2 GA is 3.0.13-0.27 */ +#define SLE_VERSION_CODE SLE_VERSION(11,2,0) +#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,76))) +/* SLES11 SP3 GA is 3.0.76-0.11 */ +#define SLE_VERSION_CODE SLE_VERSION(11,3,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101)) + #if (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(0,8,0)) + /* some SLES11sp2 update kernels up to 3.0.101-0.7.x */ + #define SLE_VERSION_CODE SLE_VERSION(11,2,0) + #elif (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(63,0,0)) + /* most SLES11sp3 update kernels */ + #define SLE_VERSION_CODE SLE_VERSION(11,3,0) + #else + /* SLES11 SP4 GA (3.0.101-63) and update kernels 3.0.101-63+ */ + #define SLE_VERSION_CODE SLE_VERSION(11,4,0) + #endif +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28)) +/* SLES12 GA is 3.12.28-4 + * kernel updates 3.12.xx-<33 through 52>[.yy] */ +#define SLE_VERSION_CODE SLE_VERSION(12,0,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,49)) +/* SLES12 SP1 GA is 3.12.49-11 + * updates 3.12.xx-60.yy where xx={51..} */ +#define SLE_VERSION_CODE SLE_VERSION(12,1,0) +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,21) && \ + (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,59))) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,74) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(92,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(93,0,0))) +/* SLES12 SP2 GA is 4.4.21-69. + * SLES12 SP2 updates before SLES12 SP3 are: 4.4.{21,38,49,59} + * SLES12 SP2 updates after SLES12 SP3 are: 4.4.{74,90,103,114,120} + * but they all use a SLE_LOCALVERSION_CODE matching 92.nn.y */ +#define SLE_VERSION_CODE SLE_VERSION(12,2,0) +#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(4,4,73) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4,4,82) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4,4,92)) || \ + (LINUX_VERSION_CODE == KERNEL_VERSION(4,4,103) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,33,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,38,0))) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,114) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(94,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(95,0,0)) ) +/* SLES12 SP3 GM is 4.4.73-5 and update kernels are 4.4.82-6.3. + * SLES12 SP3 updates not conflicting with SP2 are: 4.4.{82,92} + * SLES12 SP3 updates conflicting with SP2 are: + * - 4.4.103-6.33.1, 4.4.103-6.38.1 + * - 4.4.{114,120}-94.nn.y */ +#define SLE_VERSION_CODE SLE_VERSION(12,3,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(94,41,0) || \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(95,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(96,0,0)))) +/* SLES12 SP4 GM is 4.12.14-94.41 and update kernel is 4.12.14-95.x. 
*/ +#define SLE_VERSION_CODE SLE_VERSION(12,4,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(23,0,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(2,0,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(136,0,0) || \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(25,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(26,0,0)) || \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(150,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(151,0,0)))) +/* SLES15 Beta1 is 4.12.14-2 + * SLES15 GM is 4.12.14-23 and update kernel is 4.12.14-{25,136}, + * and 4.12.14-150.14. + */ +#define SLE_VERSION_CODE SLE_VERSION(15,0,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(25,23,0)) +/* SLES15 SP1 Beta1 is 4.12.14-25.23 */ +#define SLE_VERSION_CODE SLE_VERSION(15,1,0) +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,3,13)) +/* SLES15 SP2 Beta1 is 5.3.13 */ +#define SLE_VERSION_CODE SLE_VERSION(15,2,0) + +/* new SLES kernels must be added here with >= based on kernel + * the idea is to order from newest to oldest and just catch all + * of them using the >= + */ +#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */ +#endif /* !CONFIG_SUSE_VERSION */ +#endif /* CONFIG_SUSE_KERNEL */ +#ifndef SLE_VERSION_CODE +#define SLE_VERSION_CODE 0 +#endif /* SLE_VERSION_CODE */ +#ifndef SLE_LOCALVERSION_CODE +#define SLE_LOCALVERSION_CODE 0 +#endif /* SLE_LOCALVERSION_CODE */ + +#ifdef CONFIG_SUSE_KERNEL +#include "kcompat_sles_defs.h" +#elif (defined(CONFIG_UOS_KERNEL) || defined(CONFIG_EULER_KERNEL) || \ + defined(CONFIG_KYLIN_KERNEL)) +#include "kcompat_chn_os.h" +#endif + +/* + * ADQ depends on __TC_MQPRIO_MODE_MAX and related kernel code + * added around 4.15. Some distributions (e.g. Oracle Linux 7.7) + * have done a partial back-port of that to their kernels based + * on older mainline kernels that did not include all the necessary + * kernel enablement to support ADQ. + * Undefine __TC_MQPRIO_MODE_MAX for all OSV distributions with + * kernels based on mainline kernels older than 4.15 except for + * RHEL, SLES and Ubuntu which are known to have good back-ports. + */ +#if (!RHEL_RELEASE_CODE && !SLE_VERSION_CODE && !UBUNTU_VERSION_CODE) + #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) + #undef __TC_MQPRIO_MODE_MAX + #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(4,15,0) */ +#endif /* if (NOT RHEL && NOT SLES && NOT UBUNTU) */ + +#ifdef __KLOCWORK__ +/* The following are not compiled into the binary driver; they are here + * only to tune Klocwork scans to workaround false-positive issues. 
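+ *
+ * For example (purely illustrative), with the redefinitions below a driver
+ * call such as
+ *
+ *   memcpy(dest, src, len);
+ *
+ * is seen by the analyzer as the bounded form
+ *
+ *   memcpy_s(dest, len, src, len);
+ *
+ * and the open-coded _kc_test_and_set_bit()/_kc_test_and_clear_bit() helpers
+ * give the scanner a plain, lock-protected read-modify-write to reason about.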
+ */ +#ifdef ARRAY_SIZE +#undef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#define memcpy(dest, src, len) memcpy_s(dest, len, src, len) +#define memset(dest, ch, len) memset_s(dest, len, ch, len) + +static inline int _kc_test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags = 0; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old & ~mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} +#define test_and_clear_bit(nr, addr) _kc_test_and_clear_bit(nr, addr) + +static inline int _kc_test_and_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags = 0; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old | mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} +#define test_and_set_bit(nr, addr) _kc_test_and_set_bit(nr, addr) + +#ifdef CONFIG_DYNAMIC_DEBUG +#undef dev_dbg +#define dev_dbg(dev, format, arg...) dev_printk(KERN_DEBUG, dev, format, ##arg) +#undef pr_debug +#define pr_debug(format, arg...) printk(KERN_DEBUG format, ##arg) +#endif /* CONFIG_DYNAMIC_DEBUG */ + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (n = NULL, pos = hlist_entry_safe((head)->first, typeof(*(pos)), \ + member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#ifdef uninitialized_var +#undef uninitialized_var +#define uninitialized_var(x) x = *(&(x)) +#endif + +#ifdef WRITE_ONCE +#undef WRITE_ONCE +#define WRITE_ONCE(x, val) ((x) = (val)) +#endif /* WRITE_ONCE */ +#endif /* __KLOCWORK__ */ + +/* Older versions of GCC will trigger -Wformat-nonliteral warnings for const + * char * strings. Unfortunately, the implementation of do_trace_printk does + * this, in order to add a storage attribute to the memory. This was fixed in + * GCC 5.1, but we still use older distributions built with GCC 4.x. + * + * The string pointer is only passed as a const char * to the __trace_bprintk + * function. Since that function has the __printf attribute, it will trigger + * the warnings. We can't remove the attribute, so instead we'll use the + * __diag macro to disable -Wformat-nonliteral around the call to + * __trace_bprintk. + */ +#if GCC_VERSION < 50100 +#define __trace_bprintk(ip, fmt, args...) 
({ \ + int err; \ + __diag_push(); \ + __diag(ignored "-Wformat-nonliteral"); \ + err = __trace_bprintk(ip, fmt, ##args); \ + __diag_pop(); \ + err; \ +}) +#endif /* GCC_VERSION < 5.1.0 */ + +/* Newer kernels removed */ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)) && \ + (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3)) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,3,0))))) +#define HAVE_PCI_ASPM_H +#endif + +/*****************************************************************************/ +/* 2.4.3 => 2.4.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) + +/**************************************/ +/* PCI DRIVER API */ + +#ifndef pci_set_dma_mask +#define pci_set_dma_mask _kc_pci_set_dma_mask +int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask); +#endif + +#ifndef pci_request_regions +#define pci_request_regions _kc_pci_request_regions +int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name); +#endif + +#ifndef pci_release_regions +#define pci_release_regions _kc_pci_release_regions +void _kc_pci_release_regions(struct pci_dev *pdev); +#endif + +/**************************************/ +/* NETWORK DRIVER API */ + +#ifndef alloc_etherdev +#define alloc_etherdev _kc_alloc_etherdev +struct net_device * _kc_alloc_etherdev(int sizeof_priv); +#endif + +#ifndef is_valid_ether_addr +#define is_valid_ether_addr _kc_is_valid_ether_addr +int _kc_is_valid_ether_addr(u8 *addr); +#endif + +/**************************************/ +/* MISCELLANEOUS */ + +#ifndef INIT_TQUEUE +#define INIT_TQUEUE(_tq, _routine, _data) \ + do { \ + INIT_LIST_HEAD(&(_tq)->list); \ + (_tq)->sync = 0; \ + (_tq)->routine = _routine; \ + (_tq)->data = _data; \ + } while (0) +#endif + +#endif /* 2.4.3 => 2.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) ) +/* Generic MII registers. */ +#define MII_BMCR 0x00 /* Basic mode control register */ +#define MII_BMSR 0x01 /* Basic mode status register */ +#define MII_PHYSID1 0x02 /* PHYS ID 1 */ +#define MII_PHYSID2 0x03 /* PHYS ID 2 */ +#define MII_ADVERTISE 0x04 /* Advertisement control reg */ +#define MII_LPA 0x05 /* Link partner ability reg */ +#define MII_EXPANSION 0x06 /* Expansion register */ +/* Basic mode control register. */ +#define BMCR_FULLDPLX 0x0100 /* Full duplex */ +#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */ +/* Basic mode status register. */ +#define BMSR_ERCAP 0x0001 /* Ext-reg capability */ +#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */ +#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */ +#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */ +#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */ +#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */ +/* Advertisement control register. */ +#define ADVERTISE_CSMA 0x0001 /* Only selector supported */ +#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */ +#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ +#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ +#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */ +#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \ + ADVERTISE_100HALF | ADVERTISE_100FULL) +/* Expansion register for auto-negotiation. 
*/ +#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */ +#endif + +/*****************************************************************************/ +/* 2.4.6 => 2.4.3 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) + +#ifndef pci_set_power_state +#define pci_set_power_state _kc_pci_set_power_state +int _kc_pci_set_power_state(struct pci_dev *dev, int state); +#endif + +#ifndef pci_enable_wake +#define pci_enable_wake _kc_pci_enable_wake +int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable); +#endif + +#ifndef pci_disable_device +#define pci_disable_device _kc_pci_disable_device +void _kc_pci_disable_device(struct pci_dev *pdev); +#endif + +/* PCI PM entry point syntax changed, so don't support suspend/resume */ +#undef CONFIG_PM + +#endif /* 2.4.6 => 2.4.3 */ + +#ifndef HAVE_PCI_SET_MWI +#define pci_set_mwi(X) pci_write_config_word(X, \ + PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \ + PCI_COMMAND_INVALIDATE); +#define pci_clear_mwi(X) pci_write_config_word(X, \ + PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \ + ~PCI_COMMAND_INVALIDATE); +#endif + +/*****************************************************************************/ +/* 2.4.10 => 2.4.9 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) ) + +/**************************************/ +/* MODULE API */ + +#ifndef MODULE_LICENSE + #define MODULE_LICENSE(X) +#endif + +/**************************************/ +/* OTHER */ + +#undef min +#define min(x,y) ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x < _y ? _x : _y; }) + +#undef max +#define max(x,y) ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x > _y ? _x : _y; }) + +#define min_t(type,x,y) ({ \ + type _x = (x); \ + type _y = (y); \ + _x < _y ? _x : _y; }) + +#define max_t(type,x,y) ({ \ + type _x = (x); \ + type _y = (y); \ + _x > _y ? _x : _y; }) + +#ifndef list_for_each_safe +#define list_for_each_safe(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) +#endif + +#ifndef ____cacheline_aligned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_aligned_in_smp ____cacheline_aligned +#else +#define ____cacheline_aligned_in_smp +#endif /* CONFIG_SMP */ +#endif + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) +int _kc_snprintf(char * buf, size_t size, const char *fmt, ...); +#define snprintf(buf, size, fmt, args...) 
_kc_snprintf(buf, size, fmt, ##args) +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args) +#else /* 2.4.8 => 2.4.9 */ +int snprintf(char * buf, size_t size, const char *fmt, ...); +int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#endif +#endif /* 2.4.10 -> 2.4.6 */ + + +/*****************************************************************************/ +/* 2.4.12 => 2.4.10 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) ) +#ifndef HAVE_NETIF_MSG +#define HAVE_NETIF_MSG 1 +enum { + NETIF_MSG_DRV = 0x0001, + NETIF_MSG_PROBE = 0x0002, + NETIF_MSG_LINK = 0x0004, + NETIF_MSG_TIMER = 0x0008, + NETIF_MSG_IFDOWN = 0x0010, + NETIF_MSG_IFUP = 0x0020, + NETIF_MSG_RX_ERR = 0x0040, + NETIF_MSG_TX_ERR = 0x0080, + NETIF_MSG_TX_QUEUED = 0x0100, + NETIF_MSG_INTR = 0x0200, + NETIF_MSG_TX_DONE = 0x0400, + NETIF_MSG_RX_STATUS = 0x0800, + NETIF_MSG_PKTDATA = 0x1000, + NETIF_MSG_HW = 0x2000, + NETIF_MSG_WOL = 0x4000, +}; + +#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) +#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) +#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) +#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) +#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) +#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) +#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) +#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) +#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) +#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) +#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) +#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) +#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) +#endif /* !HAVE_NETIF_MSG */ +#endif /* 2.4.12 => 2.4.10 */ + +/*****************************************************************************/ +/* 2.4.13 => 2.4.12 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) + +/**************************************/ +/* PCI DMA MAPPING */ + +#ifndef virt_to_page + #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT)) +#endif + +#ifndef pci_map_page +#define pci_map_page _kc_pci_map_page +u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction); +#endif + +#ifndef pci_unmap_page +#define pci_unmap_page _kc_pci_unmap_page +void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction); +#endif + +/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */ + +#undef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0xffffffff +#undef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffff + +/**************************************/ +/* OTHER */ + +#ifndef cpu_relax +#define cpu_relax() rep_nop() +#endif + +struct vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + unsigned short h_vlan_proto; + unsigned short h_vlan_TCI; + unsigned short h_vlan_encapsulated_proto; +}; +#endif /* 2.4.13 => 2.4.12 */ + +/*****************************************************************************/ +/* 2.4.17 => 2.4.12 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) ) + +#ifndef __devexit_p + #define __devexit_p(x) &(x) +#endif + +#endif /* 2.4.17 => 2.4.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < 
KERNEL_VERSION(2,4,18) ) +#define NETIF_MSG_HW 0x2000 +#define NETIF_MSG_WOL 0x4000 + +#ifndef netif_msg_hw +#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) +#endif +#ifndef netif_msg_wol +#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) +#endif +#endif /* 2.4.18 */ + +/*****************************************************************************/ + +/*****************************************************************************/ +/* 2.4.20 => 2.4.19 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) ) + +/* we won't support NAPI on less than 2.4.20 */ +#ifdef NAPI +#undef NAPI +#endif + +#endif /* 2.4.20 => 2.4.19 */ + +/*****************************************************************************/ +/* 2.4.22 => 2.4.17 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) +#define pci_name(x) ((x)->slot_name) +#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map) + +#ifndef SUPPORTED_10000baseT_Full +#define SUPPORTED_10000baseT_Full BIT(12) +#endif +#ifndef ADVERTISED_10000baseT_Full +#define ADVERTISED_10000baseT_Full BIT(12) +#endif +#endif + +/*****************************************************************************/ +/* 2.4.22 => 2.4.17 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) +#endif + +/*****************************************************************************/ +/*****************************************************************************/ +/* 2.4.23 => 2.4.22 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) ) +/*****************************************************************************/ +#ifdef NAPI +#ifndef netif_poll_disable +#define netif_poll_disable(x) _kc_netif_poll_disable(x) +static inline void _kc_netif_poll_disable(struct net_device *netdev) +{ + while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) { + /* No hurry */ + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(1); + } +} +#endif +#ifndef netif_poll_enable +#define netif_poll_enable(x) _kc_netif_poll_enable(x) +static inline void _kc_netif_poll_enable(struct net_device *netdev) +{ + clear_bit(__LINK_STATE_RX_SCHED, &netdev->state); +} +#endif +#endif /* NAPI */ +#ifndef netif_tx_disable +#define netif_tx_disable(x) _kc_netif_tx_disable(x) +static inline void _kc_netif_tx_disable(struct net_device *dev) +{ + spin_lock_bh(&dev->xmit_lock); + netif_stop_queue(dev); + spin_unlock_bh(&dev->xmit_lock); +} +#endif +#else /* 2.4.23 => 2.4.22 */ +#define HAVE_SCTP +#endif /* 2.4.23 => 2.4.22 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ) +#define ETHTOOL_OPS_COMPAT +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) +#define __user +#endif /* < 2.4.27 */ + +/*****************************************************************************/ +/* 2.5.71 => 2.4.x */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) ) +#define sk_protocol protocol +#define pci_get_device pci_find_device +#endif /* 2.5.70 => 2.4.x */ + +/*****************************************************************************/ +/* < 2.4.27 or 2.6.0 <= 2.6.5 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) ) + +#ifndef netif_msg_init +#define 
netif_msg_init _kc_netif_msg_init +static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits) +{ + /* use default */ + if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) + return default_msg_enable_bits; + if (debug_value == 0) /* no output */ + return 0; + /* set low N bits */ + return (1 << debug_value) -1; +} +#endif + +#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */ +/*****************************************************************************/ +#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \ + (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \ + ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ))) +#define netdev_priv(x) x->priv +#endif + +/*****************************************************************************/ +/* <= 2.5.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) ) +#include +#undef pci_register_driver +#define pci_register_driver pci_module_init + +/* + * Most of the dma compat code is copied/modifed from the 2.4.37 + * /include/linux/libata-compat.h header file + */ +/* These definitions mirror those in pci.h, so they can be used + * interchangeably with their PCI_ counterparts */ +enum dma_data_direction { + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; + +struct device { + struct pci_dev pdev; +}; + +static inline struct pci_dev *to_pci_dev (struct device *dev) +{ + return (struct pci_dev *) dev; +} +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return (struct device *) pdev; +} +#define pdev_printk(lvl, pdev, fmt, args...) \ + printk("%s %s: " fmt, lvl, pci_name(pdev), ## args) +#define dev_err(dev, fmt, args...) \ + pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args) +#define dev_info(dev, fmt, args...) \ + pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args) +#define dev_warn(dev, fmt, args...) \ + pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args) +#define dev_notice(dev, fmt, args...) \ + pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args) +#define dev_dbg(dev, fmt, args...) \ + pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args) + +/* NOTE: dangerous! 
we ignore the 'gfp' argument */ +#define dma_alloc_coherent(dev,sz,dma,gfp) \ + pci_alloc_consistent(to_pci_dev(dev),(sz),(dma)) +#define dma_free_coherent(dev,sz,addr,dma_addr) \ + pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr)) + +#define dma_map_page(dev,a,b,c,d) \ + pci_map_page(to_pci_dev(dev),(a),(b),(c),(d)) +#define dma_unmap_page(dev,a,b,c) \ + pci_unmap_page(to_pci_dev(dev),(a),(b),(c)) + +#define dma_map_single(dev,a,b,c) \ + pci_map_single(to_pci_dev(dev),(a),(b),(c)) +#define dma_unmap_single(dev,a,b,c) \ + pci_unmap_single(to_pci_dev(dev),(a),(b),(c)) + +#define dma_map_sg(dev, sg, nents, dir) \ + pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir) +#define dma_unmap_sg(dev, sg, nents, dir) \ + pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir) + +#define dma_sync_single(dev,a,b,c) \ + pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c)) + +/* for range just sync everything, that's all the pci API can do */ +#define dma_sync_single_range(dev,addr,off,sz,dir) \ + pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir)) + +#define dma_set_mask(dev,mask) \ + pci_set_dma_mask(to_pci_dev(dev),(mask)) + +/* hlist_* code - double linked lists */ +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next, **pprev; +}; + +static inline void __hlist_del(struct hlist_node *n) +{ + struct hlist_node *next = n->next; + struct hlist_node **pprev = n->pprev; + *pprev = next; + if (next) + next->pprev = pprev; +} + +static inline void hlist_del(struct hlist_node *n) +{ + __hlist_del(n); + n->next = NULL; + n->pprev = NULL; +} + +static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) +{ + struct hlist_node *first = h->first; + n->next = first; + if (first) + first->pprev = &n->next; + h->first = n; + n->pprev = &h->first; +} + +static inline int hlist_empty(const struct hlist_head *h) +{ + return !h->first; +} +#define HLIST_HEAD_INIT { .first = NULL } +#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } +#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) +static inline void INIT_HLIST_NODE(struct hlist_node *h) +{ + h->next = NULL; + h->pprev = NULL; +} + +#ifndef might_sleep +#define might_sleep() +#endif +#else +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return &pdev->dev; +} +#endif /* <= 2.5.0 */ + +/*****************************************************************************/ +/* 2.5.28 => 2.4.23 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) + +#include +#define work_struct tq_struct +#undef INIT_WORK +#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a) +#undef container_of +#define container_of list_entry +#define schedule_work schedule_task +#define flush_scheduled_work flush_scheduled_tasks +#define cancel_work_sync(x) flush_scheduled_work() + +#endif /* 2.5.28 => 2.4.17 */ + +/*****************************************************************************/ +/* 2.6.0 => 2.5.28 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +#ifndef read_barrier_depends +#define read_barrier_depends() rmb() +#endif + +#ifndef rcu_head +struct __kc_callback_head { + struct __kc_callback_head *next; + void (*func)(struct callback_head *head); +}; +#define rcu_head __kc_callback_head +#endif + +#undef get_cpu +#define get_cpu() smp_processor_id() +#undef put_cpu +#define put_cpu() do { } while(0) +#define MODULE_INFO(version, _version) +#ifndef CONFIG_T1000_DISABLE_PACKET_SPLIT +#define CONFIG_T1000_DISABLE_PACKET_SPLIT 1 +#endif +#ifndef 
CONFIG_TGB_DISABLE_PACKET_SPLIT +#define CONFIG_TGB_DISABLE_PACKET_SPLIT 1 +#endif +#ifndef CONFIG_TGC_DISABLE_PACKET_SPLIT +#define CONFIG_TGC_DISABLE_PACKET_SPLIT 1 +#endif + +#define dma_set_coherent_mask(dev,mask) 1 + +#undef dev_put +#define dev_put(dev) __dev_put(dev) + +#ifndef skb_fill_page_desc +#define skb_fill_page_desc _kc_skb_fill_page_desc +void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size); +#endif + +#undef ALIGN +#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) + +#ifndef page_count +#define page_count(p) atomic_read(&(p)->count) +#endif + +#ifdef MAX_NUMNODES +#undef MAX_NUMNODES +#endif +#define MAX_NUMNODES 1 + +/* find_first_bit and find_next bit are not defined for most + * 2.4 kernels (except for the redhat 2.4.21 kernels + */ +#include +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) +#undef find_next_bit +#define find_next_bit _kc_find_next_bit +unsigned long _kc_find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset); +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) + +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (strchr(dev->name, '%')) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#ifndef strlcpy +#define strlcpy _kc_strlcpy +size_t _kc_strlcpy(char *dest, const char *src, size_t size); +#endif /* strlcpy */ + +#ifndef do_div +#if BITS_PER_LONG == 64 +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + __rem = ((uint64_t)(n)) % __base; \ + (n) = ((uint64_t)(n)) / __base; \ + __rem; \ + }) +#elif BITS_PER_LONG == 32 +uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor); +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + if (likely(((n) >> 32) == 0)) { \ + __rem = (uint32_t)(n) % __base; \ + (n) = (uint32_t)(n) / __base; \ + } else \ + __rem = _kc__div64_32(&(n), __base); \ + __rem; \ + }) +#else /* BITS_PER_LONG == ?? 
*/ +# error do_div() does not yet support the C64 +#endif /* BITS_PER_LONG */ +#endif /* do_div */ + +#ifndef NSEC_PER_SEC +#define NSEC_PER_SEC 1000000000L +#endif + +#undef HAVE_I2C_SUPPORT +#else /* 2.6.0 */ + +#endif /* 2.6.0 => 2.5.28 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ) +#define dma_pool pci_pool +#define dma_pool_destroy pci_pool_destroy +#define dma_pool_alloc pci_pool_alloc +#define dma_pool_free pci_pool_free + +#define dma_pool_create(name,dev,size,align,allocation) \ + pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation)) +#endif /* < 2.6.3 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +#define MODULE_VERSION(_version) MODULE_INFO(version, _version) +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +/* 2.6.5 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) +#define dma_sync_single_for_cpu dma_sync_single +#define dma_sync_single_for_device dma_sync_single +#define dma_sync_single_range_for_cpu dma_sync_single_range +#define dma_sync_single_range_for_device dma_sync_single_range +#ifndef pci_dma_mapping_error +#define pci_dma_mapping_error _kc_pci_dma_mapping_error +static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr) +{ + return dma_addr == 0; +} +#endif +#endif /* 2.6.5 => 2.6.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...); +#define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args) +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) ) +/* taken from 2.6 include/linux/bitmap.h */ +#undef bitmap_zero +#define bitmap_zero _kc_bitmap_zero +static inline void _kc_bitmap_zero(unsigned long *dst, int nbits) +{ + if (nbits <= BITS_PER_LONG) + *dst = 0UL; + else { + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memset(dst, 0, len); + } +} +#define page_to_nid(x) 0 + +#endif /* < 2.6.6 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) ) +#undef if_mii +#define if_mii _kc_if_mii +static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq) +{ + return (struct mii_ioctl_data *) &rq->ifr_ifru; +} + +#ifndef __force +#define __force +#endif +#endif /* < 2.6.7 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) +#ifndef PCI_EXP_DEVCTL +#define PCI_EXP_DEVCTL 8 +#endif +#ifndef PCI_EXP_DEVCTL_CERE +#define PCI_EXP_DEVCTL_CERE 0x0001 +#endif +#define PCI_EXP_FLAGS 2 /* Capabilities register */ +#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */ +#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ +#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ +#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */ +#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ +#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ +#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ +#define PCI_EXP_DEVCAP 4 /* Device capabilities */ +#define PCI_EXP_DEVSTA 10 /* Device Status */ +#define msleep(x) do { 
set_current_state(TASK_UNINTERRUPTIBLE); \ + schedule_timeout((x * HZ)/1000 + 2); \ + } while (0) + +#endif /* < 2.6.8 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)) +#include +#define __iomem + +#ifndef kcalloc +#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags) +void *_kc_kzalloc(size_t size, int flags); +#endif +#define MSEC_PER_SEC 1000L +static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j) +{ +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (MSEC_PER_SEC / HZ) * j; +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); +#else + return (j * MSEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return m * (HZ / MSEC_PER_SEC); +#else + return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; +#endif +} + +#define msleep_interruptible _kc_msleep_interruptible +static inline unsigned long _kc_msleep_interruptible(unsigned int msecs) +{ + unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1; + + while (timeout && !signal_pending(current)) { + __set_current_state(TASK_INTERRUPTIBLE); + timeout = schedule_timeout(timeout); + } + return _kc_jiffies_to_msecs(timeout); +} + +/* Basic mode control register. */ +#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ + +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 +#endif +#ifndef __be16 +#define __be16 u16 +#endif +#ifndef __be32 +#define __be32 u32 +#endif +#ifndef __be64 +#define __be64 u64 +#endif + +static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) +{ + return (struct vlan_ethhdr *)skb->mac.raw; +} + +/* Wake-On-Lan options. */ +#define WAKE_PHY BIT(0) +#define WAKE_UCAST BIT(1) +#define WAKE_MCAST BIT(2) +#define WAKE_BCAST BIT(3) +#define WAKE_ARP BIT(4) +#define WAKE_MAGIC BIT(5) +#define WAKE_MAGICSECURE BIT(6) /* only meaningful if WAKE_MAGIC */ + +#define skb_header_pointer _kc_skb_header_pointer +static inline void *_kc_skb_header_pointer(const struct sk_buff *skb, + int offset, int len, void *buffer) +{ + int hlen = skb_headlen(skb); + + if (hlen - offset >= len) + return skb->data + offset; + +#ifdef MAX_SKB_FRAGS + if (skb_copy_bits(skb, offset, buffer, len) < 0) + return NULL; + + return buffer; +#else + return NULL; +#endif + +#ifndef NETDEV_TX_OK +#define NETDEV_TX_OK 0 +#endif +#ifndef NETDEV_TX_BUSY +#define NETDEV_TX_BUSY 1 +#endif +#ifndef NETDEV_TX_LOCKED +#define NETDEV_TX_LOCKED -1 +#endif +} + +#ifndef __bitwise +#define __bitwise +#endif +#endif /* < 2.6.9 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) +#ifdef module_param_array_named +#undef module_param_array_named +#define module_param_array_named(name, array, type, nump, perm) \ + static struct kparam_array __param_arr_##name \ + = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \ + sizeof(array[0]), array }; \ + module_param_call(name, param_array_set, param_array_get, \ + &__param_arr_##name, perm) +#endif /* module_param_array_named */ +/* + * num_online is broken for all < 2.6.10 kernels. 
This is needed to support + * Node module parameter of ixgbe. + */ +#undef num_online_nodes +#define num_online_nodes(n) 1 +extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES); +#undef node_online_map +#define node_online_map _kcompat_node_online_map +#define pci_get_class pci_find_class +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) ) +#define PCI_D0 0 +#define PCI_D1 1 +#define PCI_D2 2 +#define PCI_D3hot 3 +#define PCI_D3cold 4 +typedef int pci_power_t; +#define pci_choose_state(pdev,state) state +#define PMSG_SUSPEND 3 +#define PCI_EXP_LNKCTL 16 + +#undef NETIF_F_LLTX + +#ifndef ARCH_HAS_PREFETCH +#define prefetch(X) +#endif + +#ifndef NET_IP_ALIGN +#define NET_IP_ALIGN 2 +#endif + +#define KC_USEC_PER_SEC 1000000L +#define usecs_to_jiffies _kc_usecs_to_jiffies +static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j) +{ +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (KC_USEC_PER_SEC / HZ) * j; +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC); +#else + return (j * KC_USEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ); +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return m * (HZ / KC_USEC_PER_SEC); +#else + return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC; +#endif +} + +#define PCI_EXP_LNKCAP 12 /* Link Capabilities */ +#define PCI_EXP_LNKSTA 18 /* Link Status */ +#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ +#define PCI_EXP_SLTCTL 24 /* Slot Control */ +#define PCI_EXP_SLTSTA 26 /* Slot Status */ +#define PCI_EXP_RTCTL 28 /* Root Control */ +#define PCI_EXP_RTCAP 30 /* Root Capabilities */ +#define PCI_EXP_RTSTA 32 /* Root Status */ +#endif /* < 2.6.11 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) ) +#include +#define USE_REBOOT_NOTIFIER + +/* Generic MII registers. */ +#define MII_CTRL1000 0x09 /* 1000BASE-T control */ +#define MII_STAT1000 0x0a /* 1000BASE-T status */ +/* Advertisement control register. */ +#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ +#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */ +/* Link partner ability register. 
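+ * (the LPA_PAUSE_CAP/LPA_PAUSE_ASYM bits below use the same bit positions as
+ * the ADVERTISE_PAUSE_CAP/ADVERTISE_PAUSE_ASYM bits defined above)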
*/ +#define LPA_PAUSE_CAP 0x0400 /* Can pause */ +#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */ +/* 1000BASE-T Control register */ +#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ +#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ +/* 1000BASE-T Status register */ +#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ +#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ + +#ifndef is_zero_ether_addr +#define is_zero_ether_addr _kc_is_zero_ether_addr +static inline int _kc_is_zero_ether_addr(const u8 *addr) +{ + return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); +} +#endif /* is_zero_ether_addr */ +#ifndef is_multicast_ether_addr +#define is_multicast_ether_addr _kc_is_multicast_ether_addr +static inline int _kc_is_multicast_ether_addr(const u8 *addr) +{ + return addr[0] & 0x01; +} +#endif /* is_multicast_ether_addr */ +#endif /* < 2.6.12 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) +#ifndef kstrdup +#define kstrdup _kc_kstrdup +char *_kc_kstrdup(const char *s, unsigned int gfp); +#endif +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) +#define pm_message_t u32 +#ifndef kzalloc +#define kzalloc _kc_kzalloc +void *_kc_kzalloc(size_t size, int flags); +#endif + +/* Generic MII registers. */ +#define MII_ESTATUS 0x0f /* Extended Status */ +/* Basic mode status register. */ +#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ +/* Extended status register. */ +#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ +#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ + +#define SUPPORTED_Pause BIT(13) +#define SUPPORTED_Asym_Pause BIT(14) +#define ADVERTISED_Pause BIT(13) +#define ADVERTISED_Asym_Pause BIT(14) + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)))) +#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t)) +#define gfp_t unsigned +#else +typedef unsigned gfp_t; +#endif +#endif /* !RHEL4.3->RHEL5.0 */ + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) ) +#ifdef CONFIG_X86_64 +#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \ + dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir)) +#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \ + dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir)) +#endif +#endif +#endif /* < 2.6.14 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) ) +#ifndef kfree_rcu +/* this is placed here due to a lack of rcu_barrier in previous kernels */ +#define kfree_rcu(_ptr, _offset) kfree(_ptr) +#endif /* kfree_rcu */ +#ifndef vmalloc_node +#define vmalloc_node(a,b) vmalloc(a) +#endif /* vmalloc_node*/ + +#define setup_timer(_timer, _function, _data) \ +do { \ + (_timer)->function = _function; \ + (_timer)->data = _data; \ + init_timer(_timer); \ +} while (0) +#ifndef device_can_wakeup +#define device_can_wakeup(dev) (1) +#endif +#ifndef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) do{}while(0) +#endif +#ifndef device_init_wakeup +#define device_init_wakeup(dev,val) do {} while (0) +#endif +static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2) +{ + 
const u16 *a = (const u16 *) addr1; + const u16 *b = (const u16 *) addr2; + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; +} +#undef compare_ether_addr +#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2) +#endif /* < 2.6.15 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) ) +#undef DEFINE_MUTEX +#define DEFINE_MUTEX(x) DECLARE_MUTEX(x) +#define mutex_lock(x) down_interruptible(x) +#define mutex_unlock(x) up(x) + +#ifndef ____cacheline_internodealigned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp +#else +#define ____cacheline_internodealigned_in_smp +#endif /* CONFIG_SMP */ +#endif /* ____cacheline_internodealigned_in_smp */ +#undef HAVE_PCI_ERS +#else /* 2.6.16 and above */ +#undef HAVE_PCI_ERS +#define HAVE_PCI_ERS +#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) ) +#ifdef device_can_wakeup +#undef device_can_wakeup +#endif /* device_can_wakeup */ +#define device_can_wakeup(dev) 1 +#endif /* SLE_VERSION(10,4,0) */ +#endif /* < 2.6.16 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) ) +#ifndef dev_notice +#define dev_notice(dev, fmt, args...) \ + dev_printk(KERN_NOTICE, dev, fmt, ## args) +#endif + +#ifndef first_online_node +#define first_online_node 0 +#endif +#ifndef NET_SKB_PAD +#define NET_SKB_PAD 16 +#endif +#endif /* < 2.6.17 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) ) + +#ifndef IRQ_HANDLED +#define irqreturn_t void +#define IRQ_HANDLED +#define IRQ_NONE +#endif + +#ifndef IRQF_PROBE_SHARED +#ifdef SA_PROBEIRQ +#define IRQF_PROBE_SHARED SA_PROBEIRQ +#else +#define IRQF_PROBE_SHARED 0 +#endif +#endif + +#ifndef IRQF_SHARED +#define IRQF_SHARED SA_SHIRQ +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#ifndef skb_is_gso +#ifdef NETIF_F_TSO +#define skb_is_gso _kc_skb_is_gso +static inline int _kc_skb_is_gso(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_size; +} +#else +#define skb_is_gso(a) 0 +#endif +#endif + +#ifndef resource_size_t +#define resource_size_t unsigned long +#endif + +#ifdef skb_pad +#undef skb_pad +#endif +#define skb_pad(x,y) _kc_skb_pad(x, y) +int _kc_skb_pad(struct sk_buff *skb, int pad); +#ifdef skb_padto +#undef skb_padto +#endif +#define skb_padto(x,y) _kc_skb_padto(x, y) +static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + if(likely(size >= len)) + return 0; + return _kc_skb_pad(skb, len - size); +} + +#ifndef DECLARE_PCI_UNMAP_ADDR +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ + dma_addr_t ADDR_NAME +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ + u32 LEN_NAME +#define pci_unmap_addr(PTR, ADDR_NAME) \ + ((PTR)->ADDR_NAME) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ + (((PTR)->ADDR_NAME) = (VAL)) +#define pci_unmap_len(PTR, LEN_NAME) \ + ((PTR)->LEN_NAME) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ + (((PTR)->LEN_NAME) = (VAL)) +#endif /* DECLARE_PCI_UNMAP_ADDR */ +#endif /* < 2.6.18 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0x00, + PCIE_LNK_X1 = 0x01, + PCIE_LNK_X2 = 0x02, + PCIE_LNK_X4 = 0x04, + PCIE_LNK_X8 = 
0x08, + PCIE_LNK_X12 = 0x0C, + PCIE_LNK_X16 = 0x10, + PCIE_LNK_X32 = 0x20, + PCIE_LNK_WIDTH_UNKNOWN = 0xFF, +}; + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0))) +#define i_private u.generic_ip +#endif /* >= RHEL 5.0 */ + +#ifndef DIV_ROUND_UP +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#endif +#ifndef __ALIGN_MASK +#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#endif +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ) +#if (!((RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))))) +typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *); +#endif +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#undef CONFIG_INET_LRO +#undef CONFIG_INET_LRO_MODULE +#undef CONFIG_FCOE +#undef CONFIG_FCOE_MODULE +#endif +typedef irqreturn_t (*new_handler_t)(int, void*); +static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) +#else /* 2.4.x */ +typedef void (*irq_handler_t)(int, void*, struct pt_regs *); +typedef void (*new_handler_t)(int, void*); +static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) +#endif /* >= 2.5.x */ +{ + irq_handler_t new_handler = (irq_handler_t) handler; + return request_irq(irq, new_handler, flags, devname, dev_id); +} + +#undef request_irq +#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id)) + +#define irq_handler_t new_handler_t + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) ) +#ifndef skb_checksum_help +static inline int __kc_skb_checksum_help(struct sk_buff *skb) +{ + return skb_checksum_help(skb, 0); +} +#define skb_checksum_help(skb) __kc_skb_checksum_help((skb)) +#endif +#endif /* < 2.6.19 && >= 2.6.11 */ + +/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) +#define PCIE_CONFIG_SPACE_LEN 256 +#define PCI_CONFIG_SPACE_LEN 64 +#define PCIE_LINK_STATUS 0x12 +#define pci_config_space_ich8lan() do {} while(0) +#undef pci_save_state +int _kc_pci_save_state(struct pci_dev *); +#define pci_save_state(pdev) _kc_pci_save_state(pdev) +#undef pci_restore_state +void _kc_pci_restore_state(struct pci_dev *); +#define pci_restore_state(pdev) _kc_pci_restore_state(pdev) +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +#undef free_netdev +void _kc_free_netdev(struct net_device *); +#define free_netdev(netdev) _kc_free_netdev(netdev) +#endif +static inline int :pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev) +{ + return 0; +} +#define pci_disable_pcie_error_reporting(dev) do {} while (0) +#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0) + +void *_kc_kmemdup(const void *src, size_t len, unsigned gfp); +#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp) +#ifndef bool +#define bool _Bool +#define true 1 +#define false 0 +#endif +#else /* 2.6.19 */ +#include +#include + +#define NEW_SKB_CSUM_HELP +#endif /* < 2.6.19 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ) +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) ) +#undef INIT_WORK +#define INIT_WORK(_work, _func) \ +do { \ + 
INIT_LIST_HEAD(&(_work)->entry); \ + (_work)->pending = 0; \ + (_work)->func = (void (*)(void *))_func; \ + (_work)->data = _work; \ + init_timer(&(_work)->timer); \ +} while (0) +#endif + +#ifndef PCI_VDEVICE +#define PCI_VDEVICE(ven, dev) \ + PCI_VENDOR_ID_##ven, (dev), \ + PCI_ANY_ID, PCI_ANY_ID, 0, 0 +#endif + +#ifndef PCI_VENDOR_ID_WANGXUN +#define PCI_VENDOR_ID_WANGXUN 0x8088 +#endif + +#ifndef round_jiffies +#define round_jiffies(x) x +#endif + +#define csum_offset csum + +#define HAVE_EARLY_VMALLOC_NODE +#define dev_to_node(dev) -1 +#undef set_dev_node +/* remove compiler warning with b=b, for unused variable */ +#define set_dev_node(a, b) do { (b) = (b); } while(0) + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +typedef __u16 __bitwise __sum16; +typedef __u32 __bitwise __wsum; +#endif + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +static inline __wsum csum_unfold(__sum16 n) +{ + return (__force __wsum)n; +} +#endif + +#else /* < 2.6.20 */ +#define HAVE_DEVICE_NUMA_NODE +#endif /* < 2.6.20 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +#define to_net_dev(class) container_of(class, struct net_device, class_dev) +#define NETDEV_CLASS_DEV +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5))) +#define vlan_group_get_device(vg, id) (vg->vlan_devices[id]) +#define vlan_group_set_device(vg, id, dev) \ + do { \ + if (vg) vg->vlan_devices[id] = dev; \ + } while (0) +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */ +#define pci_channel_offline(pdev) (pdev->error_state && \ + pdev->error_state != pci_channel_io_normal) +#define pci_request_selected_regions(pdev, bars, name) \ + pci_request_regions(pdev, name) +#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev); + +#ifndef __aligned +#define __aligned(x) __attribute__((aligned(x))) +#endif + +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev); +#define netdev_to_dev(netdev) \ + pci_dev_to_dev(_kc_netdev_to_pdev(netdev)) +#define devm_kzalloc(dev, size, flags) kzalloc(size, flags) +#define devm_kfree(dev, p) kfree(p) +#else /* 2.6.21 */ +static inline struct device *netdev_to_dev(struct net_device *netdev) +{ + return &netdev->dev; +} + +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +#define tcp_hdr(skb) (skb->h.th) +#define tcp_hdrlen(skb) (skb->h.th->doff << 2) +#define skb_transport_offset(skb) (skb->h.raw - skb->data) +#define skb_transport_header(skb) (skb->h.raw) +#define ipv6_hdr(skb) (skb->nh.ipv6h) +#define ip_hdr(skb) (skb->nh.iph) +#define skb_network_offset(skb) (skb->nh.raw - skb->data) +#define skb_network_header(skb) (skb->nh.raw) +#define skb_tail_pointer(skb) skb->tail +#define skb_reset_tail_pointer(skb) \ + do { \ + skb->tail = skb->data; \ + } while (0) +#define skb_set_tail_pointer(skb, offset) \ + do { \ + skb->tail = skb->data + offset; \ + } while (0) +#define skb_copy_to_linear_data(skb, from, len) \ + 
memcpy(skb->data, from, len) +#define skb_copy_to_linear_data_offset(skb, offset, from, len) \ + memcpy(skb->data + offset, from, len) +#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw) +#define pci_register_driver pci_module_init +#define skb_mac_header(skb) skb->mac.raw + +#ifdef NETIF_F_MULTI_QUEUE +#ifndef alloc_etherdev_mq +#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a) +#endif +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef ETH_FCS_LEN +#define ETH_FCS_LEN 4 +#endif +#define cancel_work_sync(x) flush_scheduled_work() +#ifndef udp_hdr +#define udp_hdr _udp_hdr +static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) +{ + return (struct udphdr *)skb_transport_header(skb); +} +#endif + +#ifdef cpu_to_be16 +#undef cpu_to_be16 +#endif +#define cpu_to_be16(x) __constant_htons(x) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1))) +enum { + DUMP_PREFIX_NONE, + DUMP_PREFIX_ADDRESS, + DUMP_PREFIX_OFFSET +}; +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */ +#ifndef hex_asc +#define hex_asc(x) "0123456789abcdef"[x] +#endif +#include +void _kc_print_hex_dump(const char *level, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii); +#define print_hex_dump(lvl, s, t, r, g, b, l, a) \ + _kc_print_hex_dump(lvl, s, t, r, g, b, l, a) +#ifndef ADVERTISED_2500baseX_Full +#define ADVERTISED_2500baseX_Full BIT(15) +#endif +#ifndef SUPPORTED_2500baseX_Full +#define SUPPORTED_2500baseX_Full BIT(15) +#endif + +#ifndef ETH_P_PAUSE +#define ETH_P_PAUSE 0x8808 +#endif + +static inline int compound_order(struct page *page) +{ + return 0; +} + +#define __must_be_array(a) 0 + +#ifndef SKB_WITH_OVERHEAD +#define SKB_WITH_OVERHEAD(X) \ + ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#endif +#else /* 2.6.22 */ +#define ETH_TYPE_TRANS_SETS_DEV +#define HAVE_NETDEV_STATS_IN_NETDEV +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) ) +#undef SET_MODULE_OWNER +#define SET_MODULE_OWNER(dev) do { } while (0) +#endif /* > 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) +#define netif_subqueue_stopped(_a, _b) 0 +#ifndef PTR_ALIGN +#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) +#endif + +#ifndef CONFIG_PM_SLEEP +#define CONFIG_PM_SLEEP CONFIG_PM +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) ) +#define HAVE_ETHTOOL_GET_PERM_ADDR +#endif /* 2.6.14 through 2.6.22 */ + +static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom) +{ + int delta = 0; + + if (headroom > (skb->data - skb->head)) + delta = headroom - (skb->data - skb->head); + + if (delta || skb_header_cloned(skb)) + return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, + GFP_ATOMIC); + return 0; +} +#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h)) +#endif /* < 2.6.23 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +#ifndef ETH_FLAG_LRO +#define ETH_FLAG_LRO NETIF_F_LRO +#endif + +#ifndef ACCESS_ONCE +#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) +#endif + +/* if GRO is supported then the napi struct must already exist */ +#ifndef NETIF_F_GRO +/* NAPI API changes in 2.6.24 break everything */ +struct napi_struct { + /* used to look up the real NAPI polling routine */ + 
int (*poll)(struct napi_struct *, int); + struct net_device *dev; + int weight; +}; +#endif + +#ifdef NAPI +int __kc_adapter_clean(struct net_device *, int *); +/* The following definitions are multi-queue aware, and thus we have a driver + * define list which determines which drivers support multiple queues, and + * thus need these stronger defines. If a driver does not support multi-queue + * functionality, you don't need to add it to this list. + */ +struct net_device *napi_to_poll_dev(const struct napi_struct *napi); + +static inline void __kc_mq_netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight) +{ + struct net_device *poll_dev = napi_to_poll_dev(napi); + poll_dev->poll = __kc_adapter_clean; + poll_dev->priv = napi; + poll_dev->weight = weight; + set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); + set_bit(__LINK_STATE_START, &poll_dev->state); + dev_hold(poll_dev); + napi->poll = poll; + napi->weight = weight; + napi->dev = dev; +} +#define netif_napi_add __kc_mq_netif_napi_add + +static inline void __kc_mq_netif_napi_del(struct napi_struct *napi) +{ + struct net_device *poll_dev = napi_to_poll_dev(napi); + WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); + dev_put(poll_dev); + memset(poll_dev, 0, sizeof(struct net_device)); +} + +#define netif_napi_del __kc_mq_netif_napi_del + +static inline bool __kc_mq_napi_schedule_prep(struct napi_struct *napi) +{ + return netif_running(napi->dev) && + netif_rx_schedule_prep(napi_to_poll_dev(napi)); +} +#define napi_schedule_prep __kc_mq_napi_schedule_prep + +static inline void __kc_mq_napi_schedule(struct napi_struct *napi) +{ + if (napi_schedule_prep(napi)) + __netif_rx_schedule(napi_to_poll_dev(napi)); +} +#define napi_schedule __kc_mq_napi_schedule + +#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi)) +#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi)) +#ifdef CONFIG_SMP +static inline void napi_synchronize(const struct napi_struct *n) +{ + struct net_device *dev = napi_to_poll_dev(n); + + while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) { + /* No hurry. */ + msleep(1); + } +} +#else +#define napi_synchronize(n) barrier() +#endif /* CONFIG_SMP */ +#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi)) +static inline void _kc_napi_complete(struct napi_struct *napi) +{ +#ifdef NETIF_F_GRO + napi_gro_flush(napi); +#endif + netif_rx_complete(napi_to_poll_dev(napi)); +} +#define napi_complete _kc_napi_complete +#else /* NAPI */ + +/* The following definitions are only used if we don't support NAPI at all. */ + +static inline __kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight) +{ + dev->poll = poll; + dev->weight = weight; + napi->poll = poll; + napi->weight = weight; + napi->dev = dev; +} +#define netif_napi_del(_a) do {} while (0) +#endif /* NAPI */ + +#undef dev_get_by_name +#define dev_get_by_name(_a, _b) dev_get_by_name(_b) +#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b) +#ifndef DMA_BIT_MASK +#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1)) +#endif + +#ifdef NETIF_F_TSO6 +#define skb_is_gso_v6 _kc_skb_is_gso_v6 +static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; +} +#endif /* NETIF_F_TSO6 */ + +#ifndef KERN_CONT +#define KERN_CONT "" +#endif +#ifndef pr_err +#define pr_err(fmt, arg...) 
\ + printk(KERN_ERR fmt, ##arg) +#endif + +#ifndef rounddown_pow_of_two +#define rounddown_pow_of_two(n) \ + __builtin_constant_p(n) ? ( \ + (n == 1) ? 0 : \ + (1UL << ilog2(n))) : \ + (1UL << (fls_long(n) - 1)) +#endif + +#else /* < 2.6.24 */ +#define HAVE_ETHTOOL_GET_SSET_COUNT +#define HAVE_NETDEV_NAPI_LIST +#endif /* < 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) +#define INCLUDE_PM_QOS_PARAMS_H +#include +#else /* >= 3.2.0 */ +#include +#endif /* else >= 3.2.0 */ +#endif /* > 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ) +#define PM_QOS_CPU_DMA_LATENCY 1 + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) ) +#include +#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY +#define pm_qos_add_requirement(pm_qos_class, name, value) \ + set_acceptable_latency(name, value) +#define pm_qos_remove_requirement(pm_qos_class, name) \ + remove_acceptable_latency(name) +#define pm_qos_update_requirement(pm_qos_class, name, value) \ + modify_acceptable_latency(name, value) +#else +#define PM_QOS_DEFAULT_VALUE -1 +#define pm_qos_add_requirement(pm_qos_class, name, value) +#define pm_qos_remove_requirement(pm_qos_class, name) +#define pm_qos_update_requirement(pm_qos_class, name, value) { \ + if (value != PM_QOS_DEFAULT_VALUE) { \ + printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \ + pci_name(adapter->pdev)); \ + } \ +} + +#endif /* > 2.6.18 */ + +#define pci_enable_device_mem(pdev) pci_enable_device(pdev) + +#ifndef DEFINE_PCI_DEVICE_TABLE +#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] +#endif /* DEFINE_PCI_DEVICE_TABLE */ + +#ifndef strict_strtol +#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r) +static inline int _kc_strict_strtol(const char *buf, unsigned int base, long *res) +{ + /* adapted from strict_strtoul() in 2.6.25 */ + char *tail; + long val; + size_t len; + + *res = 0; + len = strlen(buf); + if (!len) + return -EINVAL; + val = simple_strtol(buf, &tail, base); + if (tail == buf) + return -EINVAL; + if ((*tail == '\0') || + ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) { + *res = val; + return 0; + } + + return -EINVAL; +} +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#ifndef TXGBE_PROCFS +#define TXGBE_PROCFS +#endif /* TXGBE_PROCFS */ +#endif /* >= 2.6.0 */ + +#else /* < 2.6.25 */ + +#if IS_ENABLED(CONFIG_SYSFS) +#ifndef TXGBE_SYSFS +#define TXGBE_SYSFS +#endif /* TXGBE_SYSFS */ +#endif /* CONFIG_SYSFS */ +#if IS_ENABLED(CONFIG_HWMON) +#ifndef TXGBE_HWMON +#define TXGBE_HWMON +#endif /* TXGBE_HWMON */ +#endif /* CONFIG_HWMON */ + +#endif /* < 2.6.25 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) +#ifndef clamp_t +#define clamp_t(type, val, min, max) ({ \ + type __val = (val); \ + type __min = (min); \ + type __max = (max); \ + __val = __val < __min ? __min : __val; \ + __val > __max ? 
__max : __val; }) +#endif /* clamp_t */ +#undef kzalloc_node +#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags) + +void _kc_pci_disable_link_state(struct pci_dev *dev, int state); +#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s) +#else /* < 2.6.26 */ +#define NETDEV_CAN_SET_GSO_MAX_SIZE +#ifdef HAVE_PCI_ASPM_H +#include +#endif +#define HAVE_NETDEV_VLAN_FEATURES +#ifndef PCI_EXP_LNKCAP_ASPMS +#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ +#endif /* PCI_EXP_LNKCAP_ASPMS */ +#endif /* < 2.6.26 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)speed; + /* ep->speed_hi = (__u16)(speed >> 16); */ +} +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set + +static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep) +{ + /* no speed_hi before 2.6.27, and probably no need for it yet */ + return (__u32)ep->speed; +} +#define ethtool_cmd_speed _kc_ethtool_cmd_speed + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) ) +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM)) +#define ANCIENT_PM 1 +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \ + defined(CONFIG_PM_SLEEP)) +#define NEWER_PM 1 +#endif +#if defined(ANCIENT_PM) || defined(NEWER_PM) +#undef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) \ + do { \ + u16 pmc = 0; \ + int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \ + if (pm) { \ + pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \ + &pmc); \ + } \ + (dev)->power.can_wakeup = !!(pmc >> 11); \ + (dev)->power.should_wakeup = (val && (pmc >> 11)); \ + } while (0) +#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */ +#endif /* 2.6.15 through 2.6.27 */ +#ifndef netif_napi_del +#define netif_napi_del(_a) do {} while (0) +#ifdef NAPI +#ifdef CONFIG_NETPOLL +#undef netif_napi_del +#define netif_napi_del(_a) list_del(&(_a)->dev_list); +#endif +#endif +#endif /* netif_napi_del */ +#ifdef dma_mapping_error +#undef dma_mapping_error +#endif +#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr) + +#ifdef CONFIG_NETDEVICES_MULTIQUEUE +#define HAVE_TX_MQ +#endif + +#ifndef DMA_ATTR_WEAK_ORDERING +#define DMA_ATTR_WEAK_ORDERING 0 +#endif + +#ifdef HAVE_TX_MQ +void _kc_netif_tx_stop_all_queues(struct net_device *); +void _kc_netif_tx_wake_all_queues(struct net_device *); +void _kc_netif_tx_start_all_queues(struct net_device *); +#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a) +#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a) +#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a) +#undef netif_stop_subqueue +#define netif_stop_subqueue(_ndev,_qi) do { \ + if (netif_is_multiqueue((_ndev))) \ + netif_stop_subqueue((_ndev), (_qi)); \ + else \ + netif_stop_queue((_ndev)); \ + } while (0) +#undef netif_start_subqueue +#define netif_start_subqueue(_ndev,_qi) do { \ + if (netif_is_multiqueue((_ndev))) \ + netif_start_subqueue((_ndev), (_qi)); \ + else \ + netif_start_queue((_ndev)); \ + } while (0) +#else /* HAVE_TX_MQ */ +#define netif_tx_stop_all_queues(a) netif_stop_queue(a) +#define netif_tx_wake_all_queues(a) netif_wake_queue(a) +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) ) +#define netif_tx_start_all_queues(a) netif_start_queue(a) 
+#else +#define netif_tx_start_all_queues(a) do {} while (0) +#endif +#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev)) +#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev)) +#endif /* HAVE_TX_MQ */ +#ifndef NETIF_F_MULTI_QUEUE +#define NETIF_F_MULTI_QUEUE 0 +#define netif_is_multiqueue(a) 0 +#define netif_wake_subqueue(a, b) +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef __WARN_printf +void __kc_warn_slowpath(const char *file, const int line, + const char *fmt, ...) __attribute__((format(printf, 3, 4))); +#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg) +#endif /* __WARN_printf */ + +#ifndef WARN +#define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf(format); \ + unlikely(__ret_warn_on); \ +}) +#endif /* WARN */ +#undef HAVE_TXGBE_DEBUG_FS +#undef HAVE_TGB_DEBUG_FS +#else /* < 2.6.27 */ +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)(speed & 0xFFFF); + ep->speed_hi = (__u16)(speed >> 16); +} +#define HAVE_TX_MQ +#define HAVE_NETDEV_SELECT_QUEUE +#ifdef CONFIG_DEBUG_FS +#define HAVE_TXGBE_DEBUG_FS +#define HAVE_TGB_DEBUG_FS +#endif /* CONFIG_DEBUG_FS */ +#endif /* < 2.6.27 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) +#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \ + pci_resource_len(pdev, bar)) +#define pci_wake_from_d3 _kc_pci_wake_from_d3 +#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep +int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable); +int _kc_pci_prepare_to_sleep(struct pci_dev *dev); +#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC) +#ifndef __skb_queue_head_init +static inline void __kc_skb_queue_head_init(struct sk_buff_head *list) +{ + list->prev = list->next = (struct sk_buff *)list; + list->qlen = 0; +} +#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q) +#endif + +#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ +#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ + +#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */ +#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */ + +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) +#ifndef swap +#define swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) +#endif +#define pci_request_selected_regions_exclusive(pdev, bars, name) \ + pci_request_selected_regions(pdev, bars, name) +#ifndef CONFIG_NR_CPUS +#define CONFIG_NR_CPUS 1 +#endif /* CONFIG_NR_CPUS */ +#ifndef pcie_aspm_enabled +#define pcie_aspm_enabled() (1) +#endif /* pcie_aspm_enabled */ + +#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ + +#ifndef PCI_EXP_LNKSTA_CLS +#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ +#endif +#ifndef PCI_EXP_LNKSTA_NLW +#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ +#endif + +#ifndef pci_clear_master +void _kc_pci_clear_master(struct pci_dev *dev); +#define pci_clear_master(dev) _kc_pci_clear_master(dev) +#endif + +#ifndef PCI_EXP_LNKCTL_ASPMC +#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ +#endif + +#ifndef PCI_EXP_LNKCAP_MLW +#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */ +#endif + +#else /* < 2.6.29 */ +#ifndef 
HAVE_NET_DEVICE_OPS +#define HAVE_NET_DEVICE_OPS +#endif +#ifdef CONFIG_DCB +#define HAVE_PFC_MODE_ENABLE +#endif /* CONFIG_DCB */ +#endif /* < 2.6.29 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) ) +#define NO_PTP_SUPPORT +#define skb_rx_queue_recorded(a) false +#define skb_get_rx_queue(a) 0 +#define skb_record_rx_queue(a, b) do {} while (0) +#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues) +#undef CONFIG_FCOE +#undef CONFIG_FCOE_MODULE +#ifndef CONFIG_PCI_IOV +#undef pci_enable_sriov +#define pci_enable_sriov(a, b) -ENOTSUPP +#undef pci_disable_sriov +#define pci_disable_sriov(a) do {} while (0) +#endif /* CONFIG_PCI_IOV */ +#ifndef pr_cont +#define pr_cont(fmt, ...) \ + printk(KERN_CONT fmt, ##__VA_ARGS__) +#endif /* pr_cont */ +static inline void _kc_synchronize_irq(unsigned int a) +{ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) + synchronize_irq(); +#else /* < 2.5.28 */ + synchronize_irq(a); +#endif /* < 2.5.28 */ +} +#undef synchronize_irq +#define synchronize_irq(a) _kc_synchronize_irq(a) + +#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ + +#ifdef nr_cpus_node +#undef nr_cpus_node +#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node)) +#endif + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5)) +#define HAVE_PCI_DEV_IS_VIRTFN_BIT +#endif /* RHEL >= 5.5 */ + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5))) +static inline bool pci_is_root_bus(struct pci_bus *pbus) +{ + return !(pbus->parent); +} +#endif + +#else /* < 2.6.30 */ +#define HAVE_ASPM_QUIRKS +#define HAVE_PCI_DEV_IS_VIRTFN_BIT +#endif /* < 2.6.30 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ) +#define ETH_P_1588 0x88F7 +#define ETH_P_FIP 0x8914 +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc_count) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(uclist, dev) \ + for (uclist = dev->uc_list; uclist; uclist = uclist->next) +#endif +#ifndef PORT_OTHER +#define PORT_OTHER 0xff +#endif +#ifndef MDIO_PHY_ID_PRTAD +#define MDIO_PHY_ID_PRTAD 0x03e0 +#endif +#ifndef MDIO_PHY_ID_DEVAD +#define MDIO_PHY_ID_DEVAD 0x001f +#endif +#ifndef skb_dst +#define skb_dst(s) ((s)->dst) +#endif + +#ifndef SUPPORTED_1000baseKX_Full +#define SUPPORTED_1000baseKX_Full BIT(17) +#endif +#ifndef SUPPORTED_10000baseKX4_Full +#define SUPPORTED_10000baseKX4_Full BIT(18) +#endif +#ifndef SUPPORTED_10000baseKR_Full +#define SUPPORTED_10000baseKR_Full BIT(19) +#endif + +#ifndef ADVERTISED_1000baseKX_Full +#define ADVERTISED_1000baseKX_Full BIT(17) +#endif +#ifndef ADVERTISED_10000baseKX4_Full +#define ADVERTISED_10000baseKX4_Full BIT(18) +#endif +#ifndef ADVERTISED_10000baseKR_Full +#define ADVERTISED_10000baseKR_Full BIT(19) +#endif + +static inline unsigned long dev_trans_start(struct net_device *dev) +{ + return dev->trans_start; +} +#else /* < 2.6.31 */ +#ifndef HAVE_NETDEV_STORAGE_ADDRESS +#define HAVE_NETDEV_STORAGE_ADDRESS +#endif +#ifndef HAVE_NETDEV_HW_ADDR +#define HAVE_NETDEV_HW_ADDR +#endif +#ifndef HAVE_TRANS_START_IN_QUEUE +#define HAVE_TRANS_START_IN_QUEUE +#endif +#ifndef HAVE_INCLUDE_LINUX_MDIO_H +#define HAVE_INCLUDE_LINUX_MDIO_H +#endif +#include +#endif /* < 2.6.31 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) ) +#undef 
netdev_tx_t +#define netdev_tx_t int +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef NETIF_F_FCOE_MTU +#define NETIF_F_FCOE_MTU BIT(26) +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline int _kc_pm_runtime_get_sync() +{ + return 1; +} +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync() +#else /* 2.6.0 => 2.6.32 */ +static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev) +{ + return 1; +} +#ifndef pm_runtime_get_sync +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev) +#endif +#endif /* 2.6.0 => 2.6.32 */ +#ifndef pm_runtime_put +#define pm_runtime_put(dev) do {} while (0) +#endif +#ifndef pm_runtime_put_sync +#define pm_runtime_put_sync(dev) do {} while (0) +#endif +#ifndef pm_runtime_resume +#define pm_runtime_resume(dev) do {} while (0) +#endif +#ifndef pm_schedule_suspend +#define pm_schedule_suspend(dev, t) do {} while (0) +#endif +#ifndef pm_runtime_set_suspended +#define pm_runtime_set_suspended(dev) do {} while (0) +#endif +#ifndef pm_runtime_disable +#define pm_runtime_disable(dev) do {} while (0) +#endif +#ifndef pm_runtime_put_noidle +#define pm_runtime_put_noidle(dev) do {} while (0) +#endif +#ifndef pm_runtime_set_active +#define pm_runtime_set_active(dev) do {} while (0) +#endif +#ifndef pm_runtime_enable +#define pm_runtime_enable(dev) do {} while (0) +#endif +#ifndef pm_runtime_get_noresume +#define pm_runtime_get_noresume(dev) do {} while (0) +#endif +#else /* < 2.6.32 */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_NET_DEVICE_EXTENDED +#endif /* RHEL >= 6.2 && RHEL < 7.0 */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_NET_DEVICE_OPS_EXT +#define HAVE_NDO_SET_FEATURES +#endif /* RHEL >= 6.6 && RHEL < 7.0 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE +#define HAVE_NETDEV_OPS_FCOE_ENABLE +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_OPS_GETAPP +#define HAVE_DCBNL_OPS_GETAPP +#endif +#endif /* CONFIG_DCB */ +#include +/* IOV bad DMA target work arounds require at least this kernel rev support */ +#define HAVE_PCIE_TYPE +#endif /* < 2.6.32 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) +#ifndef pci_pcie_cap +#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP) +#endif +#ifndef IPV4_FLOW +#define IPV4_FLOW 0x10 +#endif /* IPV4_FLOW */ +#ifndef IPV6_FLOW +#define IPV6_FLOW 0x11 +#endif /* IPV6_FLOW */ +/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */ +#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) ) +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* RHEL6 or SLES11 SP1 */ +#ifndef __percpu +#define __percpu +#endif /* __percpu */ + +#ifndef PORT_DA +#define PORT_DA PORT_OTHER +#endif /* PORT_DA */ +#ifndef PORT_NONE +#define PORT_NONE PORT_OTHER +#endif + +#if ((RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \ + (RHEL_RELEASE_CODE < 
RHEL_RELEASE_VERSION(7,0)))) +#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE) +#undef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME +#undef DEFINE_DMA_UNMAP_LEN +#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME +#undef dma_unmap_addr +#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#undef dma_unmap_addr_set +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#undef dma_unmap_len +#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#undef dma_unmap_len_set +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) +#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */ +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))))) +static inline bool pci_is_pcie(struct pci_dev *dev) +{ + return !!pci_pcie_cap(dev); +} +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)))) +#define sk_tx_queue_get(_sk) (-1) +#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0) +#endif /* !(RHEL >= 6.2) */ + +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_ETHTOOL_SET_PHYS_ID +#define HAVE_ETHTOOL_GET_TS_INFO +#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,5)) +#define HAVE_ETHTOOL_GSRSSH +#define HAVE_RHEL6_SRIOV_CONFIGURE +#define HAVE_RXFH_NONCONST +#endif /* RHEL > 6.5 */ +#endif /* RHEL >= 6.4 && RHEL < 7.0 */ + +#else /* < 2.6.33 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* < 2.6.33 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#ifndef pci_num_vf +#define pci_num_vf(pdev) _kc_pci_num_vf(pdev) +int _kc_pci_num_vf(struct pci_dev *dev); +#endif +#endif /* RHEL_RELEASE_CODE */ + +#ifndef dev_is_pci +#define dev_is_pci(d) ((d)->bus == &pci_bus_type) +#endif + +#ifndef ETH_FLAG_NTUPLE +#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE +#endif + +#ifndef netdev_mc_count +#define netdev_mc_count(dev) ((dev)->mc_count) +#endif +#ifndef netdev_mc_empty +#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) +#endif +#ifndef netdev_for_each_mc_addr +#define netdev_for_each_mc_addr(mclist, dev) \ + for (mclist = dev->mc_list; mclist; mclist = mclist->next) +#endif +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc.count) +#endif +#ifndef netdev_uc_empty +#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(ha, dev) \ + list_for_each_entry(ha, &dev->uc.list, list) +#endif +#ifndef dma_set_coherent_mask +#define dma_set_coherent_mask(dev,mask) \ + pci_set_consistent_dma_mask(to_pci_dev(dev),(mask)) +#endif +#ifndef pci_dev_run_wake +#define pci_dev_run_wake(pdev) (0) +#endif + +/* netdev logging taken from include/linux/netdevice.h */ +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (dev->reg_state 
!= NETREG_REGISTERED) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#undef netdev_printk +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + printk(level "%s: " format, pci_name(pdev), ##args); \ +} while(0) +#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + struct device *dev = pci_dev_to_dev(pdev); \ + dev_printk(level, dev, "%s: " format, \ + netdev_name(netdev), ##args); \ +} while(0) +#else /* 2.6.21 => 2.6.34 */ +#define netdev_printk(level, netdev, format, args...) \ + dev_printk(level, (netdev)->dev.parent, \ + "%s: " format, \ + netdev_name(netdev), ##args) +#endif /* <2.6.0 <2.6.21 <2.6.34 */ +#undef netdev_emerg +#define netdev_emerg(dev, format, args...) \ + netdev_printk(KERN_EMERG, dev, format, ##args) +#undef netdev_alert +#define netdev_alert(dev, format, args...) \ + netdev_printk(KERN_ALERT, dev, format, ##args) +#undef netdev_crit +#define netdev_crit(dev, format, args...) \ + netdev_printk(KERN_CRIT, dev, format, ##args) +#undef netdev_err +#define netdev_err(dev, format, args...) \ + netdev_printk(KERN_ERR, dev, format, ##args) +#undef netdev_warn +#define netdev_warn(dev, format, args...) \ + netdev_printk(KERN_WARNING, dev, format, ##args) +#undef netdev_notice +#define netdev_notice(dev, format, args...) \ + netdev_printk(KERN_NOTICE, dev, format, ##args) +#undef netdev_info +#define netdev_info(dev, format, args...) \ + netdev_printk(KERN_INFO, dev, format, ##args) +#undef netdev_dbg +#if defined(DEBUG) +#define netdev_dbg(__dev, format, args...) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args) +#elif defined(CONFIG_DYNAMIC_DEBUG) +#define netdev_dbg(__dev, format, args...) \ +do { \ + dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ + netdev_name(__dev), ##args); \ +} while (0) +#else /* DEBUG */ +#define netdev_dbg(__dev, format, args...) \ +({ \ + if (0) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args); \ + 0; \ +}) +#endif /* DEBUG */ + +#undef netif_printk +#define netif_printk(priv, type, level, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_printk(level, (dev), fmt, ##args); \ +} while (0) + +#undef netif_emerg +#define netif_emerg(priv, type, dev, fmt, args...) \ + netif_level(emerg, priv, type, dev, fmt, ##args) +#undef netif_alert +#define netif_alert(priv, type, dev, fmt, args...) \ + netif_level(alert, priv, type, dev, fmt, ##args) +#undef netif_crit +#define netif_crit(priv, type, dev, fmt, args...) \ + netif_level(crit, priv, type, dev, fmt, ##args) +#undef netif_err +#define netif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, fmt, ##args) +#undef netif_warn +#define netif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, fmt, ##args) +#undef netif_notice +#define netif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, fmt, ##args) +#undef netif_info +#define netif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, fmt, ##args) +#undef netif_dbg +#define netif_dbg(priv, type, dev, fmt, args...) 
\ + netif_level(dbg, priv, type, dev, fmt, ##args) + +#ifdef SET_SYSTEM_SLEEP_PM_OPS +#define HAVE_SYSTEM_SLEEP_PM_OPS +#endif + +#ifndef for_each_set_bit +#define for_each_set_bit(bit, addr, size) \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit */ + +#ifndef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN +#define dma_unmap_addr pci_unmap_addr +#define dma_unmap_addr_set pci_unmap_addr_set +#define dma_unmap_len pci_unmap_len +#define dma_unmap_len_set pci_unmap_len_set +#endif /* DEFINE_DMA_UNMAP_ADDR */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 3)) +#ifdef TGB_HWMON +#ifdef CONFIG_DEBUG_LOCK_ALLOC +#define sysfs_attr_init(attr) \ + do { \ + static struct lock_class_key __key; \ + (attr)->key = &__key; \ + } while (0) +#else +#define sysfs_attr_init(attr) do {} while (0) +#endif /* CONFIG_DEBUG_LOCK_ALLOC */ +#endif /* TGB_HWMON */ +#endif /* RHEL_RELEASE_CODE */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline bool _kc_pm_runtime_suspended() +{ + return false; +} +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended() +#else /* 2.6.0 => 2.6.34 */ +static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev) +{ + return false; +} +#ifndef pm_runtime_suspended +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev) +#endif +#endif /* 2.6.0 => 2.6.34 */ + +#ifndef pci_bus_speed +/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */ +enum _kc_pci_bus_speed { + _KC_PCIE_SPEED_2_5GT = 0x14, + _KC_PCIE_SPEED_5_0GT = 0x15, + _KC_PCIE_SPEED_8_0GT = 0x16, + _KC_PCI_SPEED_UNKNOWN = 0xff, +}; +#define pci_bus_speed _kc_pci_bus_speed +#define PCIE_SPEED_2_5GT _KC_PCIE_SPEED_2_5GT +#define PCIE_SPEED_5_0GT _KC_PCIE_SPEED_5_0GT +#define PCIE_SPEED_8_0GT _KC_PCIE_SPEED_8_0GT +#define PCI_SPEED_UNKNOWN _KC_PCI_SPEED_UNKNOWN +#endif /* pci_bus_speed */ + +#else /* < 2.6.34 */ +#define HAVE_SYSTEM_SLEEP_PM_OPS +#ifndef HAVE_SET_RX_MODE +#define HAVE_SET_RX_MODE +#endif + +#endif /* < 2.6.34 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count); +#define simple_write_to_buffer _kc_simple_write_to_buffer + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4))) +static inline struct pci_dev *pci_physfn(struct pci_dev *dev) +{ +#ifdef HAVE_PCI_DEV_IS_VIRTFN_BIT +#ifdef CONFIG_PCI_IOV + if (dev->is_virtfn) + dev = dev->physfn; +#endif /* CONFIG_PCI_IOV */ +#endif /* HAVE_PCI_DEV_IS_VIRTFN_BIT */ + return dev; +} +#endif /* ! 
RHEL >= 6.4 */ + +#ifndef PCI_EXP_LNKSTA_NLW_SHIFT +#define PCI_EXP_LNKSTA_NLW_SHIFT 4 +#endif + +#ifndef numa_node_id +#define numa_node_id() 0 +#endif +#ifndef numa_mem_id +#define numa_mem_id numa_node_id +#endif +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) +#ifdef HAVE_TX_MQ +#include +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int); +#else /* CONFIG_NETDEVICES_MULTI_QUEUE */ +static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + dev->egress_subqueue_count = txq; + return 0; +} +#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */ +#else /* HAVE_TX_MQ */ +static inline int _kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev, + unsigned int __always_unused txq) +{ + return 0; +} +#endif /* HAVE_TX_MQ */ +#define netif_set_real_num_tx_queues(dev, txq) \ + _kc_netif_set_real_num_tx_queues(dev, txq) +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ +#ifndef ETH_FLAG_RXHASH +#define ETH_FLAG_RXHASH (1<<28) +#endif /* ETH_FLAG_RXHASH */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) +#define HAVE_IRQ_AFFINITY_HINT +#endif +struct device_node; +#else /* < 2.6.35 */ +#define HAVE_STRUCT_DEVICE_OF_NODE +#define HAVE_PM_QOS_REQUEST_LIST +#define HAVE_IRQ_AFFINITY_HINT +#include +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) +int _kc_ethtool_op_set_flags(struct net_device *, u32, u32); +#define ethtool_op_set_flags _kc_ethtool_op_set_flags +u32 _kc_ethtool_op_get_flags(struct net_device *); +#define ethtool_op_get_flags _kc_ethtool_op_get_flags + +enum { + WQ_UNBOUND = 0, + WQ_RESCUER = 0, +}; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#ifdef NET_IP_ALIGN +#undef NET_IP_ALIGN +#endif +#define NET_IP_ALIGN 0 +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + +#ifdef NET_SKB_PAD +#undef NET_SKB_PAD +#endif + +#if (L1_CACHE_BYTES > 32) +#define NET_SKB_PAD L1_CACHE_BYTES +#else +#define NET_SKB_PAD 32 +#endif + +static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev, + unsigned int length) +{ + struct sk_buff *skb; + + skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC); + if (skb) { +#if (NET_IP_ALIGN + NET_SKB_PAD) + skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); +#endif + skb->dev = dev; + } + return skb; +} + +#ifdef netdev_alloc_skb_ip_align +#undef netdev_alloc_skb_ip_align +#endif +#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l) + +#undef netif_level +#define netif_level(level, priv, type, dev, fmt, args...) 
\ +do { \ + if (netif_msg_##type(priv)) \ + netdev_##level(dev, fmt, ##args); \ +} while (0) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3))) +#undef usleep_range +#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) +#endif + +#define u64_stats_update_begin(a) do { } while(0) +#define u64_stats_update_end(a) do { } while(0) +#define u64_stats_fetch_begin(a) do { } while(0) +#define u64_stats_fetch_retry_bh(a,b) (0) +#define u64_stats_fetch_begin_bh(a) (0) + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) +#define HAVE_8021P_SUPPORT +#endif + +/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11,2,0))) +static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb) +{ + return; +} +#endif + +#else /* < 2.6.36 */ + +#define msleep(x) do { if (x > 20) \ + msleep(x); \ + else \ + usleep_range(1000 * x, 2000 * x); \ + } while (0) + +#define HAVE_PM_QOS_REQUEST_ACTIVE +#define HAVE_8021P_SUPPORT +#define HAVE_NDO_GET_STATS64 +#endif /* < 2.6.36 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) ) +#define HAVE_NON_CONST_PCI_DRIVER_NAME +#ifndef netif_set_real_num_tx_queues +static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + netif_set_real_num_tx_queues(dev, txq); + return 0; +} +#define netif_set_real_num_tx_queues(dev, txq) \ + _kc_netif_set_real_num_tx_queues(dev, txq) +#endif +#ifndef netif_set_real_num_rx_queues +static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev, + unsigned int __always_unused rxq) +{ + return 0; +} +#define netif_set_real_num_rx_queues(dev, rxq) \ + __kc_netif_set_real_num_rx_queues((dev), (rxq)) +#endif +#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR +#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) +#endif +#ifndef VLAN_N_VID +#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN +#endif /* VLAN_N_VID */ +#ifndef ETH_FLAG_TXVLAN +#define ETH_FLAG_TXVLAN BIT(7) +#endif /* ETH_FLAG_TXVLAN */ +#ifndef ETH_FLAG_RXVLAN +#define ETH_FLAG_RXVLAN BIT(8) +#endif /* ETH_FLAG_RXVLAN */ + +#define WQ_MEM_RECLAIM WQ_RESCUER + +static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb) +{ + WARN_ON(skb->ip_summed != CHECKSUM_NONE); +} +#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb) + +static inline void *_kc_vzalloc_node(unsigned long size, int node) +{ + void *addr = vmalloc_node(size, node); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node) + +static inline void *_kc_vzalloc(unsigned long size) +{ + void *addr = vmalloc(size); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc(_size) _kc_vzalloc(_size) + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,7)) || \ + (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,0))) +static inline __be16 vlan_get_protocol(const struct sk_buff *skb) +{ + if (vlan_tx_tag_present(skb) || + skb->protocol != cpu_to_be16(ETH_P_8021Q)) + return skb->protocol; + + if (skb_headlen(skb) < sizeof(struct vlan_ethhdr)) + return 0; + + return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto; +} +#endif /* !RHEL5.7+ || RHEL6.0 */ + +#ifdef HAVE_HW_TIME_STAMP +#define SKBTX_HW_TSTAMP BIT(0) +#define SKBTX_IN_PROGRESS BIT(2) +#define SKB_SHARED_TX_IS_UNION +#endif + +#ifndef device_wakeup_enable +#define 
device_wakeup_enable(dev) device_set_wakeup_enable(dev, true) +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) ) +#ifndef HAVE_VLAN_RX_REGISTER +#define HAVE_VLAN_RX_REGISTER +#endif +#endif /* > 2.4.18 */ +#endif /* < 2.6.37 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +#define skb_checksum_start_offset(skb) skb_transport_offset(skb) +#else /* 2.6.22 -> 2.6.37 */ +static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb) +{ + return skb->csum_start - skb_headroom(skb); +} +#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb) +#endif /* 2.6.22 -> 2.6.37 */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_MAX_TCS +#define IEEE_8021QAZ_MAX_TCS 8 +#endif +#ifndef DCB_CAP_DCBX_HOST +#define DCB_CAP_DCBX_HOST 0x01 +#endif +#ifndef DCB_CAP_DCBX_LLD_MANAGED +#define DCB_CAP_DCBX_LLD_MANAGED 0x02 +#endif +#ifndef DCB_CAP_DCBX_VER_CEE +#define DCB_CAP_DCBX_VER_CEE 0x04 +#endif +#ifndef DCB_CAP_DCBX_VER_IEEE +#define DCB_CAP_DCBX_VER_IEEE 0x08 +#endif +#ifndef DCB_CAP_DCBX_STATIC +#define DCB_CAP_DCBX_STATIC 0x10 +#endif +#endif /* CONFIG_DCB */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) +#define CONFIG_XPS +#endif /* RHEL_RELEASE_VERSION(6,2) */ +#endif /* < 2.6.38 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) +#ifndef TC_BITMASK +#define TC_BITMASK 15 +#endif +#ifndef NETIF_F_RXCSUM +#define NETIF_F_RXCSUM BIT(29) +#endif +#ifndef skb_queue_reverse_walk_safe +#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ + for (skb = (queue)->prev, tmp = skb->prev; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->prev) +#endif +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef FCOE_MTU +#define FCOE_MTU 2158 +#endif +#endif +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 +#endif +#endif +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4))) +#define kstrtoul(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtouint(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtou32(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) +u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16); +#define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q)) +u8 _kc_netdev_get_num_tc(struct net_device *dev); +#define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev) +int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc); +#define netdev_set_num_tc(dev, tc) _kc_netdev_set_num_tc((dev), (tc)) +#define netdev_reset_tc(dev) _kc_netdev_set_num_tc((dev), 0) +#define netdev_set_tc_queue(dev, tc, cnt, off) do {} while (0) +u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up); +#define netdev_get_prio_tc_map(dev, up) _kc_netdev_get_prio_tc_map(dev, up) +#define netdev_set_prio_tc_map(dev, up, tc) do {} while (0) +#else /* RHEL6.1 or greater */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif /* HAVE_MQPRIO */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#ifndef IEEE_8021QAZ_TSA_STRICT +#define IEEE_8021QAZ_TSA_STRICT 0 +#endif +#ifndef IEEE_8021QAZ_TSA_ETS +#define 
IEEE_8021QAZ_TSA_ETS 2 +#endif +#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 +#endif +#endif +#endif /* CONFIG_DCB */ +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ + +#ifndef udp_csum +#define udp_csum __kc_udp_csum +static inline __wsum __kc_udp_csum(struct sk_buff *skb) +{ + __wsum csum = csum_partial(skb_transport_header(skb), + sizeof(struct udphdr), skb->csum); + + for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) { + csum = csum_add(csum, skb->csum); + } + return csum; +} +#endif /* udp_csum */ +#else /* < 2.6.39 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif +#ifndef HAVE_SETUP_TC +#define HAVE_SETUP_TC +#endif +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#endif +#endif /* CONFIG_DCB */ +#ifndef HAVE_NDO_SET_FEATURES +#define HAVE_NDO_SET_FEATURES +#endif +#define HAVE_IRQ_AFFINITY_NOTIFY +#endif /* < 2.6.39 */ + +/*****************************************************************************/ +/* use < 2.6.40 because of a Fedora 15 kernel update where they + * updated the kernel version to 2.6.40.x and they back-ported 3.0 features + * like set_phys_id for ethtool. + */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) ) +#ifdef ETHTOOL_GRXRINGS +#ifndef FLOW_EXT +#define FLOW_EXT 0x80000000 +union _kc_ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + __u8 hdata[60]; +}; +struct _kc_ethtool_flow_ext { + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; +struct _kc_ethtool_rx_flow_spec { + __u32 flow_type; + union _kc_ethtool_flow_union h_u; + struct _kc_ethtool_flow_ext h_ext; + union _kc_ethtool_flow_union m_u; + struct _kc_ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; +#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec +#endif /* FLOW_EXT */ +#endif + +#define pci_disable_link_state_locked pci_disable_link_state + +#ifndef PCI_LTR_VALUE_MASK +#define PCI_LTR_VALUE_MASK 0x000003ff +#endif +#ifndef PCI_LTR_SCALE_MASK +#define PCI_LTR_SCALE_MASK 0x00001c00 +#endif +#ifndef PCI_LTR_SCALE_SHIFT +#define PCI_LTR_SCALE_SHIFT 10 +#endif + +#else /* < 2.6.40 */ +#define HAVE_ETHTOOL_SET_PHYS_ID +#endif /* < 2.6.40 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) ) +#define USE_LEGACY_PM_SUPPORT +#ifndef kfree_rcu +#define kfree_rcu(_ptr, _rcu_head) kfree(_ptr) +#endif /* kfree_rcu */ +#ifndef kstrtol_from_user +#define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r) +static inline int _kc_kstrtol_from_user(const char __user *s, size_t count, + unsigned int base, long *res) +{ + /* sign, base 2 representation, newline, terminator */ + char buf[1 + sizeof(long) * 8 + 1 + 1]; + + count = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, s, count)) + return -EFAULT; + buf[count] = '\0'; + return strict_strtol(buf, base, res); +} +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0) || \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,7))) +/* 20000base_blah_full Supported and Advertised Registers */ +#define SUPPORTED_20000baseMLD2_Full BIT(21) +#define SUPPORTED_20000baseKR2_Full BIT(22) +#define ADVERTISED_20000baseMLD2_Full BIT(21) +#define ADVERTISED_20000baseKR2_Full BIT(22) 
+#endif /* RHEL_RELEASE_CODE */ +#endif /* < 3.0.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#ifndef __netdev_alloc_skb_ip_align +#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l) +#endif /* __netdev_alloc_skb_ip_align */ +#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app) +#define dcb_ieee_delapp(dev, app) 0 +#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority) + +/* 1000BASE-T Control register */ +#define CTL1000_AS_MASTER 0x0800 +#define CTL1000_ENABLE_MASTER 0x1000 + +/* kernels less than 3.0.0 don't have this */ +#ifndef ETH_P_8021AD +#define ETH_P_8021AD 0x88A8 +#endif + +/* Stub definition for !CONFIG_OF is introduced later */ +#ifdef CONFIG_OF +static inline struct device_node * +pci_device_to_OF_node(struct pci_dev __maybe_unused *pdev) +{ +#ifdef HAVE_STRUCT_DEVICE_OF_NODE + return pdev ? pdev->dev.of_node : NULL; +#else + return NULL; +#endif /* !HAVE_STRUCT_DEVICE_OF_NODE */ +} +#endif /* CONFIG_OF */ +#else /* < 3.1.0 */ +#ifndef HAVE_DCBNL_IEEE_DELAPP +#define HAVE_DCBNL_IEEE_DELAPP +#endif +#endif /* < 3.1.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) +#define HAVE_NO_BITMAP +#ifndef dma_zalloc_coherent +#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f) +static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = dma_alloc_coherent(dev, size, dma_handle, flag); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif +#ifdef ETHTOOL_GRXRINGS +#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS +#endif /* ETHTOOL_GRXRINGS */ + +#ifndef skb_frag_size +#define skb_frag_size(frag) _kc_skb_frag_size(frag) +static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag) +{ + return frag->size; +} +#endif /* skb_frag_size */ + +#ifndef skb_frag_size_sub +#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta) +static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta) +{ + frag->size -= delta; +} +#endif /* skb_frag_size_sub */ + +#ifndef skb_frag_page +#define skb_frag_page(frag) _kc_skb_frag_page(frag) +static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag) +{ + return frag->page; +} +#endif /* skb_frag_page */ + +#ifndef skb_frag_address +#define skb_frag_address(frag) _kc_skb_frag_address(frag) +static inline void *_kc_skb_frag_address(const skb_frag_t *frag) +{ + return page_address(skb_frag_page(frag)) + frag->page_offset; +} +#endif /* skb_frag_address */ + +#ifndef skb_frag_dma_map +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) +#include +#endif +#define skb_frag_dma_map(dev,frag,offset,size,dir) \ + _kc_skb_frag_dma_map(dev,frag,offset,size,dir) +static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev, + const skb_frag_t *frag, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + return dma_map_page(dev, skb_frag_page(frag), + frag->page_offset + offset, size, dir); +} +#endif /* skb_frag_dma_map */ + +#ifndef __skb_frag_unref +#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag) +static inline void __kc_skb_frag_unref(skb_frag_t *frag) +{ + put_page(skb_frag_page(frag)); +} +#endif /* __skb_frag_unref */ + +#ifndef SPEED_UNKNOWN +#define SPEED_UNKNOWN -1 +#endif +#ifndef DUPLEX_UNKNOWN +#define DUPLEX_UNKNOWN 0xff +#endif +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) 
||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#endif +#endif +#else /* < 3.2.0 */ +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_VF_SPOOFCHK_CONFIGURE +#endif +#ifndef HAVE_SKB_L4_RXHASH +#define HAVE_SKB_L4_RXHASH +#endif +#define HAVE_IOMMU_PRESENT +#define HAVE_PM_QOS_REQUEST_LIST_NEW +#endif /* < 3.2.0 */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6, 2)) +#undef txgbe_get_netdev_tc_txq +#define txgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc]) +#endif +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) ) +/* NOTE: the order of parameters to _kc_alloc_workqueue() is different than + * alloc_workqueue() to avoid compiler warning from -Wvarargs + */ +static inline struct workqueue_struct * __attribute__ ((format(printf, 3, 4))) +_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active, + const char *fmt, ...) +{ + struct workqueue_struct *wq; + va_list args, temp; + unsigned int len; + char *p; + + va_start(args, fmt); + va_copy(temp, args); + len = vsnprintf(NULL, 0, fmt, temp); + va_end(temp); + + p = kmalloc(len + 1, GFP_KERNEL); + if (!p) { + va_end(args); + return NULL; + } + + vsnprintf(p, len + 1, fmt, args); + va_end(args); +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) + wq = create_workqueue(p); +#else + wq = alloc_workqueue(p, flags, max_active); +#endif + kfree(p); + + return wq; +} +#ifdef alloc_workqueue +#undef alloc_workqueue +#endif +#define alloc_workqueue(fmt, flags, max_active, args...) \ + _kc_alloc_workqueue(flags, max_active, fmt, ##args) + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) +typedef u32 netdev_features_t; +#endif +#undef PCI_EXP_TYPE_RC_EC +#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ +#ifndef CONFIG_BQL +#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0) +#define netdev_completed_queue(_n, _p, _b) do {} while (0) +#define netdev_tx_sent_queue(_q, _b) do {} while (0) +#define netdev_sent_queue(_n, _b) do {} while (0) +#define netdev_tx_reset_queue(_q) do {} while (0) +#define netdev_reset_queue(_n) do {} while (0) +#endif +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#endif /* SLE_VERSION(11,3,0) */ +#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q) +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) +static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start, + u8 *nexthdrp, + __be16 __always_unused *frag_offp) +{ + return ipv6_skip_exthdr(skb, start, nexthdrp); +} +#undef ipv6_skip_exthdr +#define ipv6_skip_exthdr(a,b,c,d) __kc_ipv6_skip_exthdr((a), (b), (c), (d)) +#endif /* !SLES11sp4 or greater */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) +static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) +{ + return index % n_rx_rings; +} +#endif + +#else /* ! 
< 3.3.0 */ +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef ETHTOOL_SRXNTUPLE +#undef ETHTOOL_SRXNTUPLE +#endif +#endif /* < 3.3.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) +#ifndef NETIF_F_RXFCS +#define NETIF_F_RXFCS 0 +#endif /* NETIF_F_RXFCS */ +#ifndef NETIF_F_RXALL +#define NETIF_F_RXALL 0 +#endif /* NETIF_F_RXALL */ + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define NUMTCS_RETURNS_U8 + +int _kc_simple_open(struct inode *inode, struct file *file); +#define simple_open _kc_simple_open +#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ + +#ifndef skb_add_rx_frag +#define skb_add_rx_frag _kc_skb_add_rx_frag +void _kc_skb_add_rx_frag(struct sk_buff * skb, int i, struct page *page, + int off, int size, unsigned int truesize); +#endif +#ifdef NET_ADDR_RANDOM +#define eth_hw_addr_random(N) do { \ + eth_random_addr(N->dev_addr); \ + N->addr_assign_type |= NET_ADDR_RANDOM; \ + } while (0) +#else /* NET_ADDR_RANDOM */ +#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr) +#endif /* NET_ADDR_RANDOM */ + +#ifndef for_each_set_bit_from +#define for_each_set_bit_from(bit, addr, size) \ + for ((bit) = find_next_bit((addr), (size), (bit)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit_from */ + +#else /* < 3.4.0 */ +#include +#endif /* >= 3.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) || \ + ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) +#if !defined(NO_PTP_SUPPORT) && IS_ENABLED(CONFIG_PTP_1588_CLOCK) +#define HAVE_PTP_1588_CLOCK +#endif /* !NO_PTP_SUPPORT && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ +#endif /* >= 3.0.0 || RHEL_RELEASE > 6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) ) + +#ifndef SIZE_MAX +#define SIZE_MAX (~(size_t)0) +#endif + +#ifndef BITS_PER_LONG_LONG +#define BITS_PER_LONG_LONG 64 +#endif + +#ifndef ether_addr_equal +static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2) +{ + return !compare_ether_addr(addr1, addr2); +} +#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2)) +#endif + +/* Definitions for !CONFIG_OF_NET are introduced in 3.10 */ +#ifdef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif +#else +#include +#define HAVE_FDB_OPS +#define HAVE_ETHTOOL_GET_TS_INFO +#endif /* < 3.5.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) ) +#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */ + +#ifndef MDIO_EEE_100TX +#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ +#endif +#ifndef MDIO_EEE_1000T +#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ +#endif +#ifndef MDIO_EEE_10GT +#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ +#endif +#ifndef MDIO_EEE_1000KX +#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ +#endif +#ifndef MDIO_EEE_10GKX4 +#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ +#endif +#ifndef MDIO_EEE_10GKR +#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ +#endif + +#ifndef __GFP_MEMALLOC +#define __GFP_MEMALLOC 
0 +#endif + +#ifndef eth_broadcast_addr +#define eth_broadcast_addr _kc_eth_broadcast_addr +static inline void _kc_eth_broadcast_addr(u8 *addr) +{ + memset(addr, 0xff, ETH_ALEN); +} +#endif + +#ifndef eth_random_addr +#define eth_random_addr _kc_eth_random_addr +static inline void _kc_eth_random_addr(u8 *addr) +{ + get_random_bytes(addr, ETH_ALEN); + addr[0] &= 0xfe; /* clear multicast */ + addr[0] |= 0x02; /* set local assignment */ +} +#endif /* eth_random_addr */ + +#ifndef DMA_ATTR_SKIP_CPU_SYNC +#define DMA_ATTR_SKIP_CPU_SYNC 0 +#endif +#else /* < 3.6.0 */ +#define HAVE_STRUCT_PAGE_PFMEMALLOC +#endif /* < 3.6.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) +#include +#ifndef ADVERTISED_40000baseKR4_Full +/* these defines were all added in one commit, so should be safe + * to trigger activiation on one define + */ +#define SUPPORTED_40000baseKR4_Full BIT(23) +#define SUPPORTED_40000baseCR4_Full BIT(24) +#define SUPPORTED_40000baseSR4_Full BIT(25) +#define SUPPORTED_40000baseLR4_Full BIT(26) +#define ADVERTISED_40000baseKR4_Full BIT(23) +#define ADVERTISED_40000baseCR4_Full BIT(24) +#define ADVERTISED_40000baseSR4_Full BIT(25) +#define ADVERTISED_40000baseLR4_Full BIT(26) +#endif + +#ifndef mmd_eee_cap_to_ethtool_sup_t +/** + * mmd_eee_cap_to_ethtool_sup_t + * @eee_cap: value of the MMD EEE Capability register + * + * A small helper function that translates MMD EEE Capability (3.20) bits + * to ethtool supported settings. + */ +static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap) +{ + u32 supported = 0; + + if (eee_cap & MDIO_EEE_100TX) + supported |= SUPPORTED_100baseT_Full; + if (eee_cap & MDIO_EEE_1000T) + supported |= SUPPORTED_1000baseT_Full; + if (eee_cap & MDIO_EEE_10GT) + supported |= SUPPORTED_10000baseT_Full; + if (eee_cap & MDIO_EEE_1000KX) + supported |= SUPPORTED_1000baseKX_Full; + if (eee_cap & MDIO_EEE_10GKX4) + supported |= SUPPORTED_10000baseKX4_Full; + if (eee_cap & MDIO_EEE_10GKR) + supported |= SUPPORTED_10000baseKR_Full; + + return supported; +} +#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \ + __kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap) +#endif /* mmd_eee_cap_to_ethtool_sup_t */ + +#ifndef mmd_eee_adv_to_ethtool_adv_t +/** + * mmd_eee_adv_to_ethtool_adv_t + * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers + * + * A small helper function that translates the MMD EEE Advertisement (7.60) + * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement + * settings. 
+ */ +static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv) +{ + u32 adv = 0; + + if (eee_adv & MDIO_EEE_100TX) + adv |= ADVERTISED_100baseT_Full; + if (eee_adv & MDIO_EEE_1000T) + adv |= ADVERTISED_1000baseT_Full; + if (eee_adv & MDIO_EEE_10GT) + adv |= ADVERTISED_10000baseT_Full; + if (eee_adv & MDIO_EEE_1000KX) + adv |= ADVERTISED_1000baseKX_Full; + if (eee_adv & MDIO_EEE_10GKX4) + adv |= ADVERTISED_10000baseKX4_Full; + if (eee_adv & MDIO_EEE_10GKR) + adv |= ADVERTISED_10000baseKR_Full; + + return adv; +} + +#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \ + __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv) +#endif /* mmd_eee_adv_to_ethtool_adv_t */ + +#ifndef ethtool_adv_to_mmd_eee_adv_t +/** + * ethtool_adv_to_mmd_eee_adv_t + * @adv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement settings + * to EEE advertisements for the MMD EEE Advertisement (7.60) and + * MMD EEE Link Partner Ability (7.61) registers. + */ +static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv) +{ + u16 reg = 0; + + if (adv & ADVERTISED_100baseT_Full) + reg |= MDIO_EEE_100TX; + if (adv & ADVERTISED_1000baseT_Full) + reg |= MDIO_EEE_1000T; + if (adv & ADVERTISED_10000baseT_Full) + reg |= MDIO_EEE_10GT; + if (adv & ADVERTISED_1000baseKX_Full) + reg |= MDIO_EEE_1000KX; + if (adv & ADVERTISED_10000baseKX4_Full) + reg |= MDIO_EEE_10GKX4; + if (adv & ADVERTISED_10000baseKR_Full) + reg |= MDIO_EEE_10GKR; + + return reg; +} +#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv) +#endif /* ethtool_adv_to_mmd_eee_adv_t */ + +#ifndef pci_pcie_type +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +static inline u8 pci_pcie_type(struct pci_dev *pdev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); + BUG_ON(!pos); + pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); + return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; +} +#else /* < 2.6.24 */ +#define pci_pcie_type(x) (x)->pcie_type +#endif /* < 2.6.24 */ +#endif /* pci_pcie_type */ + +#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) ) && \ + ( ! ( SLE_VERSION_CODE >= SLE_VERSION(11,3,0) ) ) && \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) +#define ptp_clock_register(caps, args...) 
ptp_clock_register(caps) +#endif + +#ifndef pcie_capability_read_word +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); +#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v) +#endif /* pcie_capability_read_word */ + +#ifndef pcie_capability_read_dword +int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); +#define pcie_capability_read_dword(d,p,v) __kc_pcie_capability_read_dword(d,p,v) +#endif + +#ifndef pcie_capability_write_word +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); +#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v) +#endif /* pcie_capability_write_word */ + +#ifndef pcie_capability_clear_and_set_word +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set); +#define pcie_capability_clear_and_set_word(d,p,c,s) \ + __kc_pcie_capability_clear_and_set_word(d,p,c,s) +#endif /* pcie_capability_clear_and_set_word */ + +#ifndef pcie_capability_clear_word +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear); +#define pcie_capability_clear_word(d, p, c) \ + __kc_pcie_capability_clear_word(d, p, c) +#endif /* pcie_capability_clear_word */ + +#ifndef PCI_EXP_LNKSTA2 +#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define USE_CONST_DEV_UC_CHAR +#define HAVE_NDO_FDB_ADD_NLATTR +#endif + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8)) +#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi) +#endif /* !RHEL6.8+ */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) +#include +#else + +#define DEFINE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] __read_mostly = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DECLARE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] + +#define HASH_SIZE(name) (ARRAY_SIZE(name)) +#define HASH_BITS(name) ilog2(HASH_SIZE(name)) + +/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ +#define hash_min(val, bits) \ + (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits)) + +static inline void __hash_init(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + INIT_HLIST_HEAD(&ht[i]); +} + +#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) + +#define hash_add(hashtable, node, key) \ + hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) + +static inline bool hash_hashed(struct hlist_node *node) +{ + return !hlist_unhashed(node); +} + +static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + if (!hlist_empty(&ht[i])) + return false; + + return true; +} + +#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) + +static inline void hash_del(struct hlist_node *node) +{ + hlist_del_init(node); +} +#endif /* RHEL >= 6.6 */ + +/* We don't have @flags support prior to 3.7, so we'll simply ignore the flags + * parameter on these older kernels. + */ +#define __setup_timer(_timer, _fn, _data, _flags) \ + setup_timer((_timer), (_fn), (_data)) \ + +#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) ) ) && \ + ( ! 
( SLE_VERSION_CODE >= SLE_VERSION(12,0,0) ) ) + +#ifndef mod_delayed_work +/** + * __mod_delayed_work - modify delay or queue delayed work + * @wq: workqueue to use + * @dwork: delayed work to queue + * @delay: number of jiffies to wait before queueing + * + * Return: %true if @dwork was pending and was rescheduled; + * %false if it wasn't pending + * + * Note: the dwork parameter was declared as a void* + * to avoid comptibility problems with early 2.6 kernels + * where struct delayed_work is not declared. Unlike the original + * implementation flags are not preserved and it shouldn't be + * used in the interrupt context. + */ +static inline bool __mod_delayed_work(struct workqueue_struct *wq, + void *dwork, + unsigned long delay) +{ + bool ret = cancel_delayed_work(dwork); + queue_delayed_work(wq, dwork, delay); + return ret; +} +#define mod_delayed_work(wq, dwork, delay) __mod_delayed_work(wq, dwork, delay) +#endif /* mod_delayed_work */ + +#endif /* !(RHEL >= 6.7) && !(SLE >= 12.0) */ +#else /* >= 3.7.0 */ +#include +#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS +#define USE_CONST_DEV_UC_CHAR +#define HAVE_NDO_FDB_ADD_NLATTR +#endif /* >= 3.7.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) ) +#if((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(6,6)) || \ + (SLE_VERSION_CODE && SLE_VERSION_CODE <= SLE_VERSION(11,4,0))) +static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2) +{ + if (cmp1.tv64 < cmp2.tv64) + return -1; + if (cmp1.tv64 > cmp2.tv64) + return 1; + return 0; +} +#endif +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))) +#ifndef pci_sriov_set_totalvfs +static inline int __kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev, u16 __always_unused numvfs) +{ + return 0; +} +#define pci_sriov_set_totalvfs(a, b) __kc_pci_sriov_set_totalvfs((a), (b)) +#endif +#endif /* !(RHEL_RELEASE_CODE >= 6.5 && SLE_VERSION_CODE >= 11.4) */ +#ifndef PCI_EXP_LNKCTL_ASPM_L0S +#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */ +#endif +#ifndef PCI_EXP_LNKCTL_ASPM_L1 +#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */ +#endif +#define HAVE_CONFIG_HOTPLUG +/* Reserved Ethernet Addresses per IEEE 802.1Q */ +static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { + 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; + +#ifndef is_link_local_ether_addr +static inline bool __kc_is_link_local_ether_addr(const u8 *addr) +{ + __be16 *a = (__be16 *)addr; + static const __be16 *b = (const __be16 *)eth_reserved_addr_base; + static const __be16 m = cpu_to_be16(0xfff0); + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; +} +#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr) +#endif /* is_link_local_ether_addr */ + +#ifndef FLOW_MAC_EXT +#define FLOW_MAC_EXT 0x40000000 +#endif /* FLOW_MAC_EXT */ + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) +#define HAVE_SRIOV_CONFIGURE +#endif + +#ifndef PCI_EXP_LNKCAP_SLS_2_5GB +#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */ +#endif + +#ifndef PCI_EXP_LNKCAP_SLS_5_0GB +#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */ +#endif + +#undef PCI_EXP_LNKCAP2_SLS_2_5GB +#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */ + +#undef PCI_EXP_LNKCAP2_SLS_5_0GB +#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 
5GT/s */ + +#undef PCI_EXP_LNKCAP2_SLS_8_0GB +#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */ + +#else /* >= 3.8.0 */ +#ifndef __devinit +#define __devinit +#endif + +#ifndef __devinitdata +#define __devinitdata +#endif + +#ifndef __devinitconst +#define __devinitconst +#endif + +#ifndef __devexit +#define __devexit +#endif + +#ifndef __devexit_p +#define __devexit_p +#endif + +#ifndef HAVE_ENCAP_CSUM_OFFLOAD +#define HAVE_ENCAP_CSUM_OFFLOAD +#endif + +#ifndef HAVE_GRE_ENCAP_OFFLOAD +#define HAVE_GRE_ENCAP_OFFLOAD +#endif + +#ifndef HAVE_SRIOV_CONFIGURE +#define HAVE_SRIOV_CONFIGURE +#endif + +#define HAVE_BRIDGE_ATTRIBS +#ifndef BRIDGE_MODE_VEB +#define BRIDGE_MODE_VEB 0 /* Default loopback mode */ +#endif /* BRIDGE_MODE_VEB */ +#ifndef BRIDGE_MODE_VEPA +#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */ +#endif /* BRIDGE_MODE_VEPA */ +#endif /* >= 3.8.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) + +#undef BUILD_BUG_ON +#ifdef __CHECKER__ +#define BUILD_BUG_ON(condition) (0) +#else /* __CHECKER__ */ +#ifndef __compiletime_warning +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_warning(message) __attribute__((warning(message))) +#else /* __GNUC__ */ +#define __compiletime_warning(message) +#endif /* __GNUC__ */ +#endif /* __compiletime_warning */ +#ifndef __compiletime_error +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_error(message) __attribute__((error(message))) +#define __compiletime_error_fallback(condition) do { } while (0) +#else /* __GNUC__ */ +#define __compiletime_error(message) +#define __compiletime_error_fallback(condition) \ + do { ((void)sizeof(char[1 - 2 * condition])); } while (0) +#endif /* __GNUC__ */ +#else /* __compiletime_error */ +#define __compiletime_error_fallback(condition) do { } while (0) +#endif /* __compiletime_error */ +#define __compiletime_assert(condition, msg, prefix, suffix) \ + do { \ + bool __cond = !(condition); \ + extern void prefix ## suffix(void) __compiletime_error(msg); \ + if (__cond) \ + prefix ## suffix(); \ + __compiletime_error_fallback(__cond); \ + } while (0) + +#define _compiletime_assert(condition, msg, prefix, suffix) \ + __compiletime_assert(condition, msg, prefix, suffix) +#define compiletime_assert(condition, msg) \ + _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) +#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) +#ifndef __OPTIMIZE__ +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +#else /* __OPTIMIZE__ */ +#define BUILD_BUG_ON(condition) \ + BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) +#endif /* __OPTIMIZE__ */ +#endif /* __CHECKER__ */ + +#undef hlist_entry +#define hlist_entry(ptr, type, member) container_of(ptr,type,member) + +#undef hlist_entry_safe +#define hlist_entry_safe(ptr, type, member) \ + ({ typeof(ptr) ____ptr = (ptr); \ + ____ptr ? 
hlist_entry(____ptr, type, member) : NULL; \ + }) + +#undef hlist_for_each_entry +#define hlist_for_each_entry(pos, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \ + pos && ({ n = pos->member.next; 1; }); \ + pos = hlist_entry_safe(n, typeof(*pos), member)) + +#undef hlist_for_each_entry_continue +#define hlist_for_each_entry_continue(pos, member) \ + for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_from +#define hlist_for_each_entry_from(pos, member) \ + for (; pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hash_for_each +#define hash_for_each(name, bkt, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry(obj, &name[bkt], member) + +#undef hash_for_each_safe +#define hash_for_each_safe(name, bkt, tmp, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) + +#undef hash_for_each_possible +#define hash_for_each_possible(name, obj, member, key) \ + hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) + +#undef hash_for_each_possible_safe +#define hash_for_each_possible_safe(name, obj, tmp, member, key) \ + hlist_for_each_entry_safe(obj, tmp,\ + &name[hash_min(key, HASH_BITS(name))], member) + +#ifdef CONFIG_XPS +int __kc_netif_set_xps_queue(struct net_device *, const struct cpumask *, u16); +#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx)) +#else /* CONFIG_XPS */ +#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0) +#endif /* CONFIG_XPS */ + +#ifdef HAVE_NETDEV_SELECT_QUEUE +#define _kc_hashrnd 0xd631614b /* not so random hash salt */ +u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); +#define __netdev_pick_tx __kc_netdev_pick_tx +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#else +#define HAVE_BRIDGE_FILTER +#define HAVE_FDB_DEL_NLATTR +#endif /* < 3.9.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) +#ifndef NAPI_POLL_WEIGHT +#define NAPI_POLL_WEIGHT 64 +#endif +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev *dev); +#else +static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev) +{ + return 0; +} +#endif +#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev) + +#ifndef list_first_entry_or_null +#define list_first_entry_or_null(ptr, type, member) \ + (!list_empty(ptr) ? 
list_first_entry(ptr, type, member) : NULL) +#endif + +#ifndef VLAN_TX_COOKIE_MAGIC +static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb, + u16 vlan_tci) +{ +#ifdef VLAN_TAG_PRESENT + vlan_tci |= VLAN_TAG_PRESENT; +#endif + skb->vlan_tci = vlan_tci; + return skb; +} +#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \ + __kc__vlan_hwaccel_put_tag(skb, vlan_tci) +#endif + +#ifdef HAVE_FDB_OPS +#if defined(HAVE_NDO_FDB_ADD_NLATTR) +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 flags); +#elif defined(USE_CONST_DEV_UC_CHAR) +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr, u16 flags); +#else +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 flags); +#endif /* HAVE_NDO_FDB_ADD_NLATTR */ +#if defined(HAVE_FDB_DEL_NLATTR) +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr); +#elif defined(USE_CONST_DEV_UC_CHAR) +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr); +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr); +#endif /* HAVE_FDB_DEL_NLATTR */ +#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add +#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del +#endif /* HAVE_FDB_OPS */ + +#ifndef PCI_DEVID +#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) +#endif + +/* The definitions for these functions when CONFIG_OF_NET is defined are + * pulled in from . For kernels older than 3.5 we already have + * backports for when CONFIG_OF_NET is true. These are separated and + * duplicated in order to cover all cases so that all kernels get either the + * real definitions (when CONFIG_OF_NET is defined) or the stub definitions + * (when CONFIG_OF_NET is not defined, or the kernel is too old to have real + * definitions). 
+ */ +#ifndef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif + +#else /* >= 3.10.0 */ +#define HAVE_ENCAP_TSO_OFFLOAD +#define USE_DEFAULT_FDB_DEL_DUMP +#define HAVE_SKB_INNER_NETWORK_HEADER +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0))) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) +#define HAVE_RHEL7_PCI_DRIVER_RH +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) +#define HAVE_RHEL7_PCI_RESET_NOTIFY +#endif /* RHEL >= 7.2 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) +#define HAVE_GENEVE_RX_OFFLOAD +#endif /* RHEL < 7.5 */ +#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC +#if defined(__aarch64__) +#else +#ifndef CONFIG_PPC64 +#define HAVE_RHEL7_NET_DEVICE_OPS_EXT +#endif +#endif +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) +#define HAVE_UDP_ENC_TUNNEL +#endif /* !HAVE_UDP_ENC_TUNNEL && CONFIG_GENEVE */ +#endif /* RHEL >= 7.3 */ + +/* new hooks added to net_device_ops_extended in RHEL7.4 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) +#if defined (__aarch64__) +#else +#ifndef CONFIG_PPC64 +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL +#define HAVE_UDP_ENC_RX_OFFLOAD +#endif +#endif +#endif /* RHEL >= 7.4 */ +#else /* RHEL >= 8.0 */ +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#define NO_NETDEV_BPF_PROG_ATTACHED +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#endif /* RHEL >= 8.0 */ +#endif /* RHEL >= 7.0 */ +#endif /* >= 3.10.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) ) +#define netdev_notifier_info_to_dev(ptr) ptr +#ifndef time_in_range64 +#define time_in_range64(a, b, c) \ + (time_after_eq64(a, b) && \ + time_before_eq64(a, c)) +#endif /* time_in_range64 */ +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,11)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))) +#define HAVE_NDO_SET_VF_LINK_STATE +#elif (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,5)) +#else +enum { + IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */ + IFLA_VF_LINK_STATE_ENABLE, /* link always up */ + IFLA_VF_LINK_STATE_DISABLE, /* link always down */ + __IFLA_VF_LINK_STATE_MAX, +}; +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif +#else /* >= 3.11.0 */ +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_SKB_INNER_PROTOCOL +#define HAVE_MPLS_FEATURES +#endif /* >= 3.11.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) +int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width); +#ifndef pcie_get_minimum_link +#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w) +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7)) +int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev); +#define pci_wait_for_pending_transaction _kc_pci_wait_for_pending_transaction +#endif /* = 3.12.0 */ +#if ( SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) 
) +#define HAVE_VXLAN_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* < 4.8.0 */ +#define HAVE_NDO_GET_PHYS_PORT_ID +#define HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK +#endif /* >= 3.12.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) ) +#define dma_set_mask_and_coherent(_p, _m) __kc_dma_set_mask_and_coherent(_p, _m) +int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask); +#ifndef u64_stats_init +#define u64_stats_init(a) do { } while(0) +#endif +#undef BIT_ULL +#define BIT_ULL(n) (1ULL << (n)) + +#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0))) +static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) +{ + dev = pci_physfn(dev); + if (pci_is_root_bus(dev->bus)) + return NULL; + + return dev->bus->self; +} +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0)) +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT +#endif +#ifndef list_next_entry +#define list_next_entry(pos, member) \ + list_entry((pos)->member.next, typeof(*(pos)), member) +#endif +#ifndef list_prev_entry +#define list_prev_entry(pos, member) \ + list_entry((pos)->member.prev, typeof(*(pos)), member) +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,20) ) +#define devm_kcalloc(dev, cnt, size, flags) \ + devm_kzalloc(dev, cnt * size, flags) +#endif /* > 2.6.20 */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +#define list_last_entry(ptr, type, member) list_entry((ptr)->prev, type, member) +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) +bool _kc_pci_device_is_present(struct pci_dev *pdev); +#define pci_device_is_present _kc_pci_device_is_present +#endif /* = 3.13.0 */ +#define HAVE_VXLAN_CHECKS +#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,24)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#elif (defined(UTS_RELEASE) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#else +#define HAVE_NDO_SELECT_QUEUE_ACCEL +#endif +#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS +#endif + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ) + +#ifndef U16_MAX +#define U16_MAX ((u16)~0U) +#endif + +#ifndef U32_MAX +#define U32_MAX ((u32)~0U) +#endif + +#ifndef U64_MAX +#define U64_MAX ((u64)~0ULL) +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +#define dev_consume_skb_any(x) dev_kfree_skb_any(x) +#define dev_consume_skb_irq(x) dev_kfree_skb_irq(x) +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))) + +/* it isn't expected that this would be a #define unless we made it so */ +#ifndef skb_set_hash + +#define PKT_HASH_TYPE_NONE 0 +#define PKT_HASH_TYPE_L2 1 +#define PKT_HASH_TYPE_L3 2 +#define PKT_HASH_TYPE_L4 3 + +enum _kc_pkt_hash_types { + _KC_PKT_HASH_TYPE_NONE = PKT_HASH_TYPE_NONE, + _KC_PKT_HASH_TYPE_L2 = PKT_HASH_TYPE_L2, + _KC_PKT_HASH_TYPE_L3 = PKT_HASH_TYPE_L3, + _KC_PKT_HASH_TYPE_L4 = PKT_HASH_TYPE_L4, +}; +#define pkt_hash_types _kc_pkt_hash_types + +#define skb_set_hash __kc_skb_set_hash +static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb, + u32 
__maybe_unused hash, + int __maybe_unused type) +{ +#ifdef HAVE_SKB_L4_RXHASH + skb->l4_rxhash = (type == PKT_HASH_TYPE_L4); +#endif +#ifdef NETIF_F_RXHASH + skb->rxhash = hash; +#endif +} +#endif /* !skb_set_hash */ + +#else /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */ + +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,0)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE <= SLE_VERSION(12,1,0))) +/* GPLv2 code taken from 5.10-rc2 kernel source include/linux/pci.h, Copyright + * original authors. + */ +#if 0 +static inline int pci_enable_msix_exact(struct pci_dev *dev, + struct msix_entry *entries, int nvec) +{ + int rc = pci_enable_msix_range(dev, entries, nvec, nvec); + if (rc < 0) + return rc; + return 0; +} +#endif +#endif /* <=EL7.0 || <=SLES 12.1 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#ifndef HAVE_VXLAN_RX_OFFLOAD +#define HAVE_VXLAN_RX_OFFLOAD +#endif /* HAVE_VXLAN_RX_OFFLOAD */ +#endif + +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif + +#ifndef HAVE_VXLAN_CHECKS +#define HAVE_VXLAN_CHECKS +#endif /* HAVE_VXLAN_CHECKS */ +#endif /* !(RHEL_RELEASE_CODE >= 7.0 && SLE_VERSION_CODE >= 12.0) */ + +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))) +#define HAVE_NDO_DFWD_OPS +#endif + +#ifndef pci_enable_msix_range +int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec); +#define pci_enable_msix_range __kc_pci_enable_msix_range +#endif + +#ifndef ether_addr_copy +#define ether_addr_copy __kc_ether_addr_copy +static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + *(u32 *)dst = *(const u32 *)src; + *(u16 *)(dst + 4) = *(const u16 *)(src + 4); +#else + u16 *a = (u16 *)dst; + const u16 *b = (const u16 *)src; + + a[0] = b[0]; + a[1] = b[1]; + a[2] = b[2]; +#endif +} +#endif /* ether_addr_copy */ +int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, + int target, unsigned short *fragoff, int *flags); +#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e)) + +#ifndef OPTIMIZE_HIDE_VAR +#ifdef __GNUC__ +#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var)) +#else +#include +#define OPTIMIZE_HIDE_VAR(var) barrier() +#endif +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,4,0))) +static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) +{ +#ifdef NETIF_F_RXHASH + return skb->rxhash; +#else + return 0; +#endif /* NETIF_F_RXHASH */ +} +#endif /* !RHEL > 5.9 && !SLES >= 10.4 */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) +#define request_firmware_direct request_firmware +#endif /* !RHEL || RHEL < 7.5 */ + +#else /* >= 3.14.0 */ + +/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */ +#ifndef HAVE_NDO_DFWD_OPS +#define HAVE_NDO_DFWD_OPS +#endif +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif /* 3.14.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +#define HAVE_SKBUFF_RXHASH +#endif /* >= 2.6.35 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) && \ + !(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= 
UBUNTU_VERSION(3,13,0,30))) +#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh +#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh +#endif + +#else /* >= 3.15.0 */ +#define HAVE_NET_GET_RANDOM_ONCE +#define HAVE_PTP_1588_CLOCK_PINS +#define HAVE_NETDEV_PORT +#endif /* 3.15.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) +#ifndef smp_mb__before_atomic +#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() smp_mb() +#endif +#ifndef __dev_uc_sync +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)); +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)); +#endif +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)); +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)); +#endif +#endif /* HAVE_SET_RX_MODE */ + +static inline int __kc_dev_uc_sync(struct net_device __maybe_unused *dev, + int __maybe_unused (*sync)(struct net_device *, const unsigned char *), + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef NETDEV_HW_ADDR_T_UNICAST + return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync); +#elif defined(HAVE_SET_RX_MODE) + return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count, + dev, sync, unsync); +#else + return 0; +#endif +} +#define __dev_uc_sync __kc_dev_uc_sync + +static inline void __kc_dev_uc_unsync(struct net_device __maybe_unused *dev, + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST + __kc_hw_addr_unsync_dev(&dev->uc, dev, unsync); +#else /* NETDEV_HW_ADDR_T_MULTICAST */ + __kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync); +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#endif /* HAVE_SET_RX_MODE */ +} +#define __dev_uc_unsync __kc_dev_uc_unsync + +static inline int __kc_dev_mc_sync(struct net_device __maybe_unused *dev, + int __maybe_unused (*sync)(struct net_device *, const unsigned char *), + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef NETDEV_HW_ADDR_T_MULTICAST + return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync); +#elif defined(HAVE_SET_RX_MODE) + return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count, + dev, sync, unsync); +#else + return 0; +#endif + +} +#define __dev_mc_sync __kc_dev_mc_sync + +static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev, + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_MULTICAST + __kc_hw_addr_unsync_dev(&dev->mc, dev, unsync); +#else /* NETDEV_HW_ADDR_T_MULTICAST */ + __kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync); +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +} +#define __dev_mc_unsync __kc_dev_mc_unsync +#endif /* __dev_uc_sync */ + +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > 
RHEL_RELEASE_VERSION(7,1)) +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#endif + +#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM +/* if someone backports this, hopefully they backport as a #define. + * declare it as zero on older kernels so that if it get's or'd in + * it won't effect anything, therefore preventing core driver changes + */ +#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0 +#define SKB_GSO_UDP_TUNNEL_CSUM 0 +#endif +void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp); +#define devm_kmemdup __kc_devm_kmemdup + +#else +#if ( ( LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0) ) && \ + ! ( SLE_VERSION_CODE && ( SLE_VERSION_CODE >= SLE_VERSION(12,4,0)) ) ) +#define HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY +#endif /* >= 3.16.0 && < 4.13.0 && !(SLES >= 12sp4) */ +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#endif /* 3.16.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) ) +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) +#ifndef timespec64 +#define timespec64 timespec +static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) +{ + return ts; +} +static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) +{ + return ts64; +} +#define timespec64_equal timespec_equal +#define timespec64_compare timespec_compare +#define set_normalized_timespec64 set_normalized_timespec +#define timespec64_add_safe timespec_add_safe +#define timespec64_add timespec_add +#define timespec64_sub timespec_sub +#define timespec64_valid timespec_valid +#define timespec64_valid_strict timespec_valid_strict +#define timespec64_to_ns timespec_to_ns +#define ns_to_timespec64 ns_to_timespec +#define ktime_to_timespec64 ktime_to_timespec +#define ktime_get_ts64 ktime_get_ts +#define ktime_get_real_ts64 ktime_get_real_ts +#define timespec64_add_ns timespec_add_ns +#endif /* timespec64 */ +#endif /* !(RHEL6.8= RHEL_RELEASE_VERSION(6,8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) +static inline void ktime_get_real_ts64(struct timespec64 *ts) +{ + *ts = ktime_to_timespec64(ktime_get_real()); +} + +static inline void ktime_get_ts64(struct timespec64 *ts) +{ + *ts = ktime_to_timespec64(ktime_get()); +} +#endif + +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define hlist_add_behind(_a, _b) hlist_add_after(_b, _a) +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) +#endif /* RHEL_RELEASE_CODE < RHEL7.5 */ + +#if RHEL_RELEASE_CODE && \ + RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,3) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,3) +static inline u64 ktime_get_ns(void) +{ + return ktime_to_ns(ktime_get()); +} + +static inline u64 ktime_get_real_ns(void) +{ + return ktime_to_ns(ktime_get_real()); +} + +static inline u64 ktime_get_boot_ns(void) +{ + return ktime_to_ns(ktime_get_boottime()); +} +#endif /* RHEL < 7.3 */ + +#else +#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT +#include +#define HAVE_RHASHTABLE +#endif /* 3.17.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) ) +#ifndef NO_PTP_SUPPORT +#include +struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb); +void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps); +#define skb_clone_sk __kc_skb_clone_sk +#define skb_complete_tx_timestamp 
__kc_skb_complete_tx_timestamp +#endif +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +u32 __kc_eth_get_headlen(const struct net_device *dev, unsigned char *data, + unsigned int max_len); +#else +unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len); +#endif /* !RHEL >= 8.2 */ + +#define eth_get_headlen __kc_eth_get_headlen +#ifndef ETH_P_XDSA +#define ETH_P_XDSA 0x00F8 +#endif +/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,1)) +#define HAVE_SKBUFF_CSUM_LEVEL +#endif /* >= RH 7.1 */ + +/* RHEL 7.3 backported xmit_more */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#define HAVE_SKB_XMIT_MORE +#endif /* >= RH 7.3 */ + +#undef GENMASK +#define GENMASK(h, l) \ + (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) +#undef GENMASK_ULL +#define GENMASK_ULL(h, l) \ + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + +#else /* 3.18.0 */ +#define HAVE_SKBUFF_CSUM_LEVEL +#define HAVE_SKB_XMIT_MORE +#define HAVE_SKB_INNER_PROTOCOL_TYPE +#endif /* 3.18.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) ) +#else +#define HAVE_NDO_FEATURES_CHECK +#endif /* 3.18.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,13) ) +#ifndef WRITE_ONCE +#define WRITE_ONCE(x, val) ({ ACCESS_ONCE(x) = (val); }) +#endif +#endif /* 3.18.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) +/* netdev_phys_port_id renamed to netdev_phys_item_id */ +#define netdev_phys_item_id netdev_phys_port_id + +static inline void _kc_napi_complete_done(struct napi_struct *napi, + int __always_unused work_done) { + napi_complete(napi); +} +/* don't use our backport if the distro kernels already have it */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +#define napi_complete_done _kc_napi_complete_done +#endif + +int _kc_bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, int nmaskbits); +#define bitmap_print_to_pagebuf _kc_bitmap_print_to_pagebuf + +#ifndef NETDEV_RSS_KEY_LEN +#define NETDEV_RSS_KEY_LEN (13 * 4) +#endif +#if (!(RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))))) +#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len) +#endif /* RHEL_RELEASE_CODE */ +void __kc_netdev_rss_key_fill(void *buffer, size_t len); +#define SPEED_20000 20000 +#define SPEED_40000 40000 +#ifndef dma_rmb +#define dma_rmb() rmb() +#endif +#ifndef dev_alloc_pages +#ifndef NUMA_NO_NODE +#define NUMA_NO_NODE -1 +#endif +#define dev_alloc_pages(_order) alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC), (_order)) +#endif +#ifndef dev_alloc_page +#define dev_alloc_page() dev_alloc_pages(0) +#endif +#if !defined(eth_skb_pad) && !defined(skb_put_padto) +/** + * __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. 
If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error. + */ +static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + + if (unlikely(size < len)) { + len -= size; + if (skb_pad(skb, len)) + return -ENOMEM; + __skb_put(skb, len); + } + return 0; +} +#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len) + +static inline int __kc_eth_skb_pad(struct sk_buff *skb) +{ + return __kc_skb_put_padto(skb, ETH_ZLEN); +} +#define eth_skb_pad(skb) __kc_eth_skb_pad(skb) +#endif /* eth_skb_pad && skb_put_padto */ + +#ifndef SKB_ALLOC_NAPI +/* RHEL 7.2 backported napi_alloc_skb and friends */ +static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length) +{ + return netdev_alloc_skb_ip_align(napi->dev, length); +} +#define napi_alloc_skb(napi,len) __kc_napi_alloc_skb(napi,len) +#define __napi_alloc_skb(napi,len,mask) __kc_napi_alloc_skb(napi,len) +#endif /* SKB_ALLOC_NAPI */ +#define HAVE_CONFIG_PM_RUNTIME +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RXFH_HASHFUNC +#endif /* 6.7 < RHEL < 7.0 */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* RHEL > 7.1 */ +#ifndef napi_schedule_irqoff +#define napi_schedule_irqoff napi_schedule +#endif +#ifndef READ_ONCE +#define READ_ONCE(_x) ACCESS_ONCE(_x) +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_FDB_ADD_VID +#endif +#ifndef ETH_MODULE_SFF_8636 +#define ETH_MODULE_SFF_8636 0x3 +#endif +#ifndef ETH_MODULE_SFF_8636_LEN +#define ETH_MODULE_SFF_8636_LEN 256 +#endif +#ifndef ETH_MODULE_SFF_8436 +#define ETH_MODULE_SFF_8436 0x4 +#endif +#ifndef ETH_MODULE_SFF_8436_LEN +#define ETH_MODULE_SFF_8436_LEN 256 +#endif +#ifndef writel_relaxed +#define writel_relaxed writel +#endif +#else /* 3.19.0 */ +#define HAVE_NDO_FDB_ADD_VID +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* 3.19.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) ) +/* vlan_tx_xx functions got renamed to skb_vlan */ +#ifndef skb_vlan_tag_get +#define skb_vlan_tag_get vlan_tx_tag_get +#endif +#ifndef skb_vlan_tag_present +#define skb_vlan_tag_present vlan_tx_tag_present +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif +#else +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif /* 3.20.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) ) +/* Definition for CONFIG_OF was introduced earlier */ +#if !defined(CONFIG_OF) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +static inline struct device_node * +pci_device_to_OF_node(const struct pci_dev __always_unused *pdev) { return NULL; } +#else /* !CONFIG_OF && RHEL < 7.3 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* !CONFIG_OF && RHEL < 7.3 */ +#else /* < 4.0 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* < 4.0 */ + 
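+/* Illustrative only: a minimal usage sketch, guarded out with #if 0 in the
+ * same way other example code in this header is, showing how driver code
+ * written against the newer skb_vlan_tag_*() and eth_skb_pad() names keeps
+ * working on older kernels through the backports defined above. The helper
+ * name _kc_example_tx_prepare and its parameters are hypothetical and are not
+ * part of this header or of any kernel API.
+ */
+#if 0
+static int _kc_example_tx_prepare(struct sk_buff *skb, u16 *vlan_tci)
+{
+	/* On kernels older than 3.20 these map back to vlan_tx_tag_present()
+	 * and vlan_tx_tag_get() via the defines above.
+	 */
+	if (skb_vlan_tag_present(skb))
+		*vlan_tci = skb_vlan_tag_get(skb);
+
+	/* Pad runt frames up to ETH_ZLEN; on failure the skb has already
+	 * been freed by the backported __kc_skb_put_padto(), so the caller
+	 * must not touch it again.
+	 */
+	return eth_skb_pad(skb);
+}
+#endif
+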
+/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) +#ifndef NO_PTP_SUPPORT +#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#include +#else +#include +#endif +static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta) +{ + tc->nsec += delta; +} +#define timecounter_adjtime __kc_timecounter_adjtime +#endif +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)))) +#define HAVE_NDO_SET_VF_RSS_QUERY_EN +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +#define HAVE_RHEL7_EXTENDED_NDO_SET_TX_MAXRATE +#define HAVE_NDO_SET_TX_MAXRATE +#endif +#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + (SLE_VERSION_CODE > SLE_VERSION(12,1,0))) +unsigned int _kc_cpumask_local_spread(unsigned int i, int node); +#define cpumask_local_spread _kc_cpumask_local_spread +#endif +#ifdef HAVE_RHASHTABLE +#define rhashtable_loopup_fast(ht, key, params) \ + do { \ + (void)params; \ + rhashtable_lookup((ht), (key)); \ + } while (0) + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) +#define rhashtable_insert_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_insert((ht), (obj), GFP_KERNEL); \ + } while (0) + +#define rhashtable_remove_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_remove((ht), (obj), GFP_KERNEL); \ + } while (0) + +#else /* >= 3,19,0 */ +#define rhashtable_insert_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_insert((ht), (obj)); \ + } while (0) + +#define rhashtable_remove_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_remove((ht), (obj)); \ + } while (0) + +#endif /* 3,19,0 */ +#endif /* HAVE_RHASHTABLE */ +#else /* >= 4,1,0 */ +#define HAVE_NDO_GET_PHYS_PORT_NAME +#define HAVE_PTP_CLOCK_INFO_GETTIME64 +#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +#define HAVE_PASSTHRU_FEATURES_CHECK +#define HAVE_NDO_SET_VF_RSS_QUERY_EN +#define HAVE_NDO_SET_TX_MAXRATE +#endif /* 4,1,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9)) +#if (!(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + !((SLE_VERSION_CODE == SLE_VERSION(11,3,0)) && \ + (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(0,47,71))) && \ + !((SLE_VERSION_CODE == SLE_VERSION(11,4,0)) && \ + (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(65,0,0))) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) +static inline bool page_is_pfmemalloc(struct page __maybe_unused *page) +{ +#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC + return page->pfmemalloc; +#else + return false; +#endif +} +#endif /* !RHEL7.2+ && !SLES11sp3(3.0.101-0.47.71+ update) && !SLES11sp4(3.0.101-65+ update) & !SLES12sp1+ */ +#else +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#endif /* 4.1.9 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) +#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFULL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000ULL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32 +static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie) +{ + return ETHTOOL_RX_FLOW_SPEC_RING & 
ring_cookie; +}; + +static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) +{ + return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >> + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; +}; +#endif /* ! RHEL >= 7.2 && ! SLES >= 12.1 */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) +#if (!((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +static inline bool pci_ari_enabled(struct pci_bus *bus) +{ + return bus->self && bus->self->ari_enabled; +} +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) +#define HAVE_VF_STATS +#endif /* (RHEL7.2+) */ +#endif /* !(RHEL6.8+ || RHEL7.2+) */ +#else +static inline bool pci_ari_enabled(struct pci_bus *bus) +{ + return false; +} +#endif /* 2.6.27 */ +#else +#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT +#define HAVE_VF_STATS +#endif /* 4.2.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +/** + * _kc_flow_dissector_key_ipv4_addrs: + * @src: source ip address + * @dst: destination ip address + */ +struct _kc_flow_dissector_key_ipv4_addrs { + __be32 src; + __be32 dst; +}; + +/** + * _kc_flow_dissector_key_ipv6_addrs: + * @src: source ip address + * @dst: destination ip address + */ +struct _kc_flow_dissector_key_ipv6_addrs { + struct in6_addr src; + struct in6_addr dst; +}; + +/** + * _kc_flow_dissector_key_addrs: + * @v4addrs: IPv4 addresses + * @v6addrs: IPv6 addresses + */ +struct _kc_flow_dissector_key_addrs { + union { + struct _kc_flow_dissector_key_ipv4_addrs v4addrs; + struct _kc_flow_dissector_key_ipv6_addrs v6addrs; + }; +}; + +/** + * _kc_flow_dissector_key_tp_ports: + * @ports: port numbers of Transport header + * src: source port number + * dst: destination port number + */ +struct _kc_flow_dissector_key_ports { + union { + __be32 ports; + struct { + __be16 src; + __be16 dst; + }; + }; +}; + +/** + * _kc_flow_dissector_key_basic: + * @n_proto: Network header protocol (eg. IPv4/IPv6) + * @ip_proto: Transport header protocol (eg. TCP/UDP) + * @padding: padding for alignment + */ +struct _kc_flow_dissector_key_basic { + __be16 n_proto; + u8 ip_proto; + u8 padding; +}; + +struct _kc_flow_keys { + struct _kc_flow_dissector_key_basic basic; + struct _kc_flow_dissector_key_ports ports; + struct _kc_flow_dissector_key_addrs addrs; +}; + +/* These are all the include files for kernels inside this #ifdef block that + * have any reference to the in kernel definition of struct flow_keys. The + * reason for putting them here is to make 100% sure that these files do not get + * included after re-defining flow_keys to _kc_flow_keys. This is done to + * prevent any possible ABI issues that this structure re-definition could case. 
+ */ +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)) || \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) || \ + SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) +#include +#endif /* (>= 3.3.0 && < 4.2.0) || >= RHEL 6.7 || >= SLE 11.4 */ +#if (LINUX_VERSION_CODE == KERNEL_VERSION(4,2,0)) +#include +#endif /* 4.2.0 */ +#include +#include +#include +#include + +#define flow_keys _kc_flow_keys +bool +_kc_skb_flow_dissect_flow_keys(const struct sk_buff *skb, + struct flow_keys *flow, + unsigned int __always_unused flags); +#define skb_flow_dissect_flow_keys _kc_skb_flow_dissect_flow_keys +#endif /* ! >= RHEL 7.4 && ! >= SLES 12.2 */ + +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +#include +#endif /* >= RHEL7.3 || >= SLE12sp2 */ +#else /* >= 4.3.0 */ +#include +#endif /* 4.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0)) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#define HAVE_NDO_SET_VF_TRUST +#endif /* (RHEL_RELEASE >= 7.3) */ +#ifndef CONFIG_64BIT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) +#include /* 32-bit readq/writeq */ +#else /* 3.3.0 => 4.3.x */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) +#include +#endif /* 2.6.26 => 3.3.0 */ +#ifndef readq +static inline __u64 readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl(p); + high = readl(p + 1); + + return low + ((u64)high << 32); +} +#define readq readq +#endif + +#ifndef writeq +static inline void writeq(__u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#define writeq writeq +#endif +#endif /* < 3.3.0 */ +#endif /* !CONFIG_64BIT */ +#else /* < 4.4.0 */ +#define HAVE_NDO_SET_VF_TRUST + +#ifndef CONFIG_64BIT +#include /* 32-bit readq/writeq */ +#endif /* !CONFIG_64BIT */ +#endif /* 4.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)) +/* protect against a likely backport */ +#ifndef NETIF_F_CSUM_MASK +#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM +#endif /* NETIF_F_CSUM_MASK */ +#ifndef NETIF_F_SCTP_CRC +#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM +#endif /* NETIF_F_SCTP_CRC */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) +#define eth_platform_get_mac_address _kc_eth_platform_get_mac_address +int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, + u8 *mac_addr __maybe_unused); +#endif /* !(RHEL_RELEASE >= 7.3) */ +#else /* 4.5.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) +#define HAVE_GENEVE_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* < 4.8.0 */ +#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD +#define HAVE_NETDEV_UPPER_INFO +#endif /* 4.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,3)) +static inline unsigned char *skb_checksum_start(const struct sk_buff *skb) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) + return skb->head + skb->csum_start; +#else /* < 2.6.22 */ + return skb_transport_header(skb); +#endif +} +#endif + +#if !(UBUNTU_VERSION_CODE && \ + UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,4,0,21)) && \ + 
!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +static inline void napi_consume_skb(struct sk_buff *skb, + int __always_unused budget) +{ + dev_consume_skb_any(skb); +} + +#endif /* UBUNTU 4,4,0,21, RHEL 7.2, SLES12 SP3 */ +#if !((LINUX_VERSION_CODE == KERNEL_VERSION(4, 4, 131)) && \ + (defined(TXGBE_SUPPORT_KYLIN_FT) || defined(TXGBE_SUPPORT_KYLIN_LX)) ) +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) +{ + * sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); +} +#endif +#endif +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) +static inline void page_ref_inc(struct page *page) +{ + get_page(page); +} +#else +#define HAVE_PAGE_COUNT_BULK_UPDATE +#endif +#ifndef IPV4_USER_FLOW +#define IPV4_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */ +#endif + +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_TC_SETUP_CLSFLOWER +#define HAVE_TC_FLOWER_ENC +#endif + +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +#define HAVE_TC_SETUP_CLSU32 +#endif + +#if (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)) +#define HAVE_TC_SETUP_CLSFLOWER +#endif + +#else /* >= 4.6.0 */ +#define HAVE_PAGE_COUNT_BULK_UPDATE +#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC +#define HAVE_PTP_CROSSTIMESTAMP +#define HAVE_TC_SETUP_CLSFLOWER +#define HAVE_TC_SETUP_CLSU32 +#endif /* 4.6.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)) +#if ((SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))) +#define HAVE_NETIF_TRANS_UPDATE +#endif /* SLES12sp3+ || RHEL7.4+ */ +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ||\ + (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_ETHTOOL_25G_BITS +#define HAVE_ETHTOOL_50G_BITS +#define HAVE_ETHTOOL_100G_BITS +#endif /* RHEL7.3+ || SLES12sp3+ */ +#else /* 4.7.0 */ +#define HAVE_NETIF_TRANS_UPDATE +#define HAVE_ETHTOOL_25G_BITS +#define HAVE_ETHTOOL_50G_BITS +#define HAVE_ETHTOOL_100G_BITS +#define HAVE_TCF_MIRRED_REDIRECT +#endif /* 4.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +enum udp_parsable_tunnel_type { + UDP_TUNNEL_TYPE_VXLAN, + UDP_TUNNEL_TYPE_GENEVE, +}; +struct udp_tunnel_info { + unsigned short type; + sa_family_t sa_family; + __be16 port; +}; +#endif + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)) +#define HAVE_TCF_EXTS_TO_LIST +#endif + +#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE < UBUNTU_VERSION(4,8,0,0)) +#define tc_no_actions(_exts) true +#define tc_for_each_action(_a, _exts) while (0) +#endif +#if !((LINUX_VERSION_CODE == KERNEL_VERSION(4, 4, 131)) && \ + (defined(TXGBE_SUPPORT_KYLIN_FT) || defined(TXGBE_SUPPORT_KYLIN_LX)) ) +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0))) &&\ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) +static inline int +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +pci_request_io_regions(struct pci_dev *pdev, char *name) 
+#else +pci_request_io_regions(struct pci_dev *pdev, const char *name) +#endif +{ + return pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO), name); +} + +static inline void +pci_release_io_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO)); +} + +static inline int +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +pci_request_mem_regions(struct pci_dev *pdev, char *name) +#else +pci_request_mem_regions(struct pci_dev *pdev, const char *name) +#endif +{ + return pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM), name); +} + +static inline void +pci_release_mem_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +} +#endif /* !SLE_VERSION(12,3,0) */ +#endif /* !TXGBE_SUPPORT_KYLIN_FT */ +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) ||\ + (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_ETHTOOL_NEW_50G_BITS +#endif /* RHEL7.4+ || SLES12sp3+ */ +#else +#define HAVE_UDP_ENC_RX_OFFLOAD +#define HAVE_TCF_EXTS_TO_LIST +#define HAVE_ETHTOOL_NEW_50G_BITS +#endif /* 4.8.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)) +#ifdef HAVE_TC_SETUP_CLSFLOWER +#if (!(RHEL_RELEASE_CODE) && !(SLE_VERSION_CODE) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0)))) +#define HAVE_TC_FLOWER_VLAN_IN_TAGS +#endif /* !RHEL_RELEASE_CODE && !SLE_VERSION_CODE || = RHEL_RELEASE_VERSION(7,4)) +#define HAVE_ETHTOOL_NEW_1G_BITS +#define HAVE_ETHTOOL_NEW_10G_BITS +#endif /* RHEL7.4+ */ +#if (!(SLE_VERSION_CODE) && !(RHEL_RELEASE_CODE)) || \ + SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0)) || \ + RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)) +#define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a) +#endif /* !SLE_VERSION_CODE && !RHEL_RELEASE_CODE || (SLES <= 12.3.0) || (RHEL <= 7.5) */ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,4)) +static inline void bitmap_from_u64(unsigned long *dst, u64 mask) +{ + dst[0] = mask & ULONG_MAX; + + if (sizeof(mask) > sizeof(unsigned long)) + dst[1] = mask >> 32; +} +#endif /* = RHEL_RELEASE_VERSION(7,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) && \ + !(UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,13,0,16))) +#if !(defined(TXGBE_SUPPORT_KYLIN_FT)) && !(defined(TXGBE_SUPPORT_KYLIN_LX)) + +static inline bool eth_type_vlan(__be16 ethertype) +{ + switch (ethertype) { + case htons(ETH_P_8021Q): +#ifdef ETH_P_8021AD + case htons(ETH_P_8021AD): +#endif + return true; + default: + return false; + } +} +#endif +#endif /* Linux < 4.9 || RHEL < 7.4 || SLES < 12.3 || Ubuntu < 4.3.0-16 */ +#else /* >=4.9 */ +#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +#define HAVE_FLOW_DISSECTOR_KEY_VLAN_PRIO +#define HAVE_ETHTOOL_NEW_1G_BITS +#define HAVE_ETHTOOL_NEW_10G_BITS +#endif /* KERNEL_VERSION(4.9.0) */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) +/* SLES 12.3 and RHEL 7.5 backported this interface */ +#if (!SLE_VERSION_CODE && !RHEL_RELEASE_CODE) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +static inline bool _kc_napi_complete_done2(struct napi_struct *napi, + int __always_unused work_done) +{ + /* it was really hard to get napi_complete_done to be safe to call + * 
recursively without running into our own kcompat, so just use + * napi_complete + */ + napi_complete(napi); + + /* true means that the stack is telling the driver to go-ahead and + * re-enable interrupts + */ + return true; +} + +#ifdef napi_complete_done +#undef napi_complete_done +#endif +#define napi_complete_done _kc_napi_complete_done2 +#endif /* sles and rhel exclusion for < 4.10 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_DEV_WALK_API +#define HAVE_ETHTOOL_NEW_2500MB_BITS +#define HAVE_ETHTOOL_5G_BITS +#endif /* RHEL7.4+ */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE == SLE_VERSION(12,3,0))) +#define HAVE_STRUCT_DMA_ATTRS +#endif /* (SLES == 12.3.0) */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_NETDEVICE_MIN_MAX_MTU +#endif /* (SLES >= 12.3.0) */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_STRUCT_DMA_ATTRS +#define HAVE_RHEL7_EXTENDED_MIN_MAX_MTU +#define HAVE_NETDEVICE_MIN_MAX_MTU +#endif +#if (!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \ + !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) +#ifndef dma_map_page_attrs +#define dma_map_page_attrs __kc_dma_map_page_attrs +static inline dma_addr_t __kc_dma_map_page_attrs(struct device *dev, + struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + return dma_map_page(dev, page, offset, size, dir); +} +#endif + +#ifndef dma_unmap_page_attrs +#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs +static inline void __kc_dma_unmap_page_attrs(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + dma_unmap_page(dev, addr, size, dir); +} +#endif + +static inline void __page_frag_cache_drain(struct page *page, + unsigned int count) +{ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + if (!page_ref_sub_and_test(page, count)) + return; + + init_page_count(page); +#else + BUG_ON(count > 1); + if (!count) + return; +#endif + __free_pages(page, compound_order(page)); +} +#endif /* !SLE_VERSION(12,3,0) && !RHEL_VERSION(7,5) */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#endif + +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(15,0,0))) ||\ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,4)))) +#define page_frag_free __free_page_frag +#endif +#ifndef ETH_MIN_MTU +#define ETH_MIN_MTU 68 +#endif /* ETH_MIN_MTU */ +#else /* >= 4.10 */ +#define HAVE_TC_FLOWER_ENC +#define HAVE_NETDEVICE_MIN_MAX_MTU +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#define HAVE_NETDEV_TC_RESETS_XPS +#define HAVE_XPS_QOS_SUPPORT +#define HAVE_DEV_WALK_API +#define HAVE_ETHTOOL_NEW_2500MB_BITS +#define HAVE_ETHTOOL_5G_BITS +/* kernel 4.10 onwards, as part of busy_poll rewrite, new state were added + * which is part of NAPI:state. 
If NAPI:state=NAPI_STATE_IN_BUSY_POLL, + * it means napi_poll is invoked in busy_poll context + */ +#define HAVE_NAPI_STATE_IN_BUSY_POLL +#define HAVE_TCF_MIRRED_EGRESS_REDIRECT +#endif /* 4.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)) +#ifdef CONFIG_NET_RX_BUSY_POLL +#define HAVE_NDO_BUSY_POLL +#endif /* CONFIG_NET_RX_BUSY_POLL */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) +#define HAVE_VOID_NDO_GET_STATS64 +#endif /* (SLES >= 12.3.0) && (RHEL >= 7.5) */ + +static inline void _kc_dev_kfree_skb_irq(struct sk_buff *skb) +{ + if (!skb) + return; + dev_kfree_skb_irq(skb); +} + +#undef dev_kfree_skb_irq +#define dev_kfree_skb_irq _kc_dev_kfree_skb_irq + +static inline void _kc_dev_consume_skb_irq(struct sk_buff *skb) +{ + if (!skb) + return; + dev_consume_skb_irq(skb); +} + +#undef dev_consume_skb_irq +#define dev_consume_skb_irq _kc_dev_consume_skb_irq + +static inline void _kc_dev_kfree_skb_any(struct sk_buff *skb) +{ + if (!skb) + return; + dev_kfree_skb_any(skb); +} + +#undef dev_kfree_skb_any +#define dev_kfree_skb_any _kc_dev_kfree_skb_any + +static inline void _kc_dev_consume_skb_any(struct sk_buff *skb) +{ + if (!skb) + return; + dev_consume_skb_any(skb); +} + +#undef dev_consume_skb_any +#define dev_consume_skb_any _kc_dev_consume_skb_any + +#else /* > 4.11 */ +#define HAVE_VOID_NDO_GET_STATS64 +#define HAVE_VM_OPS_FAULT_NO_VMA +#endif /* 4.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) +/* The RHEL 7.7+ NL_SET_ERR_MSG_MOD triggers unused parameter warnings */ +#undef NL_SET_ERR_MSG_MOD +#endif +#ifndef NL_SET_ERR_MSG_MOD +#define NL_SET_ERR_MSG_MOD(extack, msg) \ + do { \ + uninitialized_var(extack); \ + pr_err(KBUILD_MODNAME ": " msg); \ + } while (0) +#endif /* !NL_SET_ERR_MSG_MOD */ +#else /* >= 4.12 */ +#define HAVE_NAPI_BUSY_LOOP +#endif /* 4.12 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0)) +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_TCF_EXTS_HAS_ACTION +#endif +#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#endif /* SLES >= 12sp4 */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#if !(defined(TXGBE_SUPPORT_KYLIN_FT)) && !(defined(TXGBE_SUPPORT_KYLIN_LX)) +#define UUID_SIZE 16 +typedef struct { + __u8 b[UUID_SIZE]; +} uuid_t; +#define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ +((uuid_t) \ +{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \ + ((b) >> 8) & 0xff, (b) & 0xff, \ + ((c) >> 8) & 0xff, (c) & 0xff, \ + (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) + +static inline bool uuid_equal(const uuid_t *u1, const uuid_t *u2) +{ + return memcmp(u1, u2, sizeof(uuid_t)) == 0; +} +#endif +#else +#define HAVE_METADATA_PORT_INFO +#endif /* !(RHEL >= 7.5) && !(SLES >= 12.4) */ +#else /* > 4.13 */ +#define HAVE_METADATA_PORT_INFO +#define 
HAVE_HWTSTAMP_FILTER_NTP_ALL +#define HAVE_NDO_SETUP_TC_CHAIN_INDEX +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#define HAVE_PTP_CLOCK_DO_AUX_WORK +#endif /* 4.13.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef ethtool_link_ksettings_del_link_mode +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode) \ + __clear_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) +#endif +#endif /* ETHTOOL_GLINKSETTINGS */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC +#endif + +#define TIMER_DATA_TYPE unsigned long +#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) + +#define timer_setup(timer, callback, flags) \ + __setup_timer((timer), (TIMER_FUNC_TYPE)(callback), \ + (TIMER_DATA_TYPE)(timer), (flags)) + +#define from_timer(var, callback_timer, timer_fieldname) \ + container_of(callback_timer, typeof(*var), timer_fieldname) + +#ifndef xdp_do_flush_map +#define xdp_do_flush_map() do {} while (0) +#endif +struct _kc_xdp_buff { + void *data; + void *data_end; + void *data_hard_start; +}; +#define xdp_buff _kc_xdp_buff +struct _kc_bpf_prog { +}; +#define bpf_prog _kc_bpf_prog +#ifndef DIV_ROUND_DOWN_ULL +#define DIV_ROUND_DOWN_ULL(ll, d) \ + ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; }) +#endif /* DIV_ROUND_DOWN_ULL */ +#else /* > 4.14 */ +#ifndef DISABLE_PACKET_SPLIT +#define HAVE_XDP_SUPPORT +#endif +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_TCF_EXTS_HAS_ACTION +#endif /* 4.14.0 */ + +/*****************************************************************************/ +#ifndef ETHTOOL_GLINKSETTINGS + +#define __ETHTOOL_LINK_MODE_MASK_NBITS 32 +#define ETHTOOL_LINK_MASK_SIZE BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS) + +/** + * struct ethtool_link_ksettings + * @link_modes: supported and advertising, single item arrays + * @link_modes.supported: bitmask of supported link speeds + * @link_modes.advertising: bitmask of currently advertised speeds + * @base: base link details + * @base.speed: current link speed + * @base.port: current port type + * @base.duplex: current duplex mode + * @base.autoneg: current autonegotiation settings + * + * This struct and the following macros provide a way to support the old + * ethtool get/set_settings API on older kernels, but in the style of the new + * GLINKSETTINGS API. In this way, the same code can be used to support both + * APIs as seamlessly as possible. + * + * It should be noted the old API only has support up to the first 32 bits. 
+ */ +struct ethtool_link_ksettings { + struct { + u32 speed; + u8 port; + u8 duplex; + u8 autoneg; + } base; + struct { + unsigned long supported[ETHTOOL_LINK_MASK_SIZE]; + unsigned long advertising[ETHTOOL_LINK_MASK_SIZE]; + } link_modes; +}; + +#define ETHTOOL_LINK_NAME_advertising(mode) ADVERTISED_ ## mode +#define ETHTOOL_LINK_NAME_supported(mode) SUPPORTED_ ## mode +#define ETHTOOL_LINK_NAME(name) ETHTOOL_LINK_NAME_ ## name +#define ETHTOOL_LINK_CONVERT(name, mode) ETHTOOL_LINK_NAME(name)(mode) + +/** + * ethtool_link_ksettings_zero_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + */ +#define ethtool_link_ksettings_zero_link_mode(ptr, name)\ + (*((ptr)->link_modes.name) = 0x0) + +/** + * ethtool_link_ksettings_add_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to add + */ +#define ethtool_link_ksettings_add_link_mode(ptr, name, mode)\ + (*((ptr)->link_modes.name) |= (typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode)) + +/** + * ethtool_link_ksettings_del_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to delete + */ +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode)\ + (*((ptr)->link_modes.name) &= ~(typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode)) + +/** + * ethtool_link_ksettings_test_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to add + */ +#define ethtool_link_ksettings_test_link_mode(ptr, name, mode)\ + (!!(*((ptr)->link_modes.name) & ETHTOOL_LINK_CONVERT(name, mode))) + +/** + * _kc_ethtool_ksettings_to_cmd - Convert ethtool_link_ksettings to ethtool_cmd + * @ks: ethtool_link_ksettings struct + * @cmd: ethtool_cmd struct + * + * Convert an ethtool_link_ksettings structure into the older ethtool_cmd + * structure. We provide this in kcompat.h so that drivers can easily + * implement the older .{get|set}_settings as wrappers around the new api. + * Hence, we keep it prefixed with _kc_ to make it clear this isn't actually + * a real function in the kernel. 
+ */ +static inline void +_kc_ethtool_ksettings_to_cmd(struct ethtool_link_ksettings *ks, + struct ethtool_cmd *cmd) +{ + cmd->supported = (u32)ks->link_modes.supported[0]; + cmd->advertising = (u32)ks->link_modes.advertising[0]; + ethtool_cmd_speed_set(cmd, ks->base.speed); + cmd->duplex = ks->base.duplex; + cmd->autoneg = ks->base.autoneg; + cmd->port = ks->base.port; +} + +#endif /* !ETHTOOL_GLINKSETTINGS */ + +/*****************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)))) +#define phy_speed_to_str _kc_phy_speed_to_str +const char *_kc_phy_speed_to_str(int speed); +#else /* (LINUX >= 4.14.0) || (SLES > 12.3.0) || (RHEL > 7.5) */ +#include +#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0)))) +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_TCF_BLOCK +#define TC_SETUP_MQPRIO 0 +#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO +#else /* RHEL >= 7.6 || SLES >= 15.1 */ +#define TC_SETUP_MQPRIO 0 +#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO +#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */ +void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, + struct ethtool_link_ksettings *src); +#define ethtool_intersect_link_masks _kc_ethtool_intersect_link_masks +#else /* >= 4.15 */ +#define HAVE_NDO_BPF +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_TCF_BLOCK +#endif /* 4.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,4,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +/* The return value of the strscpy() and strlcpy() functions is different. + * This could be potentially hazard for the future. + * To avoid this the void result is forced. + * So it is not possible use this function with the return value. + * Return value is required in kernel 4.3 through 4.15 + */ +#define strscpy(...) (void)(strlcpy(__VA_ARGS__)) +#endif /* !RHEL >= 7.7 && !SLES12sp4+ && !SLES15sp1+ */ + +#define pci_printk(level, pdev, fmt, arg...) \ + dev_printk(level, &(pdev)->dev, fmt, ##arg) +#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg) +#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg) +#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg) +#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg) +#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg) +#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg) +#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg) +#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg) + +#ifndef array_index_nospec +static inline unsigned long _kc_array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + /* + * Always calculate and emit the mask even if the compiler + * thinks the mask is not needed. 
The compiler does not take + * into account the value of @index under speculation. + */ + OPTIMIZER_HIDE_VAR(index); + return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1); +} + +#define array_index_nospec(index, size) \ +({ \ + typeof(index) _i = (index); \ + typeof(size) _s = (size); \ + unsigned long _mask = _kc_array_index_mask_nospec(_i, _s); \ + \ + BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ + BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ + \ + (typeof(_i)) (_i & _mask); \ +}) +#endif /* array_index_nospec */ +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0)))) +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#include +#if 0 +static inline bool +tc_cls_can_offload_and_chain0(const struct net_device *dev, + struct tc_cls_common_offload *common) +{ + if (!tc_can_offload(dev)) + return false; + if (common->chain_index) + return false; + + return true; +} +#endif +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */ +#ifndef sizeof_field +#define sizeof_field(TYPE, MEMBER) (sizeof((((TYPE *)0)->MEMBER))) +#endif /* sizeof_field */ +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,5,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0) || \ + SLE_VERSION_CODE >= SLE_VERSION(15,1,0)) +/* + * Copy bitmap and clear tail bits in last word. + */ +static inline void +bitmap_copy_clear_tail(unsigned long *dst, const unsigned long *src, unsigned int nbits) +{ + bitmap_copy(dst, src, nbits); + if (nbits % BITS_PER_LONG) + dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits); +} + +/* + * On 32-bit systems bitmaps are represented as u32 arrays internally, and + * therefore conversion is not needed when copying data from/to arrays of u32. 
+ */ +#if BITS_PER_LONG == 64 +void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits); +#else +#define bitmap_from_arr32(bitmap, buf, nbits) \ + bitmap_copy_clear_tail((unsigned long *) (bitmap), \ + (const unsigned long *) (buf), (nbits)) +#endif /* BITS_PER_LONG == 64 */ +#endif /* !(RHEL >= 8.0) && !(SLES >= 12.5 && SLES < 15.0 || SLES >= 15.1) */ +#else /* >= 4.16 */ +#include +#define HAVE_XDP_BUFF_RXQ +#define HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#define HAVE_TC_FLOWER_OFFLOAD_COMMON_EXTACK +#define HAVE_TCF_MIRRED_DEV +#define HAVE_VF_STATS_DROPPED +#endif /* 4.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0)) +#include +#include +#define PCIE_SPEED_16_0GT 0x17 +#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */ +#define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */ +#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */ +void _kc_pcie_print_link_status(struct pci_dev *dev); +#define pcie_print_link_status _kc_pcie_print_link_status +#else /* >= 4.17.0 */ +#define HAVE_XDP_BUFF_IN_XDP_H +#endif /* 4.17.0 */ + +/*****************************************************************************/ +#if IS_ENABLED(CONFIG_NET_DEVLINK) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)) +#include +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) && \ + (SLE_VERSION_CODE < SLE_VERSION(15,1,0)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,7))) +#ifndef HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR +enum devlink_port_flavour { + DEVLINK_PORT_FLAVOUR_PHYSICAL, + DEVLINK_PORT_FLAVOUR_CPU, + DEVLINK_PORT_FLAVOUR_DSA, + DEVLINK_PORT_FLAVOUR_PCI_PF, + DEVLINK_PORT_FLAVOUR_PCI_VF, +}; +#endif +#endif /* <4.18.0 && +#ifndef macvlan_supports_dest_filter +#define macvlan_supports_dest_filter _kc_macvlan_supports_dest_filter +static inline bool _kc_macvlan_supports_dest_filter(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->mode == MACVLAN_MODE_PRIVATE || + macvlan->mode == MACVLAN_MODE_VEPA || + macvlan->mode == MACVLAN_MODE_BRIDGE; +} +#endif + +#if (!SLE_VERSION_CODE || (SLE_VERSION_CODE < SLE_VERSION(15,1,0))) +#ifndef macvlan_accel_priv +#define macvlan_accel_priv _kc_macvlan_accel_priv + +#endif + +#ifndef macvlan_release_l2fw_offload +#define macvlan_release_l2fw_offload _kc_macvlan_release_l2fw_offload + +#endif +#endif /* !SLES || SLES < 15.1 */ +#endif /* NETIF_F_HW_L2FW_DOFFLOAD */ +#else +#include +#include +#define HAVE_XDP_FRAME_STRUCT +#define HAVE_XDP_SOCK +#define HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS +#define NO_NDO_XDP_FLUSH +#define HAVE_AF_XDP_SUPPORT +#endif /* 4.18.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) && \ + (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(8,2))) +#define HAVE_DEVLINK_REGIONS +#endif /* RHEL >= 8.0 && RHEL <= 8.2 */ +#define bitmap_alloc(nbits, flags) \ + kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long), flags) +#define bitmap_zalloc(nbits, flags) bitmap_alloc(nbits, ((flags) | __GFP_ZERO)) +#define bitmap_free(bitmap) kfree(bitmap) +#ifdef ETHTOOL_GLINKSETTINGS +#define ethtool_ks_clear(ptr, name) \ + ethtool_link_ksettings_zero_link_mode(ptr, name) +#define ethtool_ks_add_mode(ptr, name, mode) \ + ethtool_link_ksettings_add_link_mode(ptr, name, mode) +#define 
ethtool_ks_del_mode(ptr, name, mode) \ + ethtool_link_ksettings_del_link_mode(ptr, name, mode) +#define ethtool_ks_test(ptr, name, mode) \ + ethtool_link_ksettings_test_link_mode(ptr, name, mode) +#endif /* ETHTOOL_GLINKSETTINGS */ +#define HAVE_NETPOLL_CONTROLLER +#define REQUIRE_PCI_CLEANUP_AER_ERROR_STATUS +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_TCF_MIRRED_DEV +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#endif +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) ||\ + (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +#undef HAVE_TCF_EXTS_TO_LIST +#endif /* RHEL8.0+ */ + +static inline void __kc_metadata_dst_free(void *md_dst) +{ + kfree(md_dst); +} + +#define metadata_dst_free(md_dst) __kc_metadata_dst_free(md_dst) +#else /* >= 4.19.0 */ +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#define NO_NETDEV_BPF_PROG_ATTACHED +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_NETDEV_SB_DEV +#undef HAVE_TCF_EXTS_TO_LIST +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +#define HAVE_TCF_VLAN_TPID +#define HAVE_RHASHTABLE_TYPES +#define HAVE_DEVLINK_REGIONS +#define HAVE_DEVLINK_PARAMS +#endif /* 4.19.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)) +#define HAVE_XDP_UMEM_PROPS +#ifdef HAVE_AF_XDP_SUPPORT +#ifndef napi_if_scheduled_mark_missed +static inline bool __kc_napi_if_scheduled_mark_missed(struct napi_struct *n) +{ + unsigned long val, new; + + do { + val = READ_ONCE(n->state); + if (val & NAPIF_STATE_DISABLE) + return true; + + if (!(val & NAPIF_STATE_SCHED)) + return false; + + new = val | NAPIF_STATE_MISSED; + } while (cmpxchg(&n->state, val, new) != val); + + return true; +} + +#define napi_if_scheduled_mark_missed __kc_napi_if_scheduled_mark_missed +#endif /* !napi_if_scheduled_mark_missed */ +#endif /* HAVE_AF_XDP_SUPPORT */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0))) +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#endif /* RHEL >= 8.0 */ +#if ((SLE_VERSION_CODE >= SLE_VERSION(12,5,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#endif /* SLE == 12sp5 || SLE >= 15sp1 */ +#else /* >= 4.20.0 */ +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#ifdef HAVE_XDP_SUPPORT +#define HAVE_AF_XDP_ZC_SUPPORT +#endif +#define HAVE_VXLAN_TYPE +#define HAVE_ETF_SUPPORT /* Earliest TxTime First */ +#endif /* 4.20.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,0))) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#define NETLINK_MAX_COOKIE_LEN 20 +struct netlink_ext_ack { + const char *_msg; + const struct nlattr *bad_attr; + u8 cookie[NETLINK_MAX_COOKIE_LEN]; + u8 cookie_len; +}; + +#endif /* < 4.12 */ +#if 0 +static inline int _kc_dev_open(struct net_device *netdev, + struct netlink_ext_ack __always_unused *extack) +{ + return dev_open(netdev); +} + +#define dev_open _kc_dev_open +#endif +static inline int +_kc_dev_change_flags(struct net_device *netdev, unsigned int flags, + struct netlink_ext_ack __always_unused *extack) +{ + return dev_change_flags(netdev, flags); +} + +#define dev_change_flags _kc_dev_change_flags +#endif /* !(RHEL_RELEASE_CODE && RHEL > RHEL(8,0)) */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= 
RHEL_RELEASE_VERSION(7,7) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +#else /* RHEL >= 7.7 && RHEL < 8.0 || RHEL >= 8.1 */ +#ifndef HAVE_DEFINE_PTP_SYSTEM +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; +}; + +static inline void +ptp_read_system_prets(struct ptp_system_timestamp __always_unused *sts) +{ + ; +} + +static inline void +ptp_read_system_postts(struct ptp_system_timestamp __always_unused *sts) +{ + ; +} +#endif /* !HAVE_DEFINE_PTP_SYSTEM */ +#endif /* !(RHEL >= 7.7 && RHEL != 8.0) */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#endif /* RHEL 8.1 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)) +#define HAVE_TC_INDIR_BLOCK +#endif /* RHEL 8.2 */ +#else /* >= 5.0.0 */ +#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_DMA_ALLOC_COHERENT_ZEROES_MEM +#define HAVE_GENEVE_TYPE +#define HAVE_TC_INDIR_BLOCK +#endif /* 5.0.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_DEVLINK_INFO_GET +#define HAVE_DEVLINK_FLASH_UPDATE +#else /* RHEL < 8.1 */ +#ifdef HAVE_TC_SETUP_CLSFLOWER +#include + +#ifndef HAVE_DEFINE_FLOW_CORRELATION +struct flow_match { + struct flow_dissector *dissector; + void *mask; + void *key; +}; + +struct flow_match_basic { + struct flow_dissector_key_basic *key, *mask; +}; + +struct flow_match_control { + struct flow_dissector_key_control *key, *mask; +}; + +struct flow_match_eth_addrs { + struct flow_dissector_key_eth_addrs *key, *mask; +}; + +#ifdef HAVE_TC_FLOWER_ENC +struct flow_match_enc_keyid { + struct flow_dissector_key_keyid *key, *mask; +}; +#endif + +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +struct flow_match_vlan { + struct flow_dissector_key_vlan *key, *mask; +}; +#endif + +struct flow_match_ipv4_addrs { + struct flow_dissector_key_ipv4_addrs *key, *mask; +}; + +struct flow_match_ipv6_addrs { + struct flow_dissector_key_ipv6_addrs *key, *mask; +}; + +struct flow_match_ports { + struct flow_dissector_key_ports *key, *mask; +}; + +struct flow_rule { + struct flow_match match; +#if 0 + /* In 5.1+ kernels, action is a member of struct flow_rule but is + * not compatible with how we kcompat tc_cls_flower_offload_flow_rule + * below. By not declaring it here, any driver that attempts to use + * action as an element of struct flow_rule will fail to compile + * instead of silently trying to access memory that shouldn't be. 
+ */ + struct flow_action action; +#endif +}; + +void txgbe_flow_rule_match_basic(const struct flow_rule *rule, + struct flow_match_basic *out); +void txgbe_flow_rule_match_control(const struct flow_rule *rule, + struct flow_match_control *out); +void txgbe_flow_rule_match_eth_addrs(const struct flow_rule *rule, + struct flow_match_eth_addrs *out); +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +void txgbe_flow_rule_match_vlan(const struct flow_rule *rule, + struct flow_match_vlan *out); +#endif +void txgbe_flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out); +void txgbe_flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out); +void txgbe_flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out); +#ifdef HAVE_TC_FLOWER_ENC +void txgbe_flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out); +void txgbe_flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out); +void txgbe_flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out); +void txgbe_flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out); +void txgbe_flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out); +#endif + +static inline struct flow_rule * +tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd) +{ + return (struct flow_rule *)&tc_flow_cmd->dissector; +} + +static inline bool flow_rule_match_key(const struct flow_rule *rule, + enum flow_dissector_key_id key) +{ + return dissector_uses_key(rule->match.dissector, key); +} +#endif /* !HAVE_DEFINE_FLOW_CORRELATION */ +#endif /* HAVE_TC_SETUP_CLSFLOWER */ + +#endif /* RHEL < 8.1 */ + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define devlink_params_publish(devlink) do { } while (0) +#define devlink_params_unpublish(devlink) do { } while (0) +#endif + +#else /* >= 5.1.0 */ +#define HAVE_NDO_FDB_ADD_EXTACK +#define NO_XDP_QUERY_XSK_UMEM +#define HAVE_AF_XDP_NETDEV_UMEM +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_TC_FLOWER_ENC_IP +#define HAVE_DEVLINK_INFO_GET +#define HAVE_DEVLINK_FLASH_UPDATE +#define HAVE_DEVLINK_PORT_PARAMS +#endif /* 5.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)) +#ifndef HAVE_NETDEV_XMIT_MORE +#if (defined HAVE_SKB_XMIT_MORE) && \ +(!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#define netdev_xmit_more() (skb->xmit_more) +#else +#define netdev_xmit_more() (0) +#endif +#endif + +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#ifndef eth_get_headlen +#ifndef HAVE_ETH_EXTENDED_HEADLEN +static inline u32 +__kc_eth_get_headlen(const struct net_device __always_unused *dev, void *data, + unsigned int len) +{ + return eth_get_headlen(data, len); +} + +#define eth_get_headlen(dev, data, len) __kc_eth_get_headlen(dev, data, len) +#endif /* !HAVE_ETH_EXTENDED_HEADLEN */ +#endif /* !eth_get_headlen */ +#endif /* !RHEL >= 8.2 */ + +#ifndef mmiowb +#ifdef CONFIG_IA64 +#define mmiowb() asm volatile ("mf.a" ::: "memory") +#else +#define mmiowb() +#endif +#endif /* mmiowb */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,2)) +#if IS_ENABLED(CONFIG_NET_DEVLINK) && !defined(devlink_port_attrs_set) +#if 0 +static inline void +_kc_devlink_port_attrs_set(struct devlink_port 
*devlink_port, + struct _kc_devlink_port_attrs *attrs) +{ + devlink_port_attrs_set(devlink_port, attrs->flavour, + attrs->phys.port_number, attrs->split, + attrs->phys.split_subport_number); +} + +#define devlink_port_attrs_set _kc_devlink_port_attrs_set +#endif +#endif /* CONFIG_NET_DEVLINK && !devlink_port_attrs_set */ +#endif /* RHEL_RELEASE_VERSION(8,1)) +#define HAVE_NDO_GET_DEVLINK_PORT +#endif /* RHEL > 8.1 */ + +#else /* >= 5.2.0 */ +#define HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED +#define SPIN_UNLOCK_IMPLIES_MMIOWB +#define HAVE_NDO_GET_DEVLINK_PORT +#endif /* 5.2.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))) +#define flow_block_offload tc_block_offload +#define flow_block_command tc_block_command +#define flow_cls_offload tc_cls_flower_offload +#define flow_block_binder_type tcf_block_binder_type +#define flow_cls_common_offload tc_cls_common_offload +#define flow_cls_offload_flow_rule tc_cls_flower_offload_flow_rule +#define FLOW_CLS_REPLACE TC_CLSFLOWER_REPLACE +#define FLOW_CLS_DESTROY TC_CLSFLOWER_DESTROY +#define FLOW_CLS_STATS TC_CLSFLOWER_STATS +#define FLOW_CLS_TMPLT_CREATE TC_CLSFLOWER_TMPLT_CREATE +#define FLOW_CLS_TMPLT_DESTROY TC_CLSFLOWER_TMPLT_DESTROY +#define FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS \ + TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS +#define FLOW_BLOCK_BIND TC_BLOCK_BIND +#define FLOW_BLOCK_UNBIND TC_BLOCK_UNBIND + +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#include + +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#else /* RHEL >= 8.2 */ +#define HAVE_FLOW_BLOCK_API +#define HAVE_DEVLINK_PORT_ATTR_PCI_VF +#endif /* RHEL >= 8.2 */ + +#ifndef ETH_P_LLDP +#define ETH_P_LLDP 0x88CC +#endif /* !ETH_P_LLDP */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,2)) +#if IS_ENABLED(CONFIG_NET_DEVLINK) +static inline void +devlink_flash_update_begin_notify(struct devlink __always_unused *devlink) +{ +} + +static inline void +devlink_flash_update_end_notify(struct devlink __always_unused *devlink) +{ +} + +static inline void +devlink_flash_update_status_notify(struct devlink __always_unused *devlink, + const char __always_unused *status_msg, + const char __always_unused *component, + unsigned long __always_unused done, + unsigned long __always_unused total) +{ +} +#endif /* CONFIG_NET_DEVLINK */ +#endif /* = 5.3.0 */ +#define XSK_UMEM_RETURNS_XDP_DESC +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0)) +#define HAVE_XSK_UMEM_HAS_ADDRS +#endif /* SLE < 15.3 */ +#endif /* < 5.8.0*/ +#define HAVE_FLOW_BLOCK_API +#define HAVE_DEVLINK_PORT_ATTR_PCI_VF +#if IS_ENABLED(CONFIG_DIMLIB) +#define HAVE_CONFIG_DIMLIB +#endif +#endif /* 5.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(15,2,0))) + +#ifndef HAVE_DEFINE_SKB_FRAG_OFF +static inline unsigned int skb_frag_off(const skb_frag_t *frag) +{ + return frag->page_offset; +} +#endif + +#ifndef HAVE_DEFINE_SKB_FRAG_ADD +static inline void skb_frag_off_add(skb_frag_t *frag, int delta) +{ + frag->page_offset += delta; +} +#endif + +#define __flow_indr_block_cb_register __tc_indr_block_cb_register +#define __flow_indr_block_cb_unregister __tc_indr_block_cb_unregister +#endif /* !(RHEL >= 8.2) && !(SLES >= 15sp2) */ +#if (SLE_VERSION_CODE 
>= SLE_VERSION(15,2,0)) +#define HAVE_NDO_XSK_WAKEUP +#endif /* SLES15sp2 */ +#else /* >= 5.4.0 */ +#define HAVE_NDO_XSK_WAKEUP +#endif /* 5.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,5,0)) +static inline unsigned long _kc_bitmap_get_value8(const unsigned long *map, + unsigned long start) +{ + const size_t index = BIT_WORD(start); + const unsigned long offset = start % BITS_PER_LONG; + + return (map[index] >> offset) & 0xFF; +} +#define bitmap_get_value8 _kc_bitmap_get_value8 + +static inline void _kc_bitmap_set_value8(unsigned long *map, + unsigned long value, + unsigned long start) +{ + const size_t index = BIT_WORD(start); + const unsigned long offset = start % BITS_PER_LONG; + + map[index] &= ~(0xFFUL << offset); + map[index] |= value << offset; +} +#define bitmap_set_value8 _kc_bitmap_set_value8 + +#endif /* 5.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)) +#ifdef HAVE_AF_XDP_SUPPORT +#define xsk_umem_release_addr xsk_umem_discard_addr +#define xsk_umem_release_addr_rq xsk_umem_discard_addr_rq +#endif /* HAVE_AF_XDP_SUPPORT */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3)) || \ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0))) +#define HAVE_TX_TIMEOUT_TXQUEUE +#endif +#else /* >= 5.6.0 */ +#define HAVE_TX_TIMEOUT_TXQUEUE +#endif /* 5.6.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)) +u64 _kc_pci_get_dsn(struct pci_dev *dev); +#define pci_get_dsn(dev) _kc_pci_get_dsn(dev) +#if !(SLE_VERSION_CODE > SLE_VERSION(15,2,0)) && \ + !((LINUX_VERSION_CODE == KERNEL_VERSION(5,3,18)) && \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(14,0,0))) && \ + !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) +#define pci_aer_clear_nonfatal_status pci_cleanup_aer_uncorrect_error_status +#endif + +#define cpu_latency_qos_update_request pm_qos_update_request +#define cpu_latency_qos_add_request(arg1, arg2) pm_qos_add_request(arg1, PM_QOS_CPU_DMA_LATENCY, arg2) +#define cpu_latency_qos_remove_request pm_qos_remove_request + +#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID +#define DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID "fw.bundle_id" +#endif +#else /* >= 5.7.0 */ +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT +#define HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT +#endif /* 5.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0)) +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#endif /* (RHEL < 8.4) || (SLE < 15.3) */ +#define flex_array_size(p, member, count) \ + array_size(count, sizeof(*(p)->member) + __must_be_array((p)->member)) +#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0))) +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#ifndef xsk_umem_get_rx_frame_size +static inline u32 _xsk_umem_get_rx_frame_size(struct xdp_umem *umem) +{ + return umem->chunk_size_nohr - XDP_PACKET_HEADROOM; +} + +#define xsk_umem_get_rx_frame_size _xsk_umem_get_rx_frame_size +#endif /* xsk_umem_get_rx_frame_size */ +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#else /* SLE >= 15.3 */ +#define HAVE_XDP_BUFF_FRAME_SZ +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#endif /* SLE >= 
15.3 */ +#else /* >= 5.8.0 */ +#define HAVE_TC_FLOW_INDIR_DEV +#define HAVE_TC_FLOW_INDIR_BLOCK_CLEANUP +#define HAVE_XDP_BUFF_FRAME_SZ +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#endif /* 5.8.0 */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) +#define HAVE_TC_FLOW_INDIR_DEV +#endif +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,3,0))) +#define HAVE_TC_FLOW_INDIR_DEV +#endif /* SLE_VERSION_CODE && SLE_VERSION_CODE >= SLES15SP3 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)) +#define HAVE_XDP_QUERY_PROG +#else /* >= 5.9.0 */ +#define HAVE_FLOW_INDIR_BLOCK_QDISC +#endif /* 5.9.0 */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,3))) +#define HAVE_FLOW_INDIR_BLOCK_QDISC +#endif +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,3,0))) +#define HAVE_FLOW_INDIR_BLOCK_QDISC +#endif /* SLE_VERSION_CODE && SLE_VERSION_CODE >= SLES15SP3 */ +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)) +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0)) +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#else /* SLE >= 15.3 */ + +#ifndef DEVLINK_FLASH_OVERWRITE_SETTINGS +#define DEVLINK_FLASH_OVERWRITE_SETTINGS BIT(0) +#endif + +#ifndef DEVLINK_FLASH_OVERWRITE_IDENTIFIERS +#define DEVLINK_FLASH_OVERWRITE_IDENTIFIERS BIT(1) +#endif +#endif /* !(SLE >= 15.3) */ + +#if (!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,3,0)))) +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xsk_get_pool_from_qid xdp_get_umem_from_qid +#define xsk_pool_get_rx_frame_size xsk_umem_get_rx_frame_size +#define xsk_pool_set_rxq_info xsk_buff_set_rxq_info +#define xsk_pool_dma_unmap xsk_buff_dma_unmap +#define xsk_pool_dma_map xsk_buff_dma_map +#define xsk_tx_peek_desc xsk_umem_consume_tx +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL +#include +static inline void +_kc_xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, + void __always_unused *pool) +{ + xsk_buff_dma_sync_for_cpu(xdp); +} + +#define xsk_buff_dma_sync_for_cpu(xdp, pool) \ + _kc_xsk_buff_dma_sync_for_cpu(xdp, pool) +#endif /* HAVE_MEM_TYPE_XSK_BUFF_POOL */ +#else /* SLE >= 15.3 */ +#define HAVE_NETDEV_BPF_XSK_POOL +#endif /* SLE >= 15.3 */ +#else /* >= 5.10.0 */ +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#define HAVE_NETDEV_BPF_XSK_POOL +#endif /* 5.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)) +#define HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY +#else /* >= 5.11.0 */ +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW +#undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#endif /* 5.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,12,0)) +#else /* >= 5.12.0 */ +#define HAVE_UDP_TUNNEL_NIC_INFO +#endif /* 5.12.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,14,0)) +#else /* >= 5.14.0 */ +#ifdef HAVE_XDP_SUPPORT +#define HAVE_XDP_NO_RETURN_RX +#endif +#endif /* 5.14.0 */ + 
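+/* Illustrative usage only (not part of the original driver sources): the
+ * HAVE_* flags defined above are meant to be consumed by the driver so a
+ * single source tree builds against both old and new ndo prototypes. For
+ * example, .ndo_tx_timeout gained a txqueue argument in 5.6, which is what
+ * HAVE_TX_TIMEOUT_TXQUEUE tracks; the callback name below is hypothetical.
+ */
+#ifdef HAVE_TX_TIMEOUT_TXQUEUE
+static void example_tx_timeout(struct net_device *netdev, unsigned int txqueue);
+#else
+static void example_tx_timeout(struct net_device *netdev);
+#endif
+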
+/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,15,0)) +#define NEED_DEVLINK_ALLOC_SETS_DEV +#define HAVE_DEVLINK_REGISTER_SETS_DEV +#else /* >= 5.15.0 */ +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_NDO_ETH_IOCTL +#define HAVE_DEVICE_IN_MDEV_PARENT_OPS +#endif /* 5.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,16,0)) +#else /* >= 5.16.0 */ +#undef HAVE_PASID_SUPPORT +#define HAVE_DEVLINK_SET_FEATURES +#define HAVE_DEVLINK_NOTIFY_REGISTER +#endif /* 5.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,17,0)) +#define NEED_ETH_HW_ADDR_SET +#define NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#else /* >=5.17.0*/ +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif /* 5.17.0 */ + +#ifdef NEED_ETH_HW_ADDR_SET +#ifndef ETH_HW_ADDR_SET +void _kc_eth_hw_addr_set_txgbe(struct net_device *dev, const void *addr); +#ifndef eth_hw_addr_set +#define eth_hw_addr_set(dev, addr) \ + _kc_eth_hw_addr_set_txgbe(dev, addr) +#endif /* eth_hw_addr_set */ +#endif /* ETH_HW_ADDR_SET */ +#endif /* NEED_ETH_HW_ADDR_SET */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6,1,0)) +#else /* >=6.1.0*/ +#define HAVE_NOT_NAPI_WEIGHT +#endif /* 6.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6,2,0)) +#else /* >=6.2.0*/ +#define HAVE_NOT_PTT_ADJFREQ +#endif /* 6.2.0 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6))) +#undef HAVE_XDP_BUFF_RXQ +#undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#endif + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4)) +#undef HAVE_XDP_QUERY_PROG +#endif /* 8.4 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,5)) +#undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#endif +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,6)) +#else /* >= 8.6 */ +#define HAVE_ETHTOOL_COALESCE_EXTACK +#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,6)) +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif /* > 8.6 */ +#endif /* < 8.6 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8,8)) +#define HAVE_NOT_NAPI_WEIGHT +#endif /* == 8.8 */ +/*****************************************************************************/ +#if RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,6) +#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,0) +#undef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#endif +#endif +#if RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(9,0) +#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,3) +#undef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#endif +#endif + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,0)) +#else /* >= 9.0 */ +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(9,0)) +#undef HAVE_ETHTOOL_COALESCE_EXTACK +#undef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif /* = 9.0*/ +#define HAVE_XDP_BUFF_RXQ +#endif /* 9.0 */ + 
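+/* Illustrative sketch (not from the original patch): on kernels predating
+ * eth_hw_addr_set(), the _kc_eth_hw_addr_set_txgbe() fallback declared above
+ * would typically be a plain ETH_ALEN-byte copy into dev->dev_addr, e.g.:
+ *
+ *	void _kc_eth_hw_addr_set_txgbe(struct net_device *dev, const void *addr)
+ *	{
+ *		memcpy(dev->dev_addr, addr, ETH_ALEN);
+ *	}
+ *
+ * Newer kernels make netdev->dev_addr const, which is why writes have to go
+ * through the helper instead of assigning the array directly.
+ */
+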
+/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,2)) +#else /* >= 9.2 */ +#define HAVE_NOT_NAPI_WEIGHT +#endif /* < 9.2 */ + +/*****************************************************************************/ +#if SLE_VERSION_CODE >= SLE_VERSION(15,5,0) +#undef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#define NEED_ETH_HW_ADDR_SET +#endif + +#ifdef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#ifdef HAVE_XDP_BUFF_IN_XDP_H +#include +#else +#include +#endif /* HAVE_XDP_BUFF_IN_XDP_H */ +static inline int +_kc_xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, struct net_device *dev, + u32 queue_index, unsigned int __always_unused napi_id) +{ + return xdp_rxq_info_reg(xdp_rxq, dev, queue_index); +} + +#define xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id) \ + _kc_xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id) +#endif /* HAVE_XDP_RXQ_INFO_REG_3_PARAMS */ + +#ifdef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#ifdef HAVE_XDP_SUPPORT +#include +static inline void +_kc_bpf_warn_invalid_xdp_action(__maybe_unused struct net_device *dev, + __maybe_unused struct bpf_prog *prog, u32 act) +{ + bpf_warn_invalid_xdp_action(act); +} + +#define bpf_warn_invalid_xdp_action(dev, prog, act) \ + _kc_bpf_warn_invalid_xdp_action(dev, prog, act) +#endif /* HAVE_XDP_SUPPORT */ +#endif /* HAVE_NETDEV_PROG_XDP_WARN_ACTION */ + +#ifndef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif + +#ifndef HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_COALESCE_EXTACK +#endif + +#ifndef HAVE_PTP_CLOCK_INFO_ADJFINE +#define HAVE_PTP_CLOCK_INFO_ADJFINE +#endif + +#endif /* _TXGBE_KYLIN_H__ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c new file mode 100644 index 0000000000000000000000000000000000000000..f70d309baa35380f2cbf23a65957db76eedd6e72 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c @@ -0,0 +1,1303 @@ +/* + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on txgbe_lib.c, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + + +#include "txgbe.h" +#include "txgbe_sriov.h" + +#ifdef HAVE_TX_MQ +/** + * txgbe_cache_ring_dcb_vmdq - Descriptor ring to register mapping for VMDq + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for VMDq to the assigned rings. It + * will also try to cache the proper offsets if RSS/FCoE are enabled along + * with VMDq. 
+ * + **/ +static bool txgbe_cache_ring_dcb_vmdq(struct txgbe_adapter *adapter) +{ +#if IS_ENABLED(CONFIG_FCOE) + struct txgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; +#endif /* CONFIG_FCOE */ + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + int i; + u16 reg_idx; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + /* verify we have DCB enabled before proceeding */ + if (tcs <= 1) + return false; + + /* verify we have VMDq enabled before proceeding */ + if (!(adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)) + return false; + + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= tcs) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->rx_ring[i]->reg_idx = reg_idx; + } + + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= tcs) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->tx_ring[i]->reg_idx = reg_idx; + } + +#if IS_ENABLED(CONFIG_FCOE) + /* nothing to do if FCoE is disabled */ + if (!(adapter->flags & TXGBE_FLAG_FCOE_ENABLED)) + return true; + + /* The work is already done if the FCoE ring is shared */ + if (fcoe->offset < tcs) + return true; + + /* The FCoE rings exist separately, we need to move their reg_idx */ + if (fcoe->indices) { + u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + u8 fcoe_tc = txgbe_fcoe_get_tc(adapter); + + reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; + for (i = fcoe->offset; i < adapter->num_rx_queues; i++) { + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; + adapter->rx_ring[i]->reg_idx = reg_idx; + reg_idx++; + } + + reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; + for (i = fcoe->offset; i < adapter->num_tx_queues; i++) { + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; + adapter->tx_ring[i]->reg_idx = reg_idx; + reg_idx++; + } + } +#endif /* CONFIG_FCOE */ + + return true; +} + +/* txgbe_get_first_reg_idx - Return first register index associated with ring */ +static void txgbe_get_first_reg_idx(struct txgbe_adapter *adapter, u8 tc, + u16 *tx, u16 *rx) +{ + struct net_device *dev = adapter->netdev; + u8 num_tcs = netdev_get_num_tc(dev); + + *tx = 0; + *rx = 0; + + + if (num_tcs > 4) { + /* + * TCs : TC0/1 TC2/3 TC4-7 + * TxQs/TC: 32 16 8 + * RxQs/TC: 16 16 16 + */ + *rx = tc << 4; + if (tc < 3) + *tx = tc << 5; /* 0, 32, 64 */ + else if (tc < 5) + *tx = (tc + 2) << 4; /* 80, 96 */ + else + *tx = (tc + 8) << 3; /* 104, 112, 120 */ + } else { + /* + * TCs : TC0 TC1 TC2/3 + * TxQs/TC: 64 32 16 + * RxQs/TC: 32 32 32 + */ + *rx = tc << 5; + if (tc < 2) + *tx = tc << 6; /* 0, 64 */ + else + *tx = (tc + 4) << 4; /* 96, 112 */ + } + +} + +/** + * txgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for DCB to the assigned rings. 
+ * + **/ +static bool txgbe_cache_ring_dcb(struct txgbe_adapter *adapter) +{ + int tc, offset, rss_i, i; + u16 tx_idx, rx_idx; + struct net_device *dev = adapter->netdev; + u8 num_tcs = netdev_get_num_tc(dev); + + if (num_tcs <= 1) + return false; + + rss_i = adapter->ring_feature[RING_F_RSS].indices; + + for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) { + txgbe_get_first_reg_idx(adapter, (u8)tc, &tx_idx, &rx_idx); + for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) { + adapter->tx_ring[offset + i]->reg_idx = tx_idx; + adapter->rx_ring[offset + i]->reg_idx = rx_idx; + adapter->tx_ring[offset + i]->dcb_tc = (u8)tc; + adapter->rx_ring[offset + i]->dcb_tc = (u8)tc; + } + } + + return true; +} +#endif /* HAVE_TX_MQ */ + +/** + * txgbe_cache_ring_vmdq - Descriptor ring to register mapping for VMDq + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for VMDq to the assigned rings. It + * will also try to cache the proper offsets if RSS/FCoE/SRIOV are enabled along + * with VMDq. + * + **/ +static bool txgbe_cache_ring_vmdq(struct txgbe_adapter *adapter) +{ +#if IS_ENABLED(CONFIG_FCOE) + struct txgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; +#endif + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct txgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS]; + int i; + u16 reg_idx; + + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)) + return false; + + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { +#if IS_ENABLED(CONFIG_FCOE) + /* Allow first FCoE queue to be mapped as RSS */ + if (fcoe->offset && (i > fcoe->offset)) + break; +#endif + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->rx_ring[i]->reg_idx = reg_idx; + } + +#if IS_ENABLED(CONFIG_FCOE) + /* FCoE uses a linear block of queues so just assigning 1:1 */ + for (; i < adapter->num_rx_queues; i++, reg_idx++) + adapter->rx_ring[i]->reg_idx = reg_idx; +#endif + + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { +#if IS_ENABLED(CONFIG_FCOE) + /* Allow first FCoE queue to be mapped as RSS */ + if (fcoe->offset && (i > fcoe->offset)) + break; +#endif + /* If we are greater than indices move to next pool */ + if ((reg_idx & rss->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->tx_ring[i]->reg_idx = reg_idx; + } + +#if IS_ENABLED(CONFIG_FCOE) + /* FCoE uses a linear block of queues so just assigning 1:1 */ + for (; i < adapter->num_tx_queues; i++, reg_idx++) + adapter->tx_ring[i]->reg_idx = reg_idx; +#endif + + return true; +} + +/** + * txgbe_cache_ring_rss - Descriptor ring to register mapping for RSS + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for RSS, ATR, FCoE, and SR-IOV. 
+ * + **/ +static bool txgbe_cache_ring_rss(struct txgbe_adapter *adapter) +{ + int i, reg_idx; + + for (i = 0; i < adapter->num_rx_queues; i++) { + adapter->rx_ring[i]->reg_idx = i; + } + for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++) + adapter->tx_ring[i]->reg_idx = reg_idx; + for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++) + adapter->xdp_ring[i]->reg_idx = reg_idx; + + return true; +} + +/** + * txgbe_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + * + * Note, the order the various feature calls is important. It must start with + * the "most" features enabled at the same time, then trickle down to the + * least amount of features turned on at once. + **/ +static void txgbe_cache_ring_register(struct txgbe_adapter *adapter) +{ +#ifdef HAVE_TX_MQ + if (txgbe_cache_ring_dcb_vmdq(adapter)) + return; + + if (txgbe_cache_ring_dcb(adapter)) + return; + +#endif + if (txgbe_cache_ring_vmdq(adapter)) + return; + + txgbe_cache_ring_rss(adapter); +} + +#define TXGBE_RSS_64Q_MASK 0x3F +#define TXGBE_RSS_16Q_MASK 0xF +#define TXGBE_RSS_8Q_MASK 0x7 +#define TXGBE_RSS_4Q_MASK 0x3 +#define TXGBE_RSS_2Q_MASK 0x1 +#define TXGBE_RSS_DISABLED_MASK 0x0 + +#ifdef HAVE_TX_MQ +/** + * txgbe_set_dcb_vmdq_queues: Allocate queues for VMDq devices w/ DCB + * @adapter: board private structure to initialize + * + * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues + * and VM pools where appropriate. Also assign queues based on DCB + * priorities and map accordingly.. + * + **/ +static bool txgbe_set_dcb_vmdq_queues(struct txgbe_adapter *adapter) +{ + u16 i; + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; + u16 vmdq_m = 0; +#if IS_ENABLED(CONFIG_FCOE) + u16 fcoe_i = 0; +#endif + u8 tcs = netdev_get_num_tc(adapter->netdev); + + /* verify we have DCB enabled before proceeding */ + if (tcs <= 1) + return false; + + /* verify we have VMDq enabled before proceeding */ + if (!(adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)) + return false; + + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + + /* 16 pools w/ 8 TC per pool */ + if (tcs > 4) { + vmdq_i = min_t(u16, vmdq_i, 16); + vmdq_m = TXGBE_VMDQ_8Q_MASK; + /* 32 pools w/ 4 TC per pool */ + } else { + vmdq_i = min_t(u16, vmdq_i, 32); + vmdq_m = TXGBE_VMDQ_4Q_MASK; + } + +#if IS_ENABLED(CONFIG_FCOE) + /* queues in the remaining pools are available for FCoE */ + fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i; +#endif /* CONFIG_FCOE */ + + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* + * We do not support DCB, VMDq, and RSS all simultaneously + * so we will disable RSS since it is the lowest priority + */ + adapter->ring_feature[RING_F_RSS].indices = 1; + adapter->ring_feature[RING_F_RSS].mask = TXGBE_RSS_DISABLED_MASK; + + adapter->queues_per_pool = tcs;/*maybe same to num_rx_queues_per_pool*/ + adapter->num_rx_pools = vmdq_i; + adapter->num_rx_queues_per_pool = tcs; + + adapter->num_tx_queues = vmdq_i * tcs; + adapter->num_rx_queues = vmdq_i * tcs; + + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= 
~TXGBE_FLAG_FDIR_HASH_CAPABLE; + +#if IS_ENABLED(CONFIG_FCOE) + if (adapter->flags & TXGBE_FLAG_FCOE_ENABLED) { + struct txgbe_ring_feature *fcoe; + + fcoe = &adapter->ring_feature[RING_F_FCOE]; + + /* limit ourselves based on feature limits */ + fcoe_i = min_t(u16, fcoe_i, fcoe->limit); + + if (fcoe_i) { + /* alloc queues for FCoE separately */ + fcoe->indices = fcoe_i; + fcoe->offset = vmdq_i * tcs; + + /* add queues to adapter */ + adapter->num_tx_queues += fcoe_i; + adapter->num_rx_queues += fcoe_i; + } else if (tcs > 1) { + /* use queue belonging to FcoE TC */ + fcoe->indices = 1; + fcoe->offset = txgbe_fcoe_get_tc(adapter); + } else { + adapter->flags &= ~TXGBE_FLAG_FCOE_ENABLED; + + fcoe->indices = 0; + fcoe->offset = 0; + } + } +#endif /* CONFIG_FCOE */ + + /* configure TC to queue mapping */ + for (i = 0; i < tcs; i++) + netdev_set_tc_queue(adapter->netdev, (u8)i, 1, i); + + return true; +} + +/** + * txgbe_set_dcb_queues: Allocate queues for a DCB-enabled device + * @adapter: board private structure to initialize + * + * When DCB (Data Center Bridging) is enabled, allocate queues for + * each traffic class. If multiqueue isn't available,then abort DCB + * initialization. + * + * This function handles all combinations of DCB, RSS, and FCoE. + * + **/ +static bool txgbe_set_dcb_queues(struct txgbe_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + struct txgbe_ring_feature *f; + u16 rss_i, rss_m, i; + u16 tcs; + + /* Map queue offset and counts onto allocated tx queues */ + tcs = netdev_get_num_tc(dev); + + if (tcs <= 1) + return false; + + /* determine the upper limit for our current DCB mode */ +#ifndef HAVE_NETDEV_SELECT_QUEUE + rss_i = adapter->indices; +#else + rss_i = dev->num_tx_queues / tcs; +#endif + if (tcs > 4) { + /* 8 TC w/ 8 queues per TC */ + rss_i = min_t(u16, rss_i, 8); + rss_m = TXGBE_RSS_8Q_MASK; + } else { + /* 4 TC w/ 16 queues per TC */ + rss_i = min_t(u16, rss_i, 16); + rss_m = TXGBE_RSS_16Q_MASK; + } + + /* set RSS mask and indices */ + f = &adapter->ring_feature[RING_F_RSS]; + rss_i = min_t(u16, rss_i, f->limit); + f->indices = rss_i; + f->mask = rss_m; + + /* disable ATR as it is not supported when DCB is enabled */ + adapter->flags &= ~TXGBE_FLAG_FDIR_HASH_CAPABLE; + +#if IS_ENABLED(CONFIG_FCOE) + /* + * FCoE enabled queues require special configuration indexed + * by feature specific indices and mask. Here we map FCoE + * indices onto the DCB queue pairs allowing FCoE to own + * configuration later. + */ + if (adapter->flags & TXGBE_FLAG_FCOE_ENABLED) { + u8 tc = txgbe_fcoe_get_tc(adapter); + + f = &adapter->ring_feature[RING_F_FCOE]; + f->indices = min_t(u16, rss_i, f->limit); + f->offset = rss_i * tc; + } +#endif /* CONFIG_FCOE */ + + for (i = 0; i < tcs; i++) + netdev_set_tc_queue(dev, (u8)i, rss_i, rss_i * i); + + adapter->num_tx_queues = rss_i * tcs; + adapter->num_rx_queues = rss_i * tcs; + + return true; +} +#endif + +static u16 txgbe_xdp_queues(struct txgbe_adapter *adapter) +{ +#ifdef HAVE_XDP_SUPPORT + u16 queues = min_t(int, MAX_XDP_QUEUES, nr_cpu_ids); + + return adapter->xdp_prog ? queues : 0; +#else + return 0; +#endif +} + +/** + * txgbe_set_vmdq_queues: Allocate queues for VMDq devices + * @adapter: board private structure to initialize + * + * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues + * and VM pools where appropriate. If RSS is available, then also try and + * enable RSS and map accordingly. 
+ * + **/ +static bool txgbe_set_vmdq_queues(struct txgbe_adapter *adapter) +{ + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; + u16 vmdq_m = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; + u16 rss_m = TXGBE_RSS_DISABLED_MASK; +#if IS_ENABLED(CONFIG_FCOE) + u16 fcoe_i = 0; +#endif + + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)) + return false; + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + + /* double check we are limited to maximum pools */ + vmdq_i = min_t(u16, TXGBE_MAX_VMDQ_INDICES, vmdq_i); + + /* 64 pool mode with 2 queues per pool, or + * 16/32/64 pool mode with 1 queue per pool */ + if (vmdq_i > 32) { + vmdq_m = TXGBE_VMDQ_2Q_MASK; + rss_m = TXGBE_RSS_2Q_MASK; + rss_i = min_t(u16, rss_i, 2); + /* 32 pool mode with 4 queues per pool */ + } else { + vmdq_m = TXGBE_VMDQ_4Q_MASK; + rss_m = TXGBE_RSS_4Q_MASK; + /* We can support 4, 2, or 1 queues */ + rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1; + } + +#if IS_ENABLED(CONFIG_FCOE) + /* queues in the remaining pools are available for FCoE */ + fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m)); +#endif + + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* limit RSS based on user input and save for later use */ + adapter->ring_feature[RING_F_RSS].indices = rss_i; + adapter->ring_feature[RING_F_RSS].mask = rss_m; + + adapter->queues_per_pool = rss_i;/*maybe same to num_rx_queues_per_pool*/ + adapter->num_rx_pools = vmdq_i; + adapter->num_rx_queues_per_pool = rss_i; + + adapter->num_rx_queues = vmdq_i * rss_i; +#ifdef HAVE_TX_MQ + adapter->num_tx_queues = vmdq_i * rss_i; +#else + adapter->num_tx_queues = vmdq_i; +#endif /* HAVE_TX_MQ */ + adapter->num_xdp_queues = 0; + + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= ~TXGBE_FLAG_FDIR_HASH_CAPABLE; + +#if IS_ENABLED(CONFIG_FCOE) + /* + * FCoE can use rings from adjacent buffers to allow RSS + * like behavior. To account for this we need to add the + * FCoE indices to the total ring count. + */ + if (adapter->flags & TXGBE_FLAG_FCOE_ENABLED) { + struct txgbe_ring_feature *fcoe; + + fcoe = &adapter->ring_feature[RING_F_FCOE]; + + /* limit ourselves based on feature limits */ + fcoe_i = min_t(u16, fcoe_i, fcoe->limit); + + if (vmdq_i > 1 && fcoe_i) { + /* alloc queues for FCoE separately */ + fcoe->indices = fcoe_i; + fcoe->offset = vmdq_i * rss_i; + } else { + /* merge FCoE queues with RSS queues */ + fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus()); + + /* limit indices to rss_i if MSI-X is disabled */ + if (!(adapter->flags & TXGBE_FLAG_MSIX_ENABLED)) + fcoe_i = rss_i; + + /* attempt to reserve some queues for just FCoE */ + fcoe->indices = min_t(u16, fcoe_i, fcoe->limit); + fcoe->offset = fcoe_i - fcoe->indices; + fcoe_i -= rss_i; + } + + /* add queues to adapter */ + adapter->num_tx_queues += fcoe_i; + adapter->num_rx_queues += fcoe_i; + } +#endif /* CONFIG_FCOE */ + + return true; +} + +/** + * txgbe_set_rss_queues: Allocate queues for RSS + * @adapter: board private structure to initialize + * + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. 
+ * + **/ +static bool txgbe_set_rss_queues(struct txgbe_adapter *adapter) +{ + struct txgbe_ring_feature *f; + u16 rss_i; + + /* set mask for 16 queue limit of RSS */ + f = &adapter->ring_feature[RING_F_RSS]; + rss_i = f->limit; + + f->indices = rss_i; + f->mask = TXGBE_RSS_64Q_MASK; + + /* disable ATR by default, it will be configured below */ + adapter->flags &= ~TXGBE_FLAG_FDIR_HASH_CAPABLE; + + /* + * Use Flow Director in addition to RSS to ensure the best + * distribution of flows across cores, even when an FDIR flow + * isn't matched. + */ + if (rss_i > 1 && adapter->atr_sample_rate) { + f = &adapter->ring_feature[RING_F_FDIR]; + + rss_i = f->indices = f->limit; + + if (!(adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + adapter->flags |= TXGBE_FLAG_FDIR_HASH_CAPABLE; + } + +#if IS_ENABLED(CONFIG_FCOE) + /* + * FCoE can exist on the same rings as standard network traffic + * however it is preferred to avoid that if possible. In order + * to get the best performance we allocate as many FCoE queues + * as we can and we place them at the end of the ring array to + * avoid sharing queues with standard RSS on systems with 24 or + * more CPUs. + */ + if (adapter->flags & TXGBE_FLAG_FCOE_ENABLED) { + struct net_device *dev = adapter->netdev; + u16 fcoe_i; + + f = &adapter->ring_feature[RING_F_FCOE]; + + /* merge FCoE queues with RSS queues */ + fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus()); + fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues); + + /* limit indices to rss_i if MSI-X is disabled */ + if (!(adapter->flags & TXGBE_FLAG_MSIX_ENABLED)) + fcoe_i = rss_i; + + /* attempt to reserve some queues for just FCoE */ + f->indices = min_t(u16, fcoe_i, f->limit); + f->offset = fcoe_i - f->indices; + rss_i = max_t(u16, fcoe_i, rss_i); + } +#endif /* CONFIG_FCOE */ + + adapter->num_rx_queues = rss_i; +#ifdef HAVE_TX_MQ + adapter->num_tx_queues = rss_i; +#endif + adapter->num_xdp_queues = txgbe_xdp_queues(adapter); + return true; +} + +/* + * txgbe_set_num_queues: Allocate queues for device, feature dependent + * @adapter: board private structure to initialize + * + * This is the top level queue allocation routine. The order here is very + * important, starting with the "most" number of features turned on at once, + * and ending with the smallest set of features. This way large combinations + * can be allocated if they're turned on, and smaller combinations are the + * fallthrough conditions. + * + **/ +static void txgbe_set_num_queues(struct txgbe_adapter *adapter) +{ + /* Start with base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->queues_per_pool = 1;/*maybe same to num_rx_queues_per_pool*/ + adapter->num_xdp_queues = 0; + adapter->num_rx_pools = adapter->num_rx_queues; + adapter->num_rx_queues_per_pool = 1; + +#ifdef HAVE_TX_MQ + if (txgbe_set_dcb_vmdq_queues(adapter)) + return; + + if (txgbe_set_dcb_queues(adapter)) + return; +#endif + if (txgbe_set_vmdq_queues(adapter)) + return; + + txgbe_set_rss_queues(adapter); +} + +/** + * txgbe_acquire_msix_vectors - acquire MSI-X vectors + * @adapter: board private structure + * + * Attempts to acquire a suitable range of MSI-X vector interrupts. Will + * return a negative error code if unable to acquire MSI-X vectors for any + * reason. 
+ */ +static int txgbe_acquire_msix_vectors(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i, vectors, vector_threshold; + + if (!(adapter->flags & TXGBE_FLAG_MSIX_CAPABLE)) + return -EOPNOTSUPP; + + /* We start by asking for one vector per queue pair */ + vectors = max(adapter->num_rx_queues, adapter->num_tx_queues); + vectors = max(vectors, adapter->num_xdp_queues); + + /* It is easy to be greedy for MSI-X vectors. However, it really + * doesn't do much good if we have a lot more vectors than CPUs. We'll + * be somewhat conservative and only ask for (roughly) the same number + * of vectors as there are CPUs. + */ + vectors = min_t(int, vectors, num_online_cpus()); + + /* Some vectors are necessary for non-queue interrupts */ + vectors += NON_Q_VECTORS; + + /* Hardware can only support a maximum of hw.mac->max_msix_vectors. + * With features such as RSS and VMDq, we can easily surpass the + * number of Rx and Tx descriptor queues supported by our device. + * Thus, we cap the maximum in the rare cases where the CPU count also + * exceeds our vector limit + */ + vectors = min_t(int, vectors, hw->mac.max_msix_vectors); + + /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0] + * handler, and (2) an Other (Link Status Change, etc.) handler. + */ + vector_threshold = MIN_MSIX_COUNT; + + adapter->msix_entries = kcalloc(vectors, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!adapter->msix_entries) + return -ENOMEM; + + for (i = 0; i < vectors; i++) + adapter->msix_entries[i].entry = i; + + vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, + vector_threshold, vectors); + if (vectors < 0) { + /* A negative count of allocated vectors indicates an error in + * acquiring within the specified range of MSI-X vectors */ + e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n", + vectors); + + adapter->flags &= ~TXGBE_FLAG_MSIX_ENABLED; + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + + return vectors; + } + + /* we successfully allocated some number of vectors within our + * requested range. + */ + adapter->flags |= TXGBE_FLAG_MSIX_ENABLED; + + /* Adjust for only the vectors we'll use, which is minimum + * of max_q_vectors, or the number of vectors we were allocated. + */ + vectors -= NON_Q_VECTORS; + adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors); + + return 0; +} + +static void txgbe_add_ring(struct txgbe_ring *ring, + struct txgbe_ring_container *head) +{ + ring->next = head->ring; + head->ring = ring; + head->count++; + head->next_update = jiffies + 1; +} + +/** + * txgbe_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. 
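The vector budget computed in txgbe_acquire_msix_vectors is: one vector per queue pair (the max of the Rx, Tx and XDP ring counts), capped by the number of online CPUs, plus the non-queue vectors, capped again by the MAC's advertised maximum. The same arithmetic as a freestanding helper (NON_Q_VECTORS and the hardware limit are passed in because the real values live in the driver headers):

/* How many MSI-X vectors to request, given queue and CPU counts. */
static unsigned int msix_budget(unsigned int rx_queues, unsigned int tx_queues,
				unsigned int xdp_queues, unsigned int online_cpus,
				unsigned int non_q_vectors, unsigned int hw_max)
{
	unsigned int vectors = rx_queues;

	if (tx_queues > vectors)
		vectors = tx_queues;
	if (xdp_queues > vectors)
		vectors = xdp_queues;

	/* little point in owning far more vectors than CPUs */
	if (vectors > online_cpus)
		vectors = online_cpus;

	vectors += non_q_vectors;       /* link/misc interrupt(s) */

	if (vectors > hw_max)           /* MAC's max_msix_vectors */
		vectors = hw_max;

	return vectors;
}

/* e.g. 16 Rx/Tx queues, 8 online CPUs, 1 misc vector, hw limit 64 -> request 9 */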
+ **/ +static int txgbe_alloc_q_vector(struct txgbe_adapter *adapter, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int xdp_count, unsigned int xdp_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct txgbe_q_vector *q_vector; + struct txgbe_ring *ring; +#ifdef HAVE_CPUMASK_LOCAL_SPREAD + int node = dev_to_node(&adapter->pdev->dev); +#else + int node = -1; +#endif +#ifdef HAVE_IRQ_AFFINITY_HINT + int cpu = -1; + u8 tcs = netdev_get_num_tc(adapter->netdev); +#endif + int ring_count, size; + + /* note this will allocate space for the ring structure as well! */ + ring_count = txr_count + rxr_count + xdp_count; + size = sizeof(struct txgbe_q_vector) + + (sizeof(struct txgbe_ring) * ring_count); + +#ifdef HAVE_IRQ_AFFINITY_HINT + /* customize cpu for Flow Director mapping */ + if ((tcs <= 1) && !(adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)) { + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + if (rss_i > 1 && adapter->atr_sample_rate) { + if (cpu_online(v_idx)) { +#ifdef HAVE_CPUMASK_LOCAL_SPREAD + cpu = cpumask_local_spread(v_idx, node); +#else + cpu = v_idx; +#endif + node = cpu_to_node(cpu); + } + } + } + +#endif + /* allocate q_vector and rings */ + q_vector = kzalloc_node(size, GFP_KERNEL, node); + if (!q_vector) + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* setup affinity mask and node */ +#ifdef HAVE_IRQ_AFFINITY_HINT + if (cpu != -1) + cpumask_set_cpu(cpu, &q_vector->affinity_mask); +#endif + q_vector->numa_node = node; + + /* initialize CPU for DCA */ + q_vector->cpu = -1; + +#ifndef TXGBE_NO_LRO + /* initialize LRO */ + __skb_queue_head_init(&q_vector->lrolist.active); + +#endif + /* initialize NAPI */ +#ifdef HAVE_NOT_NAPI_WEIGHT + netif_napi_add(adapter->netdev, &q_vector->napi, + txgbe_poll); +#else + netif_napi_add(adapter->netdev, &q_vector->napi, + txgbe_poll, 64); +#endif +#ifndef HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD +#ifdef HAVE_NDO_BUSY_POLL + napi_hash_add(&q_vector->napi); +#endif +#endif /*HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD*/ + +#ifdef HAVE_NDO_BUSY_POLL + /* initialize busy poll */ + atomic_set(&q_vector->state, TXGBE_QV_STATE_DISABLE); + +#endif + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + q_vector->v_idx = v_idx; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + q_vector->rx.work_limit = adapter->rx_work_limit; + + /* Initialize setting for adaptive ITR */ + q_vector->tx.itr = TXGBE_ITR_ADAPTIVE_MAX_USECS | + TXGBE_ITR_ADAPTIVE_LATENCY; + q_vector->rx.itr = TXGBE_ITR_ADAPTIVE_MAX_USECS | + TXGBE_ITR_ADAPTIVE_LATENCY; + + /* intialize ITR */ + if (txr_count && !rxr_count) { + /* tx only vector */ + if (adapter->tx_itr_setting == 1) + q_vector->itr = TXGBE_12K_ITR; + else + q_vector->itr = adapter->tx_itr_setting; + } else { + /* rx or rx/tx vector */ + if (adapter->rx_itr_setting == 1) + q_vector->itr = TXGBE_20K_ITR; + else + q_vector->itr = adapter->rx_itr_setting; + } + + /* initialize pointer to rings */ + ring = q_vector->ring; + + while (txr_count) { + /* assign generic ring traits */ + ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + txgbe_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + if (adapter->num_vmdqs > 1) + ring->queue_index = + txr_idx % 
adapter->queues_per_pool; + else + ring->queue_index = txr_idx; + clear_ring_xdp(ring); + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* update count and index */ + txr_count--; + txr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + while (xdp_count) { + /* assign generic ring traits */ + ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + txgbe_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = xdp_idx; + set_ring_xdp(ring); + + spin_lock_init(&ring->tx_lock); + + /* assign ring to adapter */ + adapter->xdp_ring[xdp_idx] = ring; + + /* update count and index */ + xdp_count--; + xdp_idx++; + + /* push pointer to next ring */ + ring++; + } + while (rxr_count) { + /* assign generic ring traits */ + ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + txgbe_add_ring(ring, &q_vector->rx); + +#if IS_ENABLED(CONFIG_FCOE) + if (adapter->flags & TXGBE_FLAG_FCOE_ENABLED) { + struct txgbe_ring_feature *f; + f = &adapter->ring_feature[RING_F_FCOE]; + + if ((rxr_idx >= f->offset) && + (rxr_idx < f->offset + f->indices)) { + set_bit(__TXGBE_RX_FCOE, &ring->state); + } + } +#endif /* CONFIG_FCOE */ + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + if (adapter->num_vmdqs > 1) + ring->queue_index = + rxr_idx % adapter->queues_per_pool; + else + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + + /* update count and index */ + rxr_count--; + rxr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + return 0; +} + +/** + * txgbe_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void txgbe_free_q_vector(struct txgbe_adapter *adapter, int v_idx) +{ + struct txgbe_q_vector *q_vector = adapter->q_vector[v_idx]; + struct txgbe_ring *ring; + + txgbe_for_each_ring(ring, q_vector->tx){ + if (ring_is_xdp(ring)) + adapter->xdp_ring[ring->queue_index] = NULL; + else + adapter->tx_ring[ring->queue_index] = NULL; + } +#ifdef HAVE_XDP_SUPPORT + if (static_key_enabled((struct static_key *)&txgbe_xdp_locking_key)) + static_branch_dec(&txgbe_xdp_locking_key); +#endif + txgbe_for_each_ring(ring, q_vector->rx) + adapter->rx_ring[ring->queue_index] = NULL; + + adapter->q_vector[v_idx] = NULL; +#ifdef HAVE_NDO_BUSY_POLL + napi_hash_del(&q_vector->napi); +#endif + netif_napi_del(&q_vector->napi); +#ifndef TXGBE_NO_LRO + __skb_queue_purge(&q_vector->lrolist.active); +#endif + kfree_rcu(q_vector, rcu); +} + +/** + * txgbe_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. 
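In txgbe_alloc_q_vector the ring index is advanced by v_count each time a Tx or Rx ring is attached, so the rings owned by vector v are v, v + v_count, v + 2*v_count, and so on: the rings are striped round-robin across the q_vectors. A toy illustration of that striding (hypothetical helper, hosted C for the printout):

#include <stdio.h>

/* With v_count q_vectors, vector v owns ring indices v, v + v_count,
 * v + 2*v_count, ... - the "+= v_count" stride above is exactly this. */
static void print_vector_rings(unsigned int v_idx, unsigned int v_count,
			       unsigned int rings_on_vector,
			       unsigned int total_rings)
{
	unsigned int idx = v_idx;

	while (rings_on_vector > 0 && idx < total_rings) {
		printf("vector %u owns ring %u\n", v_idx, idx);
		idx += v_count;
		rings_on_vector--;
	}
}

int main(void)
{
	/* 4 vectors, 8 rings: vector 1 owns rings 1 and 5 */
	print_vector_rings(1, 4, 2, 8);
	return 0;
}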
+ **/ +static int txgbe_alloc_q_vectors(struct txgbe_adapter *adapter) +{ + unsigned int q_vectors = adapter->num_q_vectors; + unsigned int rxr_remaining = adapter->num_rx_queues; + unsigned int txr_remaining = adapter->num_tx_queues; + unsigned int xdp_remaining = adapter->num_xdp_queues; + unsigned int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) { + for (; rxr_remaining; v_idx++) { + err = txgbe_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 0, 0, 1, rxr_idx); + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx); + err = txgbe_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, + xqpv, xdp_idx, + rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + xdp_remaining -= xqpv; + rxr_idx++; + txr_idx++; + xdp_idx += xqpv; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_xdp_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + txgbe_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * txgbe_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void txgbe_free_q_vectors(struct txgbe_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_xdp_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + txgbe_free_q_vector(adapter, v_idx); +} + +void txgbe_reset_interrupt_capability(struct txgbe_adapter *adapter) +{ + if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) { + adapter->flags &= ~TXGBE_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & TXGBE_FLAG_MSI_ENABLED) { + adapter->flags &= ~TXGBE_FLAG_MSI_ENABLED; + pci_disable_msi(adapter->pdev); + } +} + +/** + * txgbe_set_interrupt_capability - set MSI-X or MSI if supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +void txgbe_set_interrupt_capability(struct txgbe_adapter *adapter) +{ + int err; + + /* We will try to get MSI-X interrupts first */ + if (!txgbe_acquire_msix_vectors(adapter)) + return; + + /* At this point, we do not have MSI-X capabilities. We need to + * reconfigure or disable various features which require MSI-X + * capability. + */ + + /* Disable DCB unless we only have a single traffic class */ + if (netdev_get_num_tc(adapter->netdev) > 1) { + e_dev_warn("Number of DCB TCs exceeds number of available " + "queues. 
Disabling DCB support.\n"); + netdev_reset_tc(adapter->netdev); + + adapter->flags &= ~TXGBE_FLAG_DCB_ENABLED; + adapter->temp_dcb_cfg.pfc_mode_enable = false; + adapter->dcb_cfg.pfc_mode_enable = false; + } + + adapter->dcb_cfg.num_tcs.pg_tcs = 1; + adapter->dcb_cfg.num_tcs.pfc_tcs = 1; + + /* Disable VMDq support */ + e_dev_warn("Disabling VMQd support\n"); + adapter->flags &= ~TXGBE_FLAG_VMDQ_ENABLED; + +#ifdef CONFIG_PCI_IOV + /* Disable SR-IOV support */ + e_dev_warn("Disabling SR-IOV support\n"); + txgbe_disable_sriov(adapter); +#endif /* CONFIG_PCI_IOV */ + + /* Disable RSS */ + e_dev_warn("Disabling RSS support\n"); + adapter->ring_feature[RING_F_RSS].limit = 1; + + /* recalculate number of queues now that many features have been + * changed or disabled. + */ + txgbe_set_num_queues(adapter); + adapter->num_q_vectors = 1; + + if (!(adapter->flags & TXGBE_FLAG_MSI_CAPABLE)) + return; + + err = pci_enable_msi(adapter->pdev); + if (err) + e_dev_warn("Failed to allocate MSI interrupt, falling back to " + "legacy. Error: %d\n", + err); + else + adapter->flags |= TXGBE_FLAG_MSI_ENABLED; +} + +/** + * txgbe_init_interrupt_scheme - Determine proper interrupt scheme + * @adapter: board private structure to initialize + * + * We determine which interrupt scheme to use based on... + * - Kernel support (MSI, MSI-X) + * - which can be user-defined (via MODULE_PARAM) + * - Hardware queue count (num_*_queues) + * - defined by miscellaneous hardware support/features (RSS, etc.) + **/ +int txgbe_init_interrupt_scheme(struct txgbe_adapter *adapter) +{ + int err; + + /* Number of supported queues */ + txgbe_set_num_queues(adapter); + + /* Set interrupt mode */ + txgbe_set_interrupt_capability(adapter); + + /* Allocate memory for queues */ + err = txgbe_alloc_q_vectors(adapter); + if (err) { + e_err(probe, "Unable to allocate memory for queue vectors\n"); + txgbe_reset_interrupt_capability(adapter); + return err; + } + + txgbe_cache_ring_register(adapter); + + set_bit(__TXGBE_DOWN, &adapter->state); + + return 0; +} + +/** + * txgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +void txgbe_clear_interrupt_scheme(struct txgbe_adapter *adapter) +{ + txgbe_free_q_vectors(adapter); + txgbe_reset_interrupt_capability(adapter); +} + +void txgbe_tx_ctxtdesc(struct txgbe_ring *tx_ring, u32 vlan_macip_lens, + u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) +{ + struct txgbe_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = TXGBE_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= TXGBE_TXD_DTYP_CTXT; + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); +} + diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index d60c26ba0ba4c98815a9609550ea3046aabbe0eb..50b14823e4ab5e96596bb45957c0cad443c2d8de 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -1,25 +1,121 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ +/* + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on txgbe_main.c, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code + */ #include #include #include #include +#include +#include #include -#include -#include -#include -#include - -#include "../libwx/wx_type.h" -#include "../libwx/wx_lib.h" -#include "../libwx/wx_hw.h" -#include "txgbe_type.h" +#include +#include +#include +#include +#include +#ifdef NETIF_F_TSO +#include +#ifdef NETIF_F_TSO6 +#include +#include +#endif /* NETIF_F_TSO6 */ +#endif /* NETIF_F_TSO */ +#ifdef SIOCETHTOOL +#include +#endif + +#include +#include "txgbe.h" +#ifdef HAVE_XDP_SUPPORT +#include +#include +#include +#endif +#include "txgbe_xsk.h" +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL +#include +#else +#include +#endif +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#ifdef HAVE_UDP_ENC_RX_OFFLOAD +#include +#include +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ +#ifdef HAVE_VXLAN_RX_OFFLOAD +#include +#endif /* HAVE_VXLAN_RX_OFFLOAD */ +#ifdef NETIF_F_HW_TC +#include +#include +#include +#endif /* NETIF_F_HW_TC */ + +#include "txgbe_dcb.h" +#include "txgbe_sriov.h" #include "txgbe_hw.h" #include "txgbe_phy.h" -#include "txgbe_ethtool.h" - -char txgbe_driver_name[] = "txgbe"; +#include "txgbe_pcierr.h" +#include "txgbe_bp.h" +#include "txgbe_e56.h" +#include "txgbe_e56_bp.h" + +char txgbe_driver_name[32] = TXGBE_NAME; +static const char txgbe_driver_string[] = + "WangXun RP1000/RP2000/FF50XX PCI Express Network Driver"; + +#define DRV_HW_PERF + +#define FPGA + +#define DRIVERIOV + +#define BYPASS_TAG + +#define RELEASE_TAG + +#if (defined(TXGBE_SUPPORT_KYLIN_FT) || defined(TXGBE_SUPPORT_KYLIN_LX)) +#define DRV_VERSION __stringify(2.1.1klos) +#elif defined(CONFIG_EULER_KERNEL) +#define DRV_VERSION __stringify(2.1.1elos) +#elif defined(CONFIG_UOS_KERNEL) +#define DRV_VERSION __stringify(2.1.1uos) +#else +#define 
DRV_VERSION __stringify(2.1.1) +#endif +const char txgbe_driver_version[32] = DRV_VERSION; +static const char txgbe_copyright[] = + "Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd"; +static const char txgbe_overheat_msg[] = + "Network adapter has been stopped because it has over heated. " + "If the problem persists, restart the computer, or " + "power off the system and replace the adapter"; +static const char txgbe_underheat_msg[] = + "Network adapter has been started again since the temperature " + "has been back to normal state"; /* txgbe_pci_tbl - PCI Device ID Table * @@ -30,45 +126,176 @@ char txgbe_driver_name[] = "txgbe"; * Class, Class Mask, private data (not used) } */ static const struct pci_device_id txgbe_pci_tbl[] = { - { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_SP1000), 0}, - { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_WX1820), 0}, + { PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_SP1000), 0}, + { PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_WX1820), 0}, + { PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_AML), 0}, + { PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_AML5025), 0}, + { PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_AML5125), 0}, + { PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_AML5040), 0}, + { PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_AML5140), 0}, /* required last entry */ { .device = 0 } }; +MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl); + + +MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, "); +MODULE_DESCRIPTION("WangXun(R) RP1000/RP2000/FF50XX PCI Express Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +#ifdef HAVE_XDP_SUPPORT +DEFINE_STATIC_KEY_FALSE(txgbe_xdp_locking_key); +EXPORT_SYMBOL(txgbe_xdp_locking_key); +#endif #define DEFAULT_DEBUG_LEVEL_SHIFT 3 -static void txgbe_check_minimum_link(struct wx *wx) +static struct workqueue_struct *txgbe_wq; + +static bool txgbe_is_sfp(struct txgbe_hw *hw); +static bool txgbe_check_cfg_remove(struct txgbe_hw *hw, struct pci_dev *pdev); +static void txgbe_napi_enable_all(struct txgbe_adapter *adapter); +static void txgbe_napi_disable_all(struct txgbe_adapter *adapter); + +extern txgbe_dptype txgbe_ptype_lookup[256]; + +static inline txgbe_dptype txgbe_decode_ptype(const u8 ptype) +{ + return txgbe_ptype_lookup[ptype]; +} + +static inline txgbe_dptype +decode_rx_desc_ptype(const union txgbe_rx_desc *rx_desc) +{ + return txgbe_decode_ptype(TXGBE_RXD_PKTTYPE(rx_desc)); +} + +void txgbe_print_tx_hang_status(struct txgbe_adapter *adapter) +{ + int pos; + u32 value; + struct pci_dev *pdev = adapter->pdev; + u16 devctl2; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); + if (!pos) + return; + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_STATUS, &value); + e_info(probe, "AER Uncorrectable Error Status: 0x%08x\n", value); + txgbe_aer_print_error(adapter, TXGBE_AER_UNCORRECTABLE, value); + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &value); + e_info(probe, "AER Uncorrectable Error Mask: 0x%08x\n", value); + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &value); + e_info(probe, "AER Uncorrectable Error Severity: 0x%08x\n", value); + pci_read_config_dword(pdev, pos + PCI_ERR_COR_STATUS, &value); + e_info(probe, "AER Correctable Error Status: 0x%08x\n", value); + txgbe_aer_print_error(adapter, TXGBE_AER_CORRECTABLE, value); + pci_read_config_dword(pdev, pos + PCI_ERR_COR_MASK, &value); + e_info(probe, "AER Correctable Error Mask: 0x%08x\n", value); + pci_read_config_dword(pdev, pos + PCI_ERR_CAP, &value); + e_info(probe, "AER Capabilities and Control Register: 0x%08x\n", value); + + pcie_capability_read_word(pdev, 
PCI_EXP_DEVCTL2, &devctl2); + e_info(probe, "Device Control2 Register: 0x%04x\n", devctl2); + + e_info(probe, "Tx flow control Status[TDB_TFCS 0xCE00]: 0x%x\n", + rd32(&adapter->hw, TXGBE_TDB_TFCS)); + + e_info(tx_err, "tdm_desc_fatal_0: 0x%x\n", + rd32(&adapter->hw, 0x180d0)); + e_info(tx_err, "tdm_desc_fatal_1: 0x%x\n", + rd32(&adapter->hw, 0x180d4)); + e_info(tx_err, "tdm_desc_fatal_2: 0x%x\n", + rd32(&adapter->hw, 0x180d8)); + e_info(tx_err, "tdm_desc_fatal_3: 0x%x\n", + rd32(&adapter->hw, 0x180dc)); + e_info(tx_err, "tdm_desc_nonfatal_0: 0x%x\n", + rd32(&adapter->hw, 0x180c0)); + e_info(tx_err, "tdm_desc_nonfatal_1: 0x%x\n", + rd32(&adapter->hw, 0x180c4)); + e_info(tx_err, "tdm_desc_nonfatal_2: 0x%x\n", + rd32(&adapter->hw, 0x180c8)); + e_info(tx_err, "tdm_desc_nonfatal_3: 0x%x\n", + rd32(&adapter->hw, 0x180cc)); + + return; +} + +static void txgbe_dump_all_ring_desc(struct txgbe_adapter *adapter) { + struct txgbe_hw *hw = &adapter->hw; + union txgbe_tx_desc *tx_desc; + struct txgbe_ring *tx_ring; + int i, j; + + if (!netif_msg_tx_err(adapter)) + return; + + e_warn(tx_err, "Dump desc base addr\n"); + + for (i = 0; i < adapter->num_tx_queues; i++) { + e_warn(tx_err, "q_%d:0x%x%x\n", i, rd32(hw, TXGBE_PX_TR_BAH(i)), rd32(hw, TXGBE_PX_TR_BAL(i))); + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + tx_ring = adapter->tx_ring[i]; + for (j = 0; j < tx_ring->count; j++) { + tx_desc = TXGBE_TX_DESC(tx_ring, j); + if (tx_desc->read.olinfo_status != 0x1) + e_warn(tx_err, "queue[%d][%d]:0x%llx, 0x%x, 0x%x\n", + i, j, tx_desc->read.buffer_addr, tx_desc->read.cmd_type_len, tx_desc->read.olinfo_status); + } + } +} + +static void txgbe_check_minimum_link(struct txgbe_adapter *adapter, + int expected_gts) +{ + struct txgbe_hw *hw = &adapter->hw; struct pci_dev *pdev; - pdev = wx->pdev; + /* Some devices are not connected over PCIe and thus do not negotiate + * speed. These devices do not have valid bus info, and thus any report + * we generate may not be correct. + */ + if (hw->bus.type == txgbe_bus_type_internal) + return; + + pdev = adapter->pdev; pcie_print_link_status(pdev); } /** * txgbe_enumerate_functions - Get the number of ports this device has - * @wx: wx structure + * @adapter: adapter structure * * This function enumerates the phsyical functions co-located on a single slot, * in order to determine how many ports a device has. This is most useful in * determining the required GT/s of PCIe bandwidth necessary for optimal * performance. **/ -static int txgbe_enumerate_functions(struct wx *wx) +static inline int txgbe_enumerate_functions(struct txgbe_adapter *adapter) { - struct pci_dev *entry, *pdev = wx->pdev; + struct pci_dev *entry, *pdev = adapter->pdev; int physfns = 0; list_for_each_entry(entry, &pdev->bus->devices, bus_list) { +#ifdef CONFIG_PCI_IOV + /* don't count virtual functions */ + if (entry->is_virtfn) + continue; +#endif + /* When the devices on the bus don't all match our device ID, * we can't reliably determine the correct number of * functions. This can occur if a function has been direct - * attached to a virtual machine using VT-d. + * attached to a virtual machine using VT-d, for example. In + * this case, simply return -1 to indicate this. 
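The AER dump at the top of this hunk follows the stock pattern for walking the PCIe Advanced Error Reporting extended capability: locate it once, then read the individual error registers at fixed offsets from that position. A stripped-down version of the walk, reduced to the uncorrectable status/mask/severity registers plus the correctable status (kernel-style sketch, not the driver's exact function):

#include <linux/pci.h>

/* Minimal AER walk: find the capability, then read a few error registers. */
static void dump_aer_sketch(struct pci_dev *pdev)
{
	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	u32 val;

	if (!pos)
		return;         /* device exposes no AER capability */

	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_STATUS, &val);
	dev_info(&pdev->dev, "AER uncorrectable status:   0x%08x\n", val);
	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &val);
	dev_info(&pdev->dev, "AER uncorrectable mask:     0x%08x\n", val);
	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &val);
	dev_info(&pdev->dev, "AER uncorrectable severity: 0x%08x\n", val);
	pci_read_config_dword(pdev, pos + PCI_ERR_COR_STATUS, &val);
	dev_info(&pdev->dev, "AER correctable status:     0x%08x\n", val);
}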
*/ - if (entry->vendor != pdev->vendor || - entry->device != pdev->device) - return -EINVAL; + if ((entry->vendor != pdev->vendor) || + (entry->device != pdev->device)) + return -1; physfns++; } @@ -76,443 +303,13030 @@ static int txgbe_enumerate_functions(struct wx *wx) return physfns; } +void txgbe_service_event_schedule(struct txgbe_adapter *adapter) +{ + if (!test_bit(__TXGBE_DOWN, &adapter->state) && + !test_bit(__TXGBE_REMOVING, &adapter->state) && + !test_and_set_bit(__TXGBE_SERVICE_SCHED, &adapter->state)) + queue_work(txgbe_wq, &adapter->service_task); +} + +static void txgbe_service_event_complete(struct txgbe_adapter *adapter) +{ + BUG_ON(!test_bit(__TXGBE_SERVICE_SCHED, &adapter->state)); + + /* flush memory to make sure state is correct before next watchdog */ + smp_mb__before_atomic(); + clear_bit(__TXGBE_SERVICE_SCHED, &adapter->state); +} + +static void txgbe_remove_adapter(struct txgbe_hw *hw) +{ + struct txgbe_adapter *adapter = hw->back; + + if (!hw->hw_addr) + return; + hw->hw_addr = NULL; + e_dev_err("Adapter removed\n"); + if (test_bit(__TXGBE_SERVICE_INITED, &adapter->state)) + txgbe_service_event_schedule(adapter); +} + +static void txgbe_check_remove(struct txgbe_hw *hw, u32 reg) +{ + u32 value; + + /* The following check not only optimizes a bit by not + * performing a read on the status register when the + * register just read was a status register read that + * returned TXGBE_FAILED_READ_REG. It also blocks any + * potential recursion. + */ + if (reg == TXGBE_CFG_PORT_ST) { + txgbe_remove_adapter(hw); + return; + } + value = rd32(hw, TXGBE_CFG_PORT_ST); + if (value == TXGBE_FAILED_READ_REG) + txgbe_remove_adapter(hw); +} + +static u32 txgbe_validate_register_read(struct txgbe_hw *hw, u32 reg, bool quiet) +{ + int i; + u32 value; + u8 __iomem *reg_addr; + struct txgbe_adapter *adapter = hw->back; + + reg_addr = READ_ONCE(hw->hw_addr); + if (TXGBE_REMOVED(reg_addr)) + return TXGBE_FAILED_READ_REG; + for (i = 0; i < TXGBE_DEAD_READ_RETRIES; ++i) { + value = txgbe_rd32(reg_addr + reg); + if (value != TXGBE_DEAD_READ_REG) + break; + } + if (quiet) + return value; + if (value == TXGBE_DEAD_READ_REG) + e_err(drv, "%s: register %x read unchanged\n", __func__, reg); + else + e_warn(hw, "%s: register %x read recovered after %d retries\n", + __func__, reg, i + 1); + return value; +} + /** - * txgbe_irq_enable - Enable default interrupt generation settings - * @wx: pointer to private structure - * @queues: enable irqs for queues - **/ -static void txgbe_irq_enable(struct wx *wx, bool queues) + * txgbe_read_reg - Read from device register + * @hw: hw specific details + * @reg: offset of register to read + * + * Returns : value read or TXGBE_FAILED_READ_REG if removed + * + * This function is used to read device registers. It checks for device + * removal by confirming any read that returns all ones by checking the + * status register value for all ones. This function avoids reading from + * the hardware if a removal was previously detected in which case it + * returns TXGBE_FAILED_READ_REG (all ones). 
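txgbe_read_reg and its helpers implement a common surprise-removal idiom: a read returning all ones is treated as suspicious, a designated status register is re-read to confirm, and once removal is confirmed the mapped base address is dropped so every later access short-circuits. A compact model of that flow with hypothetical names (the dead-read retry loop of txgbe_validate_register_read is omitted):

#include <stdbool.h>
#include <stdint.h>

#define FAILED_READ_REG 0xffffffffU

struct dev_regs {
	volatile uint32_t *base;   /* cleared once the device is gone      */
	uint32_t status_off;       /* register used to confirm the removal */
	bool removed;
};

static uint32_t reg_read_checked(struct dev_regs *d, uint32_t off)
{
	uint32_t v;

	if (!d->base)                       /* already torn down */
		return FAILED_READ_REG;

	v = d->base[off / 4];
	if (v != FAILED_READ_REG)
		return v;                   /* normal case */

	/* all ones: confirm by probing a register known never to read as ~0 */
	if (off != d->status_off &&
	    d->base[d->status_off / 4] != FAILED_READ_REG)
		return v;                   /* register really reads as all ones */

	d->removed = true;                  /* surprise removal detected */
	d->base = NULL;
	return FAILED_READ_REG;
}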
+ */ +u32 txgbe_read_reg(struct txgbe_hw *hw, u32 reg, bool quiet) +{ + u32 value; + u8 __iomem *reg_addr; + + reg_addr = READ_ONCE(hw->hw_addr); + if (TXGBE_REMOVED(reg_addr)) + return TXGBE_FAILED_READ_REG; + value = txgbe_rd32(reg_addr + reg); + if (unlikely(value == TXGBE_FAILED_READ_REG)) + txgbe_check_remove(hw, reg); + if (unlikely(value == TXGBE_DEAD_READ_REG)) + value = txgbe_validate_register_read(hw, reg, quiet); + return value; +} + +static void txgbe_release_hw_control(struct txgbe_adapter *adapter) { - wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK); + /* Let firmware take over control of h/w */ + wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_DRV_LOAD, 0); +} - /* unmask interrupt */ - wx_intr_enable(wx, TXGBE_INTR_MISC(wx)); - if (queues) - wx_intr_enable(wx, TXGBE_INTR_QALL(wx)); +static void txgbe_get_hw_control(struct txgbe_adapter *adapter) +{ + /* Let firmware know the driver has taken over */ + wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_DRV_LOAD, TXGBE_CFG_PORT_CTL_DRV_LOAD); } /** - * txgbe_intr - msi/legacy mode Interrupt Handler - * @irq: interrupt number - * @data: pointer to a network interface device structure + * txgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors + * @adapter: pointer to adapter struct + * @direction: 0 for Rx, 1 for Tx, -1 for other causes + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + * **/ -static irqreturn_t txgbe_intr(int __always_unused irq, void *data) +static void txgbe_set_ivar(struct txgbe_adapter *adapter, s8 direction, + u16 queue, u16 msix_vector) { - struct wx_q_vector *q_vector; - struct wx *wx = data; - struct pci_dev *pdev; - u32 eicr; + u32 ivar, index; + struct txgbe_hw *hw = &adapter->hw; + + if (direction == -1) { + /* other causes */ + msix_vector |= TXGBE_PX_IVAR_ALLOC_VAL; + index = 0; + ivar = rd32(&adapter->hw, TXGBE_PX_MISC_IVAR); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + wr32(&adapter->hw, TXGBE_PX_MISC_IVAR, ivar); + } else { + /* tx or rx causes */ + msix_vector |= TXGBE_PX_IVAR_ALLOC_VAL; + index = ((16 * (queue & 1)) + (8 * direction)); + ivar = rd32(hw, TXGBE_PX_IVAR(queue >> 1)); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + wr32(hw, TXGBE_PX_IVAR(queue >> 1), ivar); + } +} + +void txgbe_unmap_and_free_tx_resource(struct txgbe_ring *ring, + struct txgbe_tx_buffer *tx_buffer) +{ + if (!ring_is_xdp(ring) && tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + tx_buffer->va = NULL; + /* tx_buffer must be completely set up in the transmit path */ +} + +static void txgbe_update_xoff_rx_lfc(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_hw_stats *hwstats = &adapter->stats; + int i; + u32 data; - q_vector = wx->q_vector[0]; - pdev = wx->pdev; + if ((hw->fc.current_mode != txgbe_fc_full) && + (hw->fc.current_mode != txgbe_fc_rx_pause)) + return; - eicr = wx_misc_isb(wx, WX_ISB_VEC0); - if (!eicr) { - /* shared interrupt alert! - * the interrupt that we masked before the ICR read. 
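txgbe_set_ivar above packs one 8-bit vector number per interrupt cause into a 32-bit IVAR word: each register covers two queues, the byte lane is chosen by the queue's parity and the Rx/Tx direction, and an "allocated" flag is OR'd into the vector. A small sketch of that field packing (the flag value follows the driver's TXGBE_PX_IVAR_ALLOC_VAL define; 0x80 is assumed here):

#include <stdint.h>

#define IVAR_ALLOC_VAL 0x80u   /* assumed "entry valid" bit */

/* Pack an 8-bit vector into the right byte lane of an IVAR word:
 * lane = 16 * (queue & 1) + 8 * direction  (direction: 0 = Rx, 1 = Tx). */
static uint32_t ivar_pack(uint32_t ivar, unsigned int queue,
			  unsigned int direction, uint8_t msix_vector)
{
	unsigned int shift = 16 * (queue & 1) + 8 * direction;
	uint32_t field = (uint32_t)msix_vector | IVAR_ALLOC_VAL;

	ivar &= ~(0xFFu << shift);   /* clear the old mapping for this cause */
	ivar |= field << shift;      /* install the new vector               */
	return ivar;
}

/* Queue 5, Tx cause, vector 3: shift = 16*1 + 8*1 = 24, so byte 3 is written. */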
- */ - if (netif_running(wx->netdev)) - txgbe_irq_enable(wx, true); - return IRQ_NONE; /* Not our interrupt */ + data = rd32(hw, TXGBE_MAC_LXOFFRXC); + + hwstats->lxoffrxc += data; + + /* refill credits (no tx hang) if we received xoff */ + if (!data) + return; + + for (i = 0; i < adapter->num_tx_queues; i++) + clear_bit(__TXGBE_HANG_CHECK_ARMED, + &adapter->tx_ring[i]->state); + for (i = 0; i < adapter->num_xdp_queues; i++) + clear_bit(__TXGBE_HANG_CHECK_ARMED, + &adapter->xdp_ring[i]->state); +} + +static void txgbe_update_xoff_received(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_hw_stats *hwstats = &adapter->stats; + u32 xoff[8] = {0}; + int tc; + int i; + bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; + +#ifdef HAVE_DCBNL_IEEE + if (adapter->txgbe_ieee_pfc) + pfc_en |= !!(adapter->txgbe_ieee_pfc->pfc_en); + +#endif + if (!(adapter->flags & TXGBE_FLAG_DCB_ENABLED) || !pfc_en) { + txgbe_update_xoff_rx_lfc(adapter); + return; } - wx->isb_mem[WX_ISB_VEC0] = 0; - if (!(pdev->msi_enabled)) - wr32(wx, WX_PX_INTA, 1); - wx->isb_mem[WX_ISB_MISC] = 0; - /* would disable interrupts here but it is auto disabled */ - napi_schedule_irqoff(&q_vector->napi); + /* update stats for each tc, only valid with PFC enabled */ + for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { + u32 pxoffrxc; + wr32m(hw, TXGBE_MMC_CONTROL, TXGBE_MMC_CONTROL_UP, i<<16); + pxoffrxc = rd32(hw, TXGBE_MAC_PXOFFRXC); + hwstats->pxoffrxc[i] += pxoffrxc; + /* Get the TC for given UP */ + tc = netdev_get_prio_tc_map(adapter->netdev, i); + xoff[tc] += pxoffrxc; + } - /* re-enable link(maybe) and non-queue interrupts, no flush. - * txgbe_poll will re-enable the queue interrupts + /* disarm tx queues that have received xoff frames */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct txgbe_ring *tx_ring = adapter->tx_ring[i]; + + tc = tx_ring->dcb_tc; + if ((tc <= 7) && (xoff[tc])) + clear_bit(__TXGBE_HANG_CHECK_ARMED, &tx_ring->state); + } + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct txgbe_ring *xdp_ring = adapter->xdp_ring[i]; + + tc = xdp_ring->dcb_tc; + if (tc <= 7 && xoff[tc]) + clear_bit(__TXGBE_HANG_CHECK_ARMED, &xdp_ring->state); + } +} + +static u64 txgbe_get_tx_completed(struct txgbe_ring *ring) +{ + return ring->stats.packets; +} + +static u64 txgbe_get_tx_pending(struct txgbe_ring *ring) +{ + struct txgbe_adapter *adapter; + struct txgbe_hw *hw; + u32 head, tail; + + if (ring->accel) + adapter = ring->accel->adapter; + else + adapter = ring->q_vector->adapter; + + hw = &adapter->hw; + head = rd32(hw, TXGBE_PX_TR_RP(ring->reg_idx)); + tail = rd32(hw, TXGBE_PX_TR_WP(ring->reg_idx)); + + return ((head <= tail) ? tail : tail + ring->count) - head; +} + +static inline bool txgbe_check_tx_hang(struct txgbe_ring *tx_ring) +{ + u64 tx_done = txgbe_get_tx_completed(tx_ring); + u64 tx_done_old = tx_ring->tx_stats.tx_done_old; + u64 tx_pending = txgbe_get_tx_pending(tx_ring); + + clear_check_for_tx_hang(tx_ring); + + /* + * Check for a hung queue, but be thorough. This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. The + * ARMED bit is set to indicate a potential hang. The + * bit is cleared if a pause frame is received to remove + * false hang detection due to PFC or 802.3x frames. By + * requiring this to fail twice we avoid races with + * pfc clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. 
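txgbe_check_tx_hang, whose comment ends just above, is a two-strike detector: the first check that sees no completion progress while work is still pending only arms a flag, and a hang is reported only if a later check finds the flag still armed; any progress (or a received pause frame) clears it. Reduced to its state machine, with hypothetical field names:

#include <stdbool.h>
#include <stdint.h>

struct hang_state {
	uint64_t done_old;   /* completed count at the previous check */
	bool     armed;      /* first strike already recorded         */
};

/* True only on the second consecutive check that shows no progress. */
static bool check_tx_hang_sketch(struct hang_state *s,
				 uint64_t done_now, uint64_t pending)
{
	if (done_now == s->done_old && pending) {
		bool was_armed = s->armed;

		s->armed = true;        /* first strike arms, second fires */
		return was_armed;
	}

	s->done_old = done_now;         /* progress: restart the countdown */
	s->armed = false;
	return false;
}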
*/ - if (netif_running(wx->netdev)) - txgbe_irq_enable(wx, false); + if (tx_done_old == tx_done && tx_pending) + /* make sure it is true for two checks in a row */ + return test_and_set_bit(__TXGBE_HANG_CHECK_ARMED, + &tx_ring->state); + /* update completed stats and continue */ + tx_ring->tx_stats.tx_done_old = tx_done; + /* reset the countdown */ + clear_bit(__TXGBE_HANG_CHECK_ARMED, &tx_ring->state); + + return false; +} - return IRQ_HANDLED; +static void txgbe_tx_timeout_dorecovery(struct txgbe_adapter *adapter) +{ + /* schedule immediate reset if we believe we hung */ + + if (adapter->hw.bus.lan_id == 0) { + adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; + } else + wr32(&adapter->hw, TXGBE_MIS_PF_SM, 1); + txgbe_service_event_schedule(adapter); } /** - * txgbe_request_msix_irqs - Initialize MSI-X interrupts - * @wx: board private structure - * - * Allocate MSI-X vectors and request interrupts from the kernel. + * txgbe_tx_timeout_reset - initiate reset due to Tx timeout + * @adapter: driver private struct **/ -static int txgbe_request_msix_irqs(struct wx *wx) +static void txgbe_tx_timeout_reset(struct txgbe_adapter *adapter) { - struct net_device *netdev = wx->netdev; - int vector, err; + struct txgbe_hw *hw = &adapter->hw; - for (vector = 0; vector < wx->num_q_vectors; vector++) { - struct wx_q_vector *q_vector = wx->q_vector[vector]; - struct msix_entry *entry = &wx->msix_entries[vector]; + if (!test_bit(__TXGBE_DOWN, &adapter->state)) { + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + adapter->flags2 |= TXGBE_FLAG2_DMA_RESET_REQUESTED; + e_warn(drv, "initiating dma reset due to tx timeout\n"); + } else { + adapter->flags2 |= TXGBE_FLAG2_PF_RESET_REQUESTED; + e_warn(drv, "initiating reset due to tx timeout\n"); + } + txgbe_service_event_schedule(adapter); + } +} - if (q_vector->tx.ring && q_vector->rx.ring) - snprintf(q_vector->name, sizeof(q_vector->name) - 1, - "%s-TxRx-%d", netdev->name, entry->entry); - else - /* skip this unused q_vector */ - continue; +/** + * txgbe_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +#ifdef HAVE_TX_TIMEOUT_TXQUEUE +static void txgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue) +#else +static void txgbe_tx_timeout(struct net_device *netdev) +#endif +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + bool tdm_desc_fatal = false; + u32 value2 = 0, value3 = 0; + bool real_tx_hang = false; + u16 pci_cmd = 0; + u32 head, tail; + u16 vid = 0; + int i; + +#define TX_TIMEO_LIMIT 16000 + for (i = 0; i < adapter->num_tx_queues; i++) { + struct txgbe_ring *tx_ring = adapter->tx_ring[i]; + if (check_for_tx_hang(tx_ring) && txgbe_check_tx_hang(tx_ring)) + real_tx_hang = true; + } - err = request_irq(entry->vector, wx_msix_clean_rings, 0, - q_vector->name, q_vector); - if (err) { - wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n", - q_vector->name, err); - goto free_queue_irqs; - } + pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &vid); + ERROR_REPORT1(TXGBE_ERROR_POLLING, "pci vendor id is 0x%x\n", vid); + + pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); + ERROR_REPORT1(TXGBE_ERROR_POLLING, "pci command reg is 0x%x.\n", pci_cmd); + + if (hw->mac.type == txgbe_mac_sp) { + value2 = rd32(&adapter->hw,0x10000); + ERROR_REPORT1(TXGBE_ERROR_POLLING, "reg 0x10000 value is 0x%08x\n", value2); + value2 = rd32(&adapter->hw,0x180d0); + ERROR_REPORT1(TXGBE_ERROR_POLLING, "reg 0x180d0 value is 0x%08x\n", 
value2); + value2 = rd32(&adapter->hw,0x180d4); + ERROR_REPORT1(TXGBE_ERROR_POLLING, "reg 0x180d4 value is 0x%08x\n", value2); + value2 = rd32(&adapter->hw,0x180d8); + ERROR_REPORT1(TXGBE_ERROR_POLLING, "reg 0x180d8 value is 0x%08x\n", value2); + value2 = rd32(&adapter->hw,0x180dc); + ERROR_REPORT1(TXGBE_ERROR_POLLING, "reg 0x180dc value is 0x%08x\n", value2); } - return 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + head = rd32(&adapter->hw, TXGBE_PX_TR_RP(adapter->tx_ring[i]->reg_idx)); + tail = rd32(&adapter->hw, TXGBE_PX_TR_WP(adapter->tx_ring[i]->reg_idx)); -free_queue_irqs: - while (vector) { - vector--; - free_irq(wx->msix_entries[vector].vector, - wx->q_vector[vector]); + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "tx ring %d next_to_use is %d, next_to_clean is %d\n", + i, adapter->tx_ring[i]->next_to_use, adapter->tx_ring[i]->next_to_clean); + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "tx ring %d hw rp is 0x%x, wp is 0x%x\n", i, head, tail); } - wx_reset_interrupt_capability(wx); - return err; + + value2= rd32(&adapter->hw, TXGBE_PX_IMS(0)); + value3= rd32(&adapter->hw, TXGBE_PX_IMS(1)); + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "PX_IMS0 value is 0x%08x, PX_IMS1 value is 0x%08x\n", value2, value3); + + /* only check pf queue tdm desc error */ + if ((rd32(&adapter->hw, TXGBE_TDM_DESC_FATAL(0)) & 0xffffffff) || + (rd32(&adapter->hw, TXGBE_TDM_DESC_FATAL(1)) & 0xffffffff)) + tdm_desc_fatal = true; + + /* PCIe link loss, tdm desc fatal error or memory space can't access */ + if (TXGBE_RECOVER_CHECK == 1) { + if (vid == TXGBE_FAILED_READ_CFG_WORD || + tdm_desc_fatal || + !(pci_cmd & 0x2)) { + txgbe_tx_timeout_dorecovery(adapter); + } else { + txgbe_print_tx_hang_status(adapter); + txgbe_tx_timeout_reset(adapter); + } + } else { + txgbe_tx_timeout_dorecovery(adapter); + } + + return; +} + + +static inline u16 txgbe_desc_buf_unmapped(struct txgbe_ring *ring, u16 ntc, u16 ntf) +{ + return ((ntc >= ntf) ? 0 : ring->count) + ntc - ntf; } /** - * txgbe_request_irq - initialize interrupts - * @wx: board private structure - * - * Attempt to configure interrupts using the best available - * capabilities of the hardware and kernel. 
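txgbe_desc_buf_unmapped just above is the usual modular distance on a descriptor ring: how far next_to_clean has advanced past next_to_free, adding the ring size back when the clean index has wrapped. The same arithmetic in isolation, with a couple of worked cases:

#include <assert.h>
#include <stdint.h>

/* Distance from 'from' to 'to' on a ring of 'count' descriptors. */
static uint16_t ring_distance(uint16_t to, uint16_t from, uint16_t count)
{
	return (uint16_t)(((to >= from) ? 0 : count) + to - from);
}

int main(void)
{
	assert(ring_distance(10, 4, 512) == 6);    /* no wrap             */
	assert(ring_distance(3, 500, 512) == 15);  /* clean index wrapped */
	return 0;
}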
+ * txgbe_ - Reclaim resources after transmit completes + * @q_vector: structure containing interrupt and ring information + * @tx_ring: tx ring to clean **/ -static int txgbe_request_irq(struct wx *wx) +static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector, + struct txgbe_ring *tx_ring, int napi_budget) { - struct net_device *netdev = wx->netdev; - struct pci_dev *pdev = wx->pdev; - int err; + struct txgbe_adapter *adapter = q_vector->adapter; + struct txgbe_tx_buffer *tx_buffer; + union txgbe_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + unsigned int i = tx_ring->next_to_clean; + u16 vid = 0; + int j = 0; + u32 size; + unsigned int ntf; + struct txgbe_tx_buffer *free_tx_buffer; + u32 unmapped_descs = 0; + bool first_dma; +#ifdef TXGBE_TXHEAD_WB + u32 head = 0; + u32 temp = tx_ring->next_to_clean; + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + head = *(tx_ring->headwb_mem); +#endif + + if (test_bit(__TXGBE_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = TXGBE_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union txgbe_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + +#ifdef TXGBE_TXHEAD_WB + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + /* we have caught up to head, no work left to do */ + if (temp == head) { + break; + } else if (head > temp && !(tx_buffer->next_eop >= temp && (tx_buffer->next_eop < head))) { + break; + } else if (!(tx_buffer->next_eop >= temp || (tx_buffer->next_eop < head))) { + break; + } + } else +#endif + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(TXGBE_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + if (tx_buffer->skb) { +#ifdef HAVE_PTP_1588_CLOCK + if (!ring_is_xdp(tx_ring) && +#ifdef SKB_SHARED_TX_IS_UNION + !(skb_tx(tx_buffer->skb)->in_progress == 1)) +#else + !(skb_shinfo(tx_buffer->skb)->tx_flags & SKBTX_IN_PROGRESS)) +#endif +#else + if (!ring_is_xdp(tx_ring)) +#endif + skb_orphan(tx_buffer->skb); + } else{ + dev_err(tx_ring->dev, "skb is NULL.\n"); + } + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = TXGBE_TX_DESC(tx_ring, 0); + } - if (pdev->msix_enabled) - err = txgbe_request_msix_irqs(wx); - else if (pdev->msi_enabled) - err = request_irq(wx->pdev->irq, &txgbe_intr, 0, - netdev->name, wx); - else - err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED, - netdev->name, wx); + } + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = TXGBE_TX_DESC(tx_ring, 0); + } - if (err) - wx_err(wx, "request_irq failed, Error %d\n", err); + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + + first_dma = false; + ntf = tx_ring->next_to_free; + free_tx_buffer = 
&tx_ring->tx_buffer_info[ntf]; + ntf -= tx_ring->count; + unmapped_descs = txgbe_desc_buf_unmapped(tx_ring, i, tx_ring->next_to_free); + while (unmapped_descs > adapter->desc_reserved) { +#ifdef HAVE_XDP_SUPPORT + if (ring_is_xdp(tx_ring)) { +#ifdef HAVE_XDP_FRAME_STRUCT + if (free_tx_buffer->xdpf) { + xdp_return_frame(free_tx_buffer->xdpf); + first_dma = true; + } +#else + if (free_tx_buffer->data) { + page_frag_free(free_tx_buffer->data); + first_dma = true; + } +#endif + } else + if (free_tx_buffer->skb) { + dev_consume_skb_any(free_tx_buffer->skb); + first_dma = true; + } +#else + if (free_tx_buffer->skb) { + dev_consume_skb_any(free_tx_buffer->skb); + first_dma = true; + } +#endif + if (first_dma) { + if (dma_unmap_len(free_tx_buffer, len)) { + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(free_tx_buffer, dma), + dma_unmap_len(free_tx_buffer, len), + DMA_TO_DEVICE); + } + /* clear tx_buffer data */ +#ifdef HAVE_XDP_SUPPORT + if (ring_is_xdp(tx_ring)) +#ifdef HAVE_XDP_FRAME_STRUCT + free_tx_buffer->xdpf = NULL; +#else + free_tx_buffer->data = NULL; +#endif + else +#endif + /* clear tx_buffer data */ + free_tx_buffer->skb = NULL; + dma_unmap_len_set(free_tx_buffer, len, 0); + free_tx_buffer->va = NULL; + first_dma = false; + } else { + /* unmap any remaining paged data */ + if (dma_unmap_len(free_tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(free_tx_buffer, dma), + dma_unmap_len(free_tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(free_tx_buffer, len, 0); + free_tx_buffer->va = NULL; + } - return err; -} + } -static void txgbe_up_complete(struct wx *wx) -{ - struct net_device *netdev = wx->netdev; - struct txgbe *txgbe; + free_tx_buffer++; + ntf++; + if (unlikely(!ntf)) { + ntf -= tx_ring->count; + free_tx_buffer = tx_ring->tx_buffer_info; + } - wx_control_hw(wx, true); - wx_configure_vectors(wx); + unmapped_descs--; + }; + + ntf += tx_ring->count; + tx_ring->next_to_free = ntf; + /* need update next_to_free before next_to_clean */ + wmb(); + tx_ring->next_to_clean = i; + + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + if (check_for_tx_hang(tx_ring) && txgbe_check_tx_hang(tx_ring)) { + /* schedule immediate reset if we believe we hung */ + struct txgbe_hw *hw = &adapter->hw; + + e_err(drv, "Detected Tx Unit Hang%s\n" + " Tx Queue <%d>\n" + " TDH, TDT <%x>, <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "tx_buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " jiffies <%lx>\n", + ring_is_xdp(tx_ring) ? 
" (XDP)" : "", + tx_ring->queue_index, + rd32(hw, TXGBE_PX_TR_RP(tx_ring->reg_idx)), + rd32(hw, TXGBE_PX_TR_WP(tx_ring->reg_idx)), + tx_ring->next_to_use, i, + tx_ring->tx_buffer_info[i].time_stamp, jiffies); + + if (netif_msg_tx_err(adapter)) { + for (j = 0; j < tx_ring->count; j++) { + tx_desc = TXGBE_TX_DESC(tx_ring, j); + if (tx_desc->read.olinfo_status != 0x1) + e_warn(tx_err, "q_[%d][%d]:0x%llx, 0x%x, 0x%x\n", + tx_ring->reg_idx, j, tx_desc->read.buffer_addr, tx_desc->read.cmd_type_len, tx_desc->read.olinfo_status); + } + } - /* make sure to complete pre-operations */ - smp_mb__before_atomic(); - wx_napi_enable_all(wx); + if (netif_msg_pktdata(adapter)) { + for (j = 0; j < tx_ring->count; j++) { + tx_buffer = &tx_ring->tx_buffer_info[j]; + size = dma_unmap_len(tx_buffer, len); + if (size != 0 && tx_buffer->va) { + e_err(pktdata, "tx buffer[%d][%d]: \n", tx_ring->queue_index, j); + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, tx_buffer->va, size, true); + } + } + + for (j = 0; j < tx_ring->count; j++) { + tx_buffer = &tx_ring->tx_buffer_info[j]; + if (tx_buffer->skb) { + e_err(pktdata, "****skb in tx buffer[%d][%d]: *******\n", tx_ring->queue_index, j); + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, tx_buffer->skb, sizeof(struct sk_buff), true); + } + } + } - txgbe = netdev_to_txgbe(netdev); - phylink_start(txgbe->phylink); + pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &vid); + if (vid == TXGBE_FAILED_READ_CFG_WORD) { + e_info(hw, "pcie link has been lost.\n"); + } - /* clear any pending interrupts, may auto mask */ - rd32(wx, WX_PX_IC(0)); - rd32(wx, WX_PX_IC(1)); - rd32(wx, WX_PX_MISC_IC); - txgbe_irq_enable(wx, true); + if (!ring_is_xdp(tx_ring)) + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + e_info(probe, + "tx hang %d detected on queue %d, resetting adapter\n", + adapter->tx_timeout_count + 1, tx_ring->queue_index); + if (TXGBE_RECOVER_CHECK == 1) { + if (vid == TXGBE_FAILED_READ_CFG_WORD) { + txgbe_tx_timeout_dorecovery(adapter); + } else { + txgbe_print_tx_hang_status(adapter); + txgbe_tx_timeout_reset(adapter); + } + } else { + txgbe_tx_timeout_dorecovery(adapter); + } - /* enable transmits */ - netif_tx_start_all_queues(netdev); + /* the adapter is about to reset, no point in enabling stuff */ + return true; + } + if(ring_is_xdp(tx_ring)) + return !!budget; + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (txgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+ */ + smp_mb(); +#ifdef HAVE_TX_MQ + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) + && !test_bit(__TXGBE_DOWN, &adapter->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } +#else + if (netif_queue_stopped(tx_ring->netdev) && + !test_bit(__TXGBE_DOWN, &adapter->state)) { + netif_wake_queue(tx_ring->netdev); + ++tx_ring->tx_stats.restart_queue; + } +#endif + } + + return !!budget; } -static void txgbe_reset(struct wx *wx) + +#ifdef NETIF_F_RXHASH +#define TXGBE_RSS_L4_TYPES_MASK \ + ((1ul << TXGBE_RXD_RSSTYPE_IPV4_TCP) | \ + (1ul << TXGBE_RXD_RSSTYPE_IPV4_UDP) | \ + (1ul << TXGBE_RXD_RSSTYPE_IPV4_SCTP) | \ + (1ul << TXGBE_RXD_RSSTYPE_IPV6_TCP) | \ + (1ul << TXGBE_RXD_RSSTYPE_IPV6_UDP) | \ + (1ul << TXGBE_RXD_RSSTYPE_IPV6_SCTP)) + +static inline void txgbe_rx_hash(struct txgbe_ring *ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) { - struct net_device *netdev = wx->netdev; - u8 old_addr[ETH_ALEN]; - int err; + u16 rss_type; - err = txgbe_reset_hw(wx); - if (err != 0) - wx_err(wx, "Hardware Error: %d\n", err); + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; - wx_start_hw(wx); - /* do not flush user set addresses */ - memcpy(old_addr, &wx->mac_table[0].addr, netdev->addr_len); - wx_flush_sw_mac_table(wx); - wx_mac_set_default_filter(wx, old_addr); + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + TXGBE_RXD_RSSTYPE_MASK; + + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (TXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? + PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); } +#endif /* NETIF_F_RXHASH */ -static void txgbe_disable_device(struct wx *wx) +#if IS_ENABLED(CONFIG_FCOE) +/** + * txgbe_rx_is_fcoe - check the rx desc for incoming pkt type + * @ring: structure containing ring specific data + * @rx_desc: advanced rx descriptor + * + * Returns : true if it is FCoE pkt + */ +static inline bool txgbe_rx_is_fcoe(struct txgbe_ring *ring, + union txgbe_rx_desc *rx_desc) { - struct net_device *netdev = wx->netdev; - u32 i; + u8 ptype = TXGBE_RXD_PKTTYPE(rx_desc); - wx_disable_pcie_master(wx); - /* disable receives */ - wx_disable_rx(wx); + return test_bit(__TXGBE_RX_FCOE, &ring->state) && + ptype >= TXGBE_PTYPE_L2_FCOE && + ptype <= TXGBE_PTYPE_L2_FCOE_VFT_FCOTHER; +} +#endif /* CONFIG_FCOE */ - /* disable all enabled rx queues */ - for (i = 0; i < wx->num_rx_queues; i++) - /* this call also flushes the previous write */ - wx_disable_rx_queue(wx, wx->rx_ring[i]); +/** + * txgbe_rx_checksum - indicate in skb if hw indicated a good cksum + * @ring: structure containing ring specific data + * @rx_desc: current Rx descriptor being processed + * @skb: skb currently being received and modified + **/ +static inline void txgbe_rx_checksum(struct txgbe_ring *ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + txgbe_dptype dptype = decode_rx_desc_ptype(rx_desc); - netif_tx_stop_all_queues(netdev); - netif_tx_disable(netdev); + skb->ip_summed = CHECKSUM_NONE; - wx_irq_disable(wx); - wx_napi_disable_all(wx); + skb_checksum_none_assert(skb); - if (wx->bus.func < 2) - wr32m(wx, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wx->bus.func), 0); - else - wx_err(wx, "%s: invalid bus lan id %d\n", - __func__, wx->bus.func); + /* Rx csum disabled */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; - if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || - ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) { - 
/* disable mac transmiter */ - wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); + /* if IPv4 header checksum error */ + if ((txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_IPCS) && + txgbe_test_staterr(rx_desc, TXGBE_RXD_ERR_IPE)) || + (txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_OUTERIPCS) && + txgbe_test_staterr(rx_desc, TXGBE_RXD_ERR_OUTERIPER))) { + ring->rx_stats.csum_err++; + return; } - /* disable transmits in the hardware now that interrupts are off */ - for (i = 0; i < wx->num_tx_queues; i++) { - u8 reg_idx = wx->tx_ring[i]->reg_idx; + /* L4 checksum offload flag must set for the below code to work */ + if (!txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_L4CS)) + return; + + /*likely incorrect csum if IPv6 Dest Header found */ + if (dptype.prot != TXGBE_DEC_PTYPE_PROT_SCTP && + txgbe_test_staterr(rx_desc, TXGBE_RXD_IPV6EX)) + return; - wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH); + /* if L4 checksum error */ + if (txgbe_test_staterr(rx_desc, TXGBE_RXD_ERR_TCPE)) { + ring->rx_stats.csum_err++; + return; + } + /* If there is an outer header present that might contain a checksum + * we need to bump the checksum level by 1 to reflect the fact that + * we are indicating we validated the inner checksum. + */ + if (dptype.etype >= TXGBE_DEC_PTYPE_ETYPE_IG) { + #ifdef HAVE_SKBUFF_CSUM_LEVEL + skb->csum_level = 1; + #endif /* FIXME :does skb->csum_level skb->encapsulation can both set ? */ + #ifdef HAVE_VXLAN_CHECKS + skb->encapsulation = 1; + #endif } - /* Disable the Tx DMA engine */ - wr32m(wx, WX_TDM_CTL, WX_TDM_CTL_TE, 0); + /* It must be a TCP or UDP or SCTP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + ring->rx_stats.csum_good_cnt++; } -static void txgbe_down(struct wx *wx) +static bool txgbe_alloc_mapped_skb(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *bi) { - struct txgbe *txgbe = netdev_to_txgbe(wx->netdev); + struct sk_buff *skb = bi->skb; + dma_addr_t dma = bi->dma; + + if (unlikely(dma)) + return true; + + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_buf_len); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + bi->skb = skb; + + } - txgbe_disable_device(wx); - txgbe_reset(wx); - phylink_stop(txgbe->phylink); + dma = dma_map_single(rx_ring->dev, skb->data, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + dev_kfree_skb_any(skb); + bi->skb = NULL; + + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } - wx_clean_all_tx_rings(wx); - wx_clean_all_rx_rings(wx); + bi->dma = dma; + return true; } -/** - * txgbe_init_type_code - Initialize the shared code - * @wx: pointer to hardware structure - **/ -static void txgbe_init_type_code(struct wx *wx) +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT +static bool txgbe_alloc_mapped_page(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *bi) { - u8 device_type = wx->subsystem_device_id & 0xF0; - - switch (wx->device_id) { - case TXGBE_DEV_ID_SP1000: - case TXGBE_DEV_ID_WX1820: - wx->mac.type = wx_mac_sp; - break; - default: - wx->mac.type = wx_mac_unknown; - break; + struct page *page = bi->page; + dma_addr_t dma; +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif + + /* since we are recycling buffers 
we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(txgbe_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; } - switch (device_type) { - case TXGBE_ID_SFP: - wx->media_type = sp_media_fiber; - break; - case TXGBE_ID_XAUI: - case TXGBE_ID_SGMII: - wx->media_type = sp_media_copper; - break; - case TXGBE_ID_KR_KX_KX4: - case TXGBE_ID_MAC_XAUI: - case TXGBE_ID_MAC_SGMII: - wx->media_type = sp_media_backplane; - break; - case TXGBE_ID_SFI_XAUI: - if (wx->bus.func == 0) - wx->media_type = sp_media_fiber; - else - wx->media_type = sp_media_copper; - break; - default: - wx->media_type = sp_media_unknown; - break; + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + txgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + TXGBE_RX_DMA_ATTR); +#endif + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, txgbe_rx_pg_order(rx_ring)); + + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; } + + bi->page_dma = dma; + bi->page = page; + bi->page_offset = txgbe_rx_offset(rx_ring); +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; +#else + bi->pagecnt_bias = 1; +#endif + return true; } +#endif /** - * txgbe_sw_init - Initialize general software structures (struct wx) - * @wx: board private structure to initialize + * txgbe_release_rx_desc - Store the new tail and head values + * @rx_ring: ring to bump + * @val: new head index **/ -static int txgbe_sw_init(struct wx *wx) +static void txgbe_release_rx_desc(struct txgbe_ring *rx_ring, u32 val) { - u16 msix_count = 0; - int err; + rx_ring->next_to_use = val; +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; +#endif + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + wmb(); + writel(val, rx_ring->tail); +} - wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES; - wx->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES; - wx->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES; - wx->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE; - wx->mac.vft_size = TXGBE_SP_VFT_TBL_SIZE; - wx->mac.rx_pb_size = TXGBE_SP_RX_PB_SIZE; - wx->mac.tx_pb_size = TXGBE_SP_TDB_PB_SZ; +/** + * txgbe_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +bool txgbe_alloc_rx_buffers(struct txgbe_ring *rx_ring, u16 cleaned_count) +{ + union txgbe_rx_desc *rx_desc; + struct txgbe_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + + /* nothing to do */ + if (!cleaned_count) + return false; + + rx_desc = TXGBE_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + do { +#ifdef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + if (!txgbe_alloc_mapped_skb(rx_ring, bi)) + goto no_buffers; + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + +#else + if (ring_is_hs_enabled(rx_ring)) { + if (!txgbe_alloc_mapped_skb(rx_ring, bi)) + goto no_buffers; + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + } - /* PCI config space info */ - err = wx_sw_init(wx); - if (err < 0) - return err; + if (!txgbe_alloc_mapped_page(rx_ring, bi)) + goto no_buffers; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->page_dma, + bi->page_offset, + txgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + rx_desc->read.pkt_addr = + cpu_to_le64(bi->page_dma + bi->page_offset); +#endif + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = TXGBE_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } - txgbe_init_type_code(wx); + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.upper.status_error = 0; + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; - /* Set common capability flags and settings */ - wx->max_q_vectors = TXGBE_MAX_MSIX_VECTORS; - err = wx_get_pcie_msix_counts(wx, &msix_count, TXGBE_MAX_MSIX_VECTORS); - if (err) - wx_err(wx, "Do not support MSI-X\n"); - wx->mac.max_msix_vectors = msix_count; + cleaned_count--; + } while (cleaned_count); - /* enable itr by default in dynamic mode */ - wx->rx_itr_setting = 1; - wx->tx_itr_setting = 1; + i += rx_ring->count; - /* set default ring sizes */ - wx->tx_ring_count = TXGBE_DEFAULT_TXD; - wx->rx_ring_count = TXGBE_DEFAULT_RXD; + if (rx_ring->next_to_use != i) + txgbe_release_rx_desc(rx_ring, i); - /* set default work limits */ - wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK; - wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK; + return false; - return 0; +no_buffers: + i += rx_ring->count; + + if (rx_ring->next_to_use != i) + txgbe_release_rx_desc(rx_ring, i); + + return true; +} + +static inline u16 txgbe_get_hlen(struct txgbe_ring *rx_ring, + union txgbe_rx_desc *rx_desc) +{ + __le16 hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; + u16 hlen = le16_to_cpu(hdr_info) & TXGBE_RXD_HDRBUFLEN_MASK; + + UNREFERENCED_PARAMETER(rx_ring); + + if (hlen > (TXGBE_RX_HDR_SIZE << TXGBE_RXD_HDRBUFLEN_SHIFT)) + hlen = 0; + else + hlen >>= TXGBE_RXD_HDRBUFLEN_SHIFT; + + return hlen; +} + +#ifdef CONFIG_TXGBE_DISABLE_PACKET_SPLIT +/** + * txgbe_merge_active_tail - merge active tail into lro skb + * @tail: pointer to active tail in frag_list + * + * This function merges the length and data of an active tail into the + * skb containing the frag_list. 
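Stepping back to the refill loop in txgbe_alloc_rx_buffers above: the loop counter is biased by -count so that a single !i test detects ring wrap-around, with no per-iteration compare against the ring size. A minimal stand-alone sketch of the same bookkeeping, with made-up ring numbers:

#include <stdio.h>

int main(void)
{
        unsigned short count = 8, next_to_use = 6, to_fill = 5;
        unsigned short i = next_to_use;

        i -= count;                             /* bias: i now counts up toward zero */
        do {
                unsigned short slot = i + count;        /* index actually used */

                printf("fill slot %u\n", slot);
                i++;
                if (!i)                         /* reached the end of the ring...    */
                        i -= count;             /* ...so restart from slot 0         */
        } while (--to_fill);
        i += count;                             /* undo the bias before storing next_to_use */
        printf("new next_to_use = %u\n", i);    /* slots 6, 7, 0, 1, 2 filled; prints 3 */
        return 0;
}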
It resets the tail's pointer to the head, + * but it leaves the heads pointer to tail intact. + **/ +static inline struct sk_buff *txgbe_merge_active_tail(struct sk_buff *tail) +{ + struct sk_buff *head = TXGBE_CB(tail)->head; + + if (!head) + return tail; + + head->len += tail->len; + head->data_len += tail->len; + head->truesize += tail->truesize; + + TXGBE_CB(tail)->head = NULL; + + return head; +} + +/** + * txgbe_add_active_tail - adds an active tail into the skb frag_list + * @head: pointer to the start of the skb + * @tail: pointer to active tail to add to frag_list + * + * This function adds an active tail to the end of the frag list. This tail + * will still be receiving data so we cannot yet ad it's stats to the main + * skb. That is done via txgbe_merge_active_tail. + **/ +static inline void txgbe_add_active_tail(struct sk_buff *head, + struct sk_buff *tail) +{ + struct sk_buff *old_tail = TXGBE_CB(head)->tail; + + if (old_tail) { + txgbe_merge_active_tail(old_tail); + old_tail->next = tail; + } else { + skb_shinfo(head)->frag_list = tail; + } + + TXGBE_CB(tail)->head = head; + TXGBE_CB(head)->tail = tail; +} + +/** + * txgbe_close_active_frag_list - cleanup pointers on a frag_list skb + * @head: pointer to head of an active frag list + * + * This function will clear the frag_tail_tracker pointer on an active + * frag_list and returns true if the pointer was actually set + **/ +static inline bool txgbe_close_active_frag_list(struct sk_buff *head) +{ + struct sk_buff *tail = TXGBE_CB(head)->tail; + + if (!tail) + return false; + + txgbe_merge_active_tail(tail); + + TXGBE_CB(head)->tail = NULL; + + return true; +} + +#endif +#ifdef HAVE_VLAN_RX_REGISTER +/** + * txgbe_receive_skb - Send a completed packet up the stack + * @q_vector: structure containing interrupt and ring information + * @skb: packet to send up + **/ +static void txgbe_receive_skb(struct txgbe_q_vector *q_vector, + struct sk_buff *skb) +{ + u16 vlan_tag = TXGBE_CB(skb)->vid; + +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) + if (vlan_tag & VLAN_VID_MASK) { + /* by placing vlgrp at start of structure we can alias it */ + struct vlan_group **vlgrp = netdev_priv(skb->dev); + if (!*vlgrp) + dev_kfree_skb_any(skb); + else if (q_vector->netpoll_rx) + vlan_hwaccel_rx(skb, *vlgrp, vlan_tag); + else + vlan_gro_receive(&q_vector->napi, + *vlgrp, vlan_tag, skb); + } else { +#endif + if (q_vector->netpoll_rx) + netif_rx(skb); + else + napi_gro_receive(&q_vector->napi, skb); +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) + } +#endif +} + +#endif /* HAVE_VLAN_RX_REGISTER */ +#ifndef TXGBE_NO_LRO +/** + * txgbe_can_lro - returns true if packet is TCP/IPV4 and LRO is enabled + * @rx_ring: structure containing ring specific data + * @rx_desc: pointer to the rx descriptor + * @skb: pointer to the skb to be merged + * + **/ +static inline bool txgbe_can_lro(struct txgbe_ring *rx_ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct iphdr *iph = (struct iphdr *)skb->data; + txgbe_dptype dec_ptype = decode_rx_desc_ptype(rx_desc); + + /* verify hardware indicates this is IPv4/TCP */ + if (!dec_ptype.known || + TXGBE_DEC_PTYPE_ETYPE_NONE != dec_ptype.etype || + TXGBE_DEC_PTYPE_IP_IPV4 != dec_ptype.ip || + TXGBE_DEC_PTYPE_PROT_TCP != dec_ptype.prot) + return false; + + /* .. and RSC is not already enabled */ + if (ring_is_rsc_enabled(rx_ring)) + return false; + + /* .. 
and LRO is enabled */ + if (!(rx_ring->netdev->features & NETIF_F_LRO)) + return false; + + /* .. and we are not in promiscuous mode */ + if (rx_ring->netdev->flags & IFF_PROMISC) + return false; + + /* .. and the header is large enough for us to read IP/TCP fields */ + if (!pskb_may_pull(skb, sizeof(struct txgbe_lrohdr))) + return false; + + /* .. and there are no VLANs on packet */ + if (skb->protocol != __constant_htons(ETH_P_IP)) + return false; + + /* .. and we are version 4 with no options */ + if (*(u8 *)iph != 0x45) + return false; + + /* .. and the packet is not fragmented */ + if (iph->frag_off & htons(IP_MF | IP_OFFSET)) + return false; + + /* .. and that next header is TCP */ + if (iph->protocol != IPPROTO_TCP) + return false; + + return true; +} + +static inline struct txgbe_lrohdr *txgbe_lro_hdr(struct sk_buff *skb) +{ + return (struct txgbe_lrohdr *)skb->data; +} + +/** + * txgbe_lro_flush - Indicate packets to upper layer. + * + * Update IP and TCP header part of head skb if more than one + * skb's chained and indicate packets to upper layer. + **/ +static void txgbe_lro_flush(struct txgbe_q_vector *q_vector, + struct sk_buff *skb) +{ + struct txgbe_lro_list *lrolist = &q_vector->lrolist; + + __skb_unlink(skb, &lrolist->active); + + if (TXGBE_CB(skb)->append_cnt) { + struct txgbe_lrohdr *lroh = txgbe_lro_hdr(skb); + +#ifdef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + /* close any active lro contexts */ + txgbe_close_active_frag_list(skb); + +#endif + /* incorporate ip header and re-calculate checksum */ + lroh->iph.tot_len = ntohs(skb->len); + lroh->iph.check = 0; + + /* header length is 5 since we know no options exist */ + lroh->iph.check = ip_fast_csum((u8 *)lroh, 5); + + /* clear TCP checksum to indicate we are an LRO frame */ + lroh->th.check = 0; + + /* incorporate latest timestamp into the tcp header */ + if (TXGBE_CB(skb)->tsecr) { + lroh->ts[2] = TXGBE_CB(skb)->tsecr; + lroh->ts[1] = htonl(TXGBE_CB(skb)->tsval); + } +#ifdef NETIF_F_GSO +#ifdef NAPI_GRO_CB + NAPI_GRO_CB(skb)->data_offset = 0; +#endif + skb_shinfo(skb)->gso_size = TXGBE_CB(skb)->mss; + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; +#endif + } + +#ifdef HAVE_VLAN_RX_REGISTER + txgbe_receive_skb(q_vector, skb); +#else + napi_gro_receive(&q_vector->napi, skb); +#endif + lrolist->stats.flushed++; +} + +static void txgbe_lro_flush_all(struct txgbe_q_vector *q_vector) +{ + struct txgbe_lro_list *lrolist = &q_vector->lrolist; + struct sk_buff *skb, *tmp; + + skb_queue_reverse_walk_safe(&lrolist->active, skb, tmp) + txgbe_lro_flush(q_vector, skb); +} + +/* + * txgbe_lro_header_ok - Main LRO function. 
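The cheapest of the txgbe_can_lro checks above is the single byte compare against 0x45: it folds the IP version test and the no-options test into one load. A tiny stand-alone decode of that byte, just to make the two nibbles explicit:

#include <stdio.h>

int main(void)
{
        unsigned char first_byte = 0x45;        /* first octet of an IPv4 header      */
        unsigned int version = first_byte >> 4; /* high nibble: IP version            */
        unsigned int ihl = first_byte & 0x0f;   /* low nibble: header length in words */

        printf("version=%u header=%u bytes\n", version, ihl * 4);
        /* prints version=4 header=20 bytes, i.e. IPv4 with no IP options,
         * which is the only shape the LRO coalescing path accepts
         */
        return 0;
}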
+ **/ +static void txgbe_lro_header_ok(struct sk_buff *skb) +{ + struct txgbe_lrohdr *lroh = txgbe_lro_hdr(skb); + u16 opt_bytes, data_len; + +#ifdef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + TXGBE_CB(skb)->tail = NULL; +#endif + TXGBE_CB(skb)->tsecr = 0; + TXGBE_CB(skb)->append_cnt = 0; + TXGBE_CB(skb)->mss = 0; + + /* ensure that the checksum is valid */ + if (skb->ip_summed != CHECKSUM_UNNECESSARY) + return; + + /* If we see CE codepoint in IP header, packet is not mergeable */ + if (INET_ECN_is_ce(ipv4_get_dsfield(&lroh->iph))) + return; + + /* ensure no bits set besides ack or psh */ + if (lroh->th.fin || lroh->th.syn || lroh->th.rst || + lroh->th.urg || lroh->th.ece || lroh->th.cwr || + !lroh->th.ack) + return; + + /* store the total packet length */ + data_len = ntohs(lroh->iph.tot_len); + + /* remove any padding from the end of the skb */ + __pskb_trim(skb, data_len); + + /* remove header length from data length */ + data_len -= sizeof(struct txgbe_lrohdr); + + /* + * check for timestamps. Since the only option we handle are timestamps, + * we only have to handle the simple case of aligned timestamps + */ + opt_bytes = (lroh->th.doff << 2) - sizeof(struct tcphdr); + if (opt_bytes != 0) { + if ((opt_bytes != TCPOLEN_TSTAMP_ALIGNED) || + !pskb_may_pull(skb, sizeof(struct txgbe_lrohdr) + + TCPOLEN_TSTAMP_ALIGNED) || + (lroh->ts[0] != htonl((TCPOPT_NOP << 24) | + (TCPOPT_NOP << 16) | + (TCPOPT_TIMESTAMP << 8) | + TCPOLEN_TIMESTAMP)) || + (lroh->ts[2] == 0)) { + return; + } + + TXGBE_CB(skb)->tsval = ntohl(lroh->ts[1]); + TXGBE_CB(skb)->tsecr = lroh->ts[2]; + + data_len -= TCPOLEN_TSTAMP_ALIGNED; + } + + /* record data_len as mss for the packet */ + TXGBE_CB(skb)->mss = data_len; + TXGBE_CB(skb)->next_seq = ntohl(lroh->th.seq); +} + +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT +static void txgbe_merge_frags(struct sk_buff *lro_skb, struct sk_buff *new_skb) +{ + struct skb_shared_info *sh_info; + struct skb_shared_info *new_skb_info; + unsigned int data_len; + + sh_info = skb_shinfo(lro_skb); + new_skb_info = skb_shinfo(new_skb); + + /* copy frags into the last skb */ + memcpy(sh_info->frags + sh_info->nr_frags, + new_skb_info->frags, + new_skb_info->nr_frags * sizeof(skb_frag_t)); + + /* copy size data over */ + sh_info->nr_frags += new_skb_info->nr_frags; + data_len = TXGBE_CB(new_skb)->mss; + lro_skb->len += data_len; + lro_skb->data_len += data_len; + lro_skb->truesize += data_len; + + /* wipe record of data from new_skb and free it */ + new_skb_info->nr_frags = 0; + new_skb->len = new_skb->data_len = 0; + dev_kfree_skb_any(new_skb); +} + +#endif /* CONFIG_TXGBE_DISABLE_PACKET_SPLIT */ +/** + * txgbe_lro_receive - if able, queue skb into lro chain + * @q_vector: structure containing interrupt and ring information + * @new_skb: pointer to current skb being checked + * + * Checks whether the skb given is eligible for LRO and if that's + * fine chains it to the existing lro_skb based on flowid. If an LRO for + * the flow doesn't exist create one. 
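The option test in txgbe_lro_header_ok above only admits the common aligned-timestamp layout, and the 32-bit constant it compares ts[0] against is simply the standard NOP/NOP/kind/length prefix. A short sketch spelling that constant out, using the usual TCP option values (TCPOPT_NOP=1, TCPOPT_TIMESTAMP=8, TCPOLEN_TIMESTAMP=10):

#include <stdio.h>

#define TCPOPT_NOP              1       /* one-byte padding option                   */
#define TCPOPT_TIMESTAMP        8       /* RFC 7323 timestamp option kind            */
#define TCPOLEN_TIMESTAMP       10      /* kind + len + 4-byte TSval + 4-byte TSecr  */

int main(void)
{
        unsigned int word = (TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                            (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP;

        /* 0x0101080a: two NOPs for 4-byte alignment, then kind=8 and len=10;
         * the TSval and TSecr words that follow in the packet are what the
         * driver stashes from ts[1] and ts[2]
         */
        printf("0x%08x\n", word);
        return 0;
}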
+ **/ +static void txgbe_lro_receive(struct txgbe_q_vector *q_vector, + struct sk_buff *new_skb) +{ + struct sk_buff *lro_skb; + struct txgbe_lro_list *lrolist = &q_vector->lrolist; + struct txgbe_lrohdr *lroh = txgbe_lro_hdr(new_skb); + __be32 saddr = lroh->iph.saddr; + __be32 daddr = lroh->iph.daddr; + __be32 tcp_ports = *(__be32 *)&lroh->th; +#ifdef HAVE_VLAN_RX_REGISTER + u16 vid = TXGBE_CB(new_skb)->vid; +#else + u16 vid = new_skb->vlan_tci; +#endif + + txgbe_lro_header_ok(new_skb); + + /* + * we have a packet that might be eligible for LRO, + * so see if it matches anything we might expect + */ + skb_queue_walk(&lrolist->active, lro_skb) { + u16 data_len; + + if (*(__be32 *)&txgbe_lro_hdr(lro_skb)->th != tcp_ports || + txgbe_lro_hdr(lro_skb)->iph.saddr != saddr || + txgbe_lro_hdr(lro_skb)->iph.daddr != daddr) + continue; + +#ifdef HAVE_VLAN_RX_REGISTER + if (TXGBE_CB(lro_skb)->vid != vid) +#else + if (lro_skb->vlan_tci != vid) +#endif + continue; + + /* out of order packet */ + if (TXGBE_CB(lro_skb)->next_seq != + TXGBE_CB(new_skb)->next_seq) { + txgbe_lro_flush(q_vector, lro_skb); + TXGBE_CB(new_skb)->mss = 0; + break; + } + + /* TCP timestamp options have changed */ + if (!TXGBE_CB(lro_skb)->tsecr != !TXGBE_CB(new_skb)->tsecr) { + txgbe_lro_flush(q_vector, lro_skb); + break; + } + + /* make sure timestamp values are increasing */ + if (TXGBE_CB(lro_skb)->tsecr && + TXGBE_CB(lro_skb)->tsval > TXGBE_CB(new_skb)->tsval) { + txgbe_lro_flush(q_vector, lro_skb); + TXGBE_CB(new_skb)->mss = 0; + break; + } + + data_len = TXGBE_CB(new_skb)->mss; + + /* Check for all of the above below + * malformed header + * no tcp data + * resultant packet would be too large + * new skb is larger than our current mss + * data would remain in header + * we would consume more frags then the sk_buff contains + * ack sequence numbers changed + * window size has changed + */ + if (data_len == 0 || + data_len > TXGBE_CB(lro_skb)->mss || + data_len > TXGBE_CB(lro_skb)->free || +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + data_len != new_skb->data_len || + skb_shinfo(new_skb)->nr_frags >= + (MAX_SKB_FRAGS - skb_shinfo(lro_skb)->nr_frags) || +#endif + txgbe_lro_hdr(lro_skb)->th.ack_seq != lroh->th.ack_seq || + txgbe_lro_hdr(lro_skb)->th.window != lroh->th.window) { + txgbe_lro_flush(q_vector, lro_skb); + break; + } + + /* Remove IP and TCP header */ + skb_pull(new_skb, new_skb->len - data_len); + + /* update timestamp and timestamp echo response */ + TXGBE_CB(lro_skb)->tsval = TXGBE_CB(new_skb)->tsval; + TXGBE_CB(lro_skb)->tsecr = TXGBE_CB(new_skb)->tsecr; + + /* update sequence and free space */ + TXGBE_CB(lro_skb)->next_seq += data_len; + TXGBE_CB(lro_skb)->free -= data_len; + + /* update append_cnt */ + TXGBE_CB(lro_skb)->append_cnt++; + +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + /* if header is empty pull pages into current skb */ + txgbe_merge_frags(lro_skb, new_skb); +#else + /* chain this new skb in frag_list */ + txgbe_add_active_tail(lro_skb, new_skb); +#endif + + if ((data_len < TXGBE_CB(lro_skb)->mss) || lroh->th.psh || + skb_shinfo(lro_skb)->nr_frags == MAX_SKB_FRAGS) { + txgbe_lro_hdr(lro_skb)->th.psh |= lroh->th.psh; + txgbe_lro_flush(q_vector, lro_skb); + } + + lrolist->stats.coal++; + return; + } + + if (TXGBE_CB(new_skb)->mss && !lroh->th.psh) { + /* if we are at capacity flush the tail */ + if (skb_queue_len(&lrolist->active) >= TXGBE_LRO_MAX) { + lro_skb = skb_peek_tail(&lrolist->active); + if (lro_skb) + txgbe_lro_flush(q_vector, lro_skb); + } + + /* update sequence and free space */ + 
TXGBE_CB(new_skb)->next_seq += TXGBE_CB(new_skb)->mss; + TXGBE_CB(new_skb)->free = 65521 - new_skb->len; + + /* .. and insert at the front of the active list */ + __skb_queue_head(&lrolist->active, new_skb); + + lrolist->stats.coal++; + return; + } + + /* packet not handled by any of the above, pass it to the stack */ +#ifdef HAVE_VLAN_RX_REGISTER + txgbe_receive_skb(q_vector, new_skb); +#else + napi_gro_receive(&q_vector->napi, new_skb); +#endif /* HAVE_VLAN_RX_REGISTER */ +} + +#endif /* TXGBE_NO_LRO */ +#ifdef NETIF_F_GSO +static void txgbe_set_rsc_gso_size(struct txgbe_ring __maybe_unused *ring, + struct sk_buff *skb) +{ + u16 hdr_len = skb_headlen(skb); + + /* set gso_size to avoid messing up TCP MSS */ + skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), + TXGBE_CB(skb)->append_cnt); + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; +} + +#endif /* NETIF_F_GSO */ +static void txgbe_update_rsc_stats(struct txgbe_ring *rx_ring, + struct sk_buff *skb) +{ + /* if append_cnt is 0 then frame is not RSC */ + if (!TXGBE_CB(skb)->append_cnt) + return; + + rx_ring->rx_stats.rsc_count += TXGBE_CB(skb)->append_cnt; + rx_ring->rx_stats.rsc_flush++; + +#ifdef NETIF_F_GSO + txgbe_set_rsc_gso_size(rx_ring, skb); + +#endif + /* gso_size is computed using append_cnt so always clear it last */ + TXGBE_CB(skb)->append_cnt = 0; +} + +static void txgbe_rx_vlan(struct txgbe_ring *ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ +#ifndef HAVE_VLAN_RX_REGISTER + u8 idx = 0; + u16 ethertype; +#endif +#if (defined NETIF_F_HW_VLAN_CTAG_RX) && (defined NETIF_F_HW_VLAN_STAG_RX) + if ((ring->netdev->features & (NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_RX)) && +#elif (defined NETIF_F_HW_VLAN_CTAG_RX) + if ((ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && +#elif (defined NETIF_F_HW_VLAN_STAG_RX) + if ((ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) && +#else + if ((ring->netdev->features & NETIF_F_HW_VLAN_RX) && +#endif + txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_VP)) +#ifndef HAVE_VLAN_RX_REGISTER + { + idx = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + TXGBE_RXD_TPID_MASK) >> TXGBE_RXD_TPID_SHIFT; + ethertype = ring->q_vector->adapter->hw.tpid[idx]; + __vlan_hwaccel_put_tag(skb, + htons(ethertype), + le16_to_cpu(rx_desc->wb.upper.vlan)); + } +#else /* !HAVE_VLAN_RX_REGISTER */ + TXGBE_CB(skb)->vid = le16_to_cpu(rx_desc->wb.upper.vlan); + else + TXGBE_CB(skb)->vid = 0; +#endif /* !HAVE_VLAN_RX_REGISTER */ +} + +/** + * txgbe_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. 
+ **/ +void txgbe_process_skb_fields(struct txgbe_ring *rx_ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ +#ifdef HAVE_PTP_1588_CLOCK + u32 flags = rx_ring->q_vector->adapter->flags; +#endif /* HAVE_PTP_1588_CLOCK */ + + txgbe_update_rsc_stats(rx_ring, skb); + +#ifdef NETIF_F_RXHASH + txgbe_rx_hash(rx_ring, rx_desc, skb); +#endif /* NETIF_F_RXHASH */ + + txgbe_rx_checksum(rx_ring, rx_desc, skb); +#ifdef HAVE_PTP_1588_CLOCK + if (unlikely(flags & TXGBE_FLAG_RX_HWTSTAMP_ENABLED) && + unlikely(txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_TS))) { + txgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb); + rx_ring->last_rx_timestamp = jiffies; + } +#endif /* HAVE_PTP_1588_CLOCK */ + + txgbe_rx_vlan(rx_ring, rx_desc, skb); + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +void txgbe_rx_skb(struct txgbe_q_vector *q_vector, + struct txgbe_ring *rx_ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ +#ifdef HAVE_NDO_BUSY_POLL + skb_mark_napi_id(skb, &q_vector->napi); + + if (txgbe_qv_busy_polling(q_vector) || q_vector->netpoll_rx) { + netif_receive_skb(skb); + /* exit early if we busy polled */ + return; + } +#endif + +#ifndef TXGBE_NO_LRO + if (txgbe_can_lro(rx_ring, rx_desc, skb)) + txgbe_lro_receive(q_vector, skb); + else +#endif +#ifdef HAVE_VLAN_RX_REGISTER + txgbe_receive_skb(q_vector, skb); +#else + napi_gro_receive(&q_vector->napi, skb); +#endif + +#ifndef NETIF_F_GRO + rx_ring->netdev->last_rx = jiffies; +#endif +} + +/** + * txgbe_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool txgbe_is_non_eop(struct txgbe_ring *rx_ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + struct txgbe_rx_buffer *rx_buffer = + &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; +#else + struct sk_buff *next_skb; +#endif + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(TXGBE_RX_DESC(rx_ring, ntc)); + + /* update RSC append count if present */ + if (ring_is_rsc_enabled(rx_ring)) { + __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data & + cpu_to_le32(TXGBE_RXD_RSCCNT_MASK); + + if (unlikely(rsc_enabled)) { + u32 rsc_cnt = le32_to_cpu(rsc_enabled); + + rsc_cnt >>= TXGBE_RXD_RSCCNT_SHIFT; + TXGBE_CB(skb)->append_cnt += rsc_cnt - 1; + + /* update ntc based on RSC value */ + ntc = le32_to_cpu(rx_desc->wb.upper.status_error); + ntc &= TXGBE_RXD_NEXTP_MASK; + ntc >>= TXGBE_RXD_NEXTP_SHIFT; + } + } + + /* if we are the last buffer then there is nothing else to do */ + if (likely(txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_EOP))) + return false; + + /* place skb in next buffer to be received */ +#ifdef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + next_skb = rx_ring->rx_buffer_info[ntc].skb; + + txgbe_add_active_tail(skb, next_skb); + TXGBE_CB(next_skb)->head = skb; +#else + if (ring_is_hs_enabled(rx_ring)) { + rx_buffer->skb = rx_ring->rx_buffer_info[ntc].skb; + rx_buffer->dma = rx_ring->rx_buffer_info[ntc].dma; + rx_ring->rx_buffer_info[ntc].dma = 0; + } + rx_ring->rx_buffer_info[ntc].skb = skb; +#endif + rx_ring->rx_stats.non_eop_descs++; + + return true; +} + +#define TXGBE_TXD_CMD (TXGBE_TXD_EOP | \ + TXGBE_TXD_RS) + +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT +/** + * txgbe_pull_tail - txgbe specific version of skb_pull_tail + * @skb: pointer to current skb being adjusted + * + * This function is an txgbe specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +static void txgbe_pull_tail(struct sk_buff *skb) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* + * it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* + * we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(skb->dev, va, TXGBE_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + skb_frag_off_add(frag, pull_len); + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * txgbe_dma_sync_frag - perform DMA sync for first frag of SKB + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being updated + * + * This function provides a basic DMA sync up for the first fragment of an + * skb. The reason for doing this is that the first fragment cannot be + * unmapped until we have reached the end of packet descriptor for a buffer + * chain. 
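One detail of txgbe_pull_tail above is that it copies slightly more than it accounts for: the memcpy length is rounded up to a long boundary so the copy runs in aligned word-sized chunks, while the skb and fragment bookkeeping move by the exact pull_len. A worked example for a plain TCP/IPv4 frame (the 54-byte figure assumes no IP or TCP options, on a 64-bit build):

#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned int pull_len = 14 + 20 + 20;   /* Ethernet + IPv4 + TCP headers */
        size_t copy_len = ALIGN_UP(pull_len, sizeof(long));

        printf("pull_len=%u copy_len=%zu\n", pull_len, copy_len);
        /* pull_len=54, copy_len=56: two harmless extra bytes land in the
         * linear area, but skb->tail and the first frag still move by 54
         */
        return 0;
}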
+ */ +static void txgbe_dma_sync_frag(struct txgbe_ring *rx_ring, + struct sk_buff *skb) +{ +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); + +#endif + /* if the page was released unmap it, else just sync our portion */ + if (unlikely(TXGBE_CB(skb)->page_released)) { + dma_unmap_page_attrs(rx_ring->dev, TXGBE_CB(skb)->dma, + txgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + TXGBE_RX_DMA_ATTR); +#endif + } else if (ring_uses_build_skb(rx_ring)) { + unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; + + dma_sync_single_range_for_cpu(rx_ring->dev, + TXGBE_CB(skb)->dma, + offset, + skb_headlen(skb), + DMA_FROM_DEVICE); + } else { + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + + dma_sync_single_range_for_cpu(rx_ring->dev, + TXGBE_CB(skb)->dma, + skb_frag_off(frag), + skb_frag_size(frag), + DMA_FROM_DEVICE); + } +} + +/** + * txgbe_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Check for corrupted packet headers caused by senders on the local L2 + * embedded NIC switch not setting up their Tx Descriptors right. These + * should be very rare. + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. + **/ +bool txgbe_cleanup_headers(struct txgbe_ring *rx_ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *netdev = rx_ring->netdev; + + if (IS_ERR(skb)) + return true; + + /* verify that the packet does not have any known errors */ + if (unlikely(txgbe_test_staterr(rx_desc, + TXGBE_RXD_ERR_FRAME_ERR_MASK) && + !(netdev->features & NETIF_F_RXALL))) { + dev_kfree_skb_any(skb); + return true; + } + + /* place header in linear portion of buffer */ + if (!skb_headlen(skb)) + txgbe_pull_tail(skb); + +#if IS_ENABLED(CONFIG_FCOE) + /* do not attempt to pad FCoE Frames as this will disrupt DDP */ + if (txgbe_rx_is_fcoe(rx_ring, rx_desc)) + return false; +#endif + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/** + * txgbe_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void txgbe_reuse_rx_page(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *old_buff) +{ + struct txgbe_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls and unnecessary copy of skb. 
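txgbe_reuse_rx_page above only hands the just-cleaned page back to the ring; the actual ping-pong between the two halves of the page is the page_offset ^= truesize flip that txgbe_add_rx_frag performs a little further down. A toy illustration of that flip, assuming 4 KiB pages so truesize is half a page:

#include <stdio.h>

int main(void)
{
        unsigned int truesize = 2048;   /* half of a 4 KiB page            */
        unsigned int page_offset = 0;   /* half currently owned by the HW  */
        int frame;

        for (frame = 0; frame < 4; frame++) {
                printf("frame %d written at page offset %u\n", frame, page_offset);
                page_offset ^= truesize;        /* give the other half to the HW */
        }
        /* offsets alternate 0, 2048, 0, 2048: while the stack still holds one
         * half as an skb fragment, the device receives into the other half
         */
        return 0;
}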
+ */ + new_buff->page_dma = old_buff->page_dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + + +static inline bool txgbe_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); +} + +/** + * txgbe_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. + **/ +static void txgbe_add_rx_frag(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *rx_buffer, + struct sk_buff *skb, + unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = txgbe_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = rx_ring->rx_offset ? + SKB_DATA_ALIGN(txgbe_rx_offset(rx_ring) + size) : + SKB_DATA_ALIGN(size); +#endif + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); + +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +static unsigned int txgbe_rx_frame_truesize(struct txgbe_ring *rx_ring, + unsigned int size) +{ + + unsigned int truesize; +#if (PAGE_SIZE < 8192) + truesize = txgbe_rx_pg_size(rx_ring) / 2; +#else + truesize = SKB_DATA_ALIGN(TXGBE_SKB_PAD + size) +#ifdef HAVE_XDP_BUFF_FRAME_SZ + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +#endif + ; +#endif + return truesize; +} + +static void txgbe_rx_buffer_flip(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *rx_buffer, + unsigned int size) +{ + unsigned int truesize = txgbe_rx_frame_truesize(rx_ring, size); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + + +static bool txgbe_can_reuse_rx_page(struct txgbe_rx_buffer *rx_buffer) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote pages */ + if (unlikely(txgbe_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) +#else + if (unlikely((page_count(page) - pagecnt_bias) > 1)) +#endif + return false; +#else + /* The last offset is a bit aggressive in that we assume the + * worst case of FCoE being enabled and using a 3K buffer. + * However this should have minimal impact as the 1K extra is + * still less than one buffer in size. + */ +#define TXGBE_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - TXGBE_RXBUFFER_3K) + if (rx_buffer->page_offset > TXGBE_LAST_OFFSET) + return false; +#endif + +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. 
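The drained-pool comment above, together with the page_ref_add() restock that follows, is the other half of a scheme that avoids an atomic get_page()/put_page() per received frame: the refcount is charged once in bulk and then paid down through the local, non-atomic pagecnt_bias. A rough sketch of the accounting, mirroring the HAVE_PAGE_COUNT_BULK_UPDATE path with hypothetical numbers:

#include <stdio.h>

int main(void)
{
        /* state right after txgbe_alloc_mapped_page(): dev_alloc_pages()
         * left the refcount at 1, the driver added USHRT_MAX - 1 in one go
         * and remembers USHRT_MAX as its private bias
         */
        unsigned int page_refcount = 0xffff;
        unsigned int pagecnt_bias  = 0xffff;

        /* handing a fragment to an skb costs only a local decrement
         * (txgbe_get_rx_buffer); the stack's eventual put_page() is what
         * later lowers page_refcount
         */
        pagecnt_bias -= 3;      /* three fragments handed up, none freed yet */

        /* txgbe_can_reuse_rx_page() compares the two values: the difference
         * is how many handed-out fragments are still live somewhere
         */
        printf("outstanding fragments = %u\n", page_refcount - pagecnt_bias);
        return 0;
}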
+ */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } +#else + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. + */ + if (likely(!pagecnt_bias)) { + page_ref_inc(page); + rx_buffer->pagecnt_bias = 1; + } +#endif + + return true; +} + +static void txgbe_put_rx_buffer(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *rx_buffer, + struct sk_buff *skb) +{ +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); + +#endif + if (txgbe_can_reuse_rx_page(rx_buffer)) { + /* hand second half of page back to the ring */ + txgbe_reuse_rx_page(rx_ring, rx_buffer); + } else { + if (!IS_ERR(skb) && TXGBE_CB(skb)->dma == rx_buffer->page_dma) { + /* the page has been released from the ring */ + TXGBE_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->page_dma, + txgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + TXGBE_RX_DMA_ATTR); +#endif + } + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; + rx_buffer->skb = NULL; +} + +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_XDP_FRAME_STRUCT +int txgbe_xmit_xdp_ring(struct txgbe_ring *ring, struct xdp_frame *xdpf) +#else +int txgbe_xmit_xdp_ring(struct txgbe_ring *ring, struct xdp_buff *xdp) +#endif +{ + struct txgbe_tx_buffer *tx_buffer; + union txgbe_tx_desc *tx_desc; + u32 len, cmd_type = 0; + dma_addr_t dma; + u16 i; +#ifdef HAVE_XDP_FRAME_STRUCT + len = xdpf->len; +#else + len = xdp->data_end - xdp->data; +#endif + + if (unlikely(!txgbe_desc_unused(ring))) + return TXGBE_XDP_CONSUMED; + +#ifdef HAVE_XDP_FRAME_STRUCT + dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE); +#else + dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE); +#endif + if (dma_mapping_error(ring->dev, dma)) + return TXGBE_XDP_CONSUMED; + + /* record the location of the first descriptor for this packet */ + tx_buffer = &ring->tx_buffer_info[ring->next_to_use]; + tx_buffer->bytecount = len; + tx_buffer->gso_segs = 1; + tx_buffer->protocol = 0; + + i = ring->next_to_use; + tx_desc = TXGBE_TX_DESC(ring, i); + + + dma_unmap_len_set(tx_buffer, len, len); + dma_unmap_addr_set(tx_buffer, dma, dma); + +#ifdef HAVE_XDP_FRAME_STRUCT + tx_buffer->xdpf = xdpf; +#else + tx_buffer->data = xdp->data; +#endif + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + cmd_type = txgbe_tx_cmd_type(tx_buffer->tx_flags); + cmd_type |= len | TXGBE_TXD_CMD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = + cpu_to_le32(len << TXGBE_TXD_PAYLEN_SHIFT); + + + /* Avoid any potential race with xdp_xmit and cleanup */ + smp_wmb(); +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_AF_XDP_ZC_SUPPORT + ring->xdp_tx_active++; +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#endif /* HAVE_XDP_SUPPORT */ + + /* set next_to_watch value indicating a packet is present */ + i++; + if (i == ring->count) + i = 0; + + tx_buffer->next_to_watch = tx_desc; +#ifdef TXGBE_TXHEAD_WB + tx_buffer->next_eop = i; +#endif + ring->next_to_use = i; + + return TXGBE_XDP_TX; +} +#endif/*HAVE_XDP_SUPPORT*/ +static struct 
sk_buff * +txgbe_run_xdp(struct txgbe_adapter __maybe_unused *adapter, + struct txgbe_ring __maybe_unused *rx_ring, + struct txgbe_rx_buffer __maybe_unused *rx_buffer, + struct xdp_buff __maybe_unused *xdp) +{ + int result = TXGBE_XDP_PASS; +#ifdef HAVE_XDP_SUPPORT + struct bpf_prog *xdp_prog; + struct txgbe_ring *ring; +#ifdef HAVE_XDP_FRAME_STRUCT + struct xdp_frame *xdpf; +#endif + int err; + u32 act; + rcu_read_lock(); + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + + if (!xdp_prog) { + goto xdp_out; + } + +#ifdef HAVE_XDP_FRAME_STRUCT + prefetchw(xdp->data_hard_start); /* xdp_frame write */ +#endif + act = bpf_prog_run_xdp(xdp_prog, xdp); + switch (act) { + case XDP_PASS: + break; + case XDP_TX: +#ifdef HAVE_XDP_FRAME_STRUCT + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) { + result = TXGBE_XDP_CONSUMED; + break; + } +#endif + ring = adapter->xdp_ring[smp_processor_id() % adapter->num_xdp_queues]; + if (static_branch_unlikely(&txgbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); +#ifdef HAVE_XDP_FRAME_STRUCT + result = txgbe_xmit_xdp_ring(ring, xdpf); +#else + result = txgbe_xmit_xdp_ring(ring, xdp); +#endif + if (static_branch_unlikely(&txgbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); + break; + case XDP_REDIRECT: + err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); + if (!err) { + result = TXGBE_XDP_REDIR; + } else { + result = TXGBE_XDP_CONSUMED; + } + break; + default: + bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + /* fallthrough -- handle aborts by dropping packet */ + fallthrough; + case XDP_DROP: + result = TXGBE_XDP_CONSUMED; + break; + } +xdp_out: + rcu_read_unlock(); +#endif /* HAVE_XDP_SUPPORT */ + + return ERR_PTR(-result); +} + +static struct txgbe_rx_buffer *txgbe_get_rx_buffer(struct txgbe_ring *rx_ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff **skb, + const unsigned int size) +{ + struct txgbe_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + prefetchw(rx_buffer->page); + *skb = rx_buffer->skb; + + /* Delay unmapping of the first packet. It carries the header + * information, HW may still access the header after the writeback. 
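txgbe_run_xdp above never hands back a real skb: it folds the XDP verdict bitmask into the pointer with ERR_PTR(-result), and txgbe_clean_rx_irq later recovers it with -PTR_ERR(). A compressed sketch of that round trip; the SKETCH_XDP_* values are stand-ins for the TXGBE_XDP_* flags, whose exact numeric values are an assumption here:

#include <linux/bits.h>
#include <linux/err.h>

struct sk_buff;

#define SKETCH_XDP_PASS         0
#define SKETCH_XDP_CONSUMED     BIT(0)
#define SKETCH_XDP_TX           BIT(1)
#define SKETCH_XDP_REDIR        BIT(2)

/* producer: fold a small verdict bitmask into the pointer return value */
static inline struct sk_buff *sketch_encode_verdict(unsigned int verdict)
{
        return ERR_PTR(-(long)verdict); /* PASS (0) comes back as plain NULL */
}

/* consumer: an ERR_PTR-range pointer carries a verdict, anything else
 * is a real skb that should be processed normally
 */
static inline unsigned int sketch_decode_verdict(struct sk_buff *skb)
{
        return IS_ERR(skb) ? (unsigned int)-PTR_ERR(skb) : SKETCH_XDP_PASS;
}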
+ * Only unmap it when EOP is reached + */ + if (!txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_EOP)) { + if (!*skb) + goto skip_sync; + } else { + if (*skb) + txgbe_dma_sync_frag(rx_ring, *skb); + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->page_dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); +skip_sync: + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC +static struct sk_buff *txgbe_build_skb(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + union txgbe_rx_desc *rx_desc) +{ +#ifdef HAVE_XDP_BUFF_DATA_META + unsigned int metasize = xdp->data - xdp->data_meta; + void *va = xdp->data_meta; +#else + void *va = xdp->data; +#endif +#if (PAGE_SIZE < 8192) + unsigned int truesize = txgbe_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); +#endif + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* build an skb around the page buffer */ + skb = build_skb(xdp->data_hard_start, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, xdp->data - xdp->data_hard_start); + __skb_put(skb, xdp->data_end - xdp->data); +#ifdef HAVE_XDP_BUFF_DATA_META + if (metasize) + skb_metadata_set(skb, metasize); +#endif + + /* record DMA address if this is the start of a chain of buffers */ + if (!txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_EOP)) + TXGBE_CB(skb)->dma = rx_buffer->page_dma; + + /* update buffer offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + + return skb; +} +#endif + +static struct sk_buff *txgbe_construct_skb(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + union txgbe_rx_desc *rx_desc) +{ + unsigned int size = xdp->data_end - xdp->data; +#if (PAGE_SIZE < 8192) + unsigned int truesize = txgbe_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); +#endif + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(xdp->data); +#if L1_CACHE_BYTES < 128 + prefetch(xdp->data + L1_CACHE_BYTES); +#endif + /* Note, we get here by enabling legacy-rx via: + * + * ethtool --set-priv-flags legacy-rx on + * + * In this mode, we currently get 0 extra XDP headroom as + * opposed to having legacy-rx off, where we process XDP + * packets going to stack via txgbe_build_skb(). The latter + * provides us currently with 192 bytes of headroom. + * + * For txgbe_construct_skb() mode it means that the + * xdp->data_meta will always point to xdp->data, since + * the helper cannot expand the head. Should this ever + * change in future for legacy-rx mode on, then lets also + * add xdp->data_meta handling here. 
+ */ + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, TXGBE_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + if (size > TXGBE_RX_HDR_SIZE) { + if (!txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_EOP)) + TXGBE_CB(skb)->dma = rx_buffer->page_dma; + + skb_add_rx_frag(skb, 0, rx_buffer->page, + xdp->data - page_address(rx_buffer->page), + size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + memcpy(__skb_put(skb, size), + xdp->data, ALIGN(size, sizeof(long))); + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +/** + * txgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the syste. + * + * Returns amount of work completed. + **/ +static int txgbe_clean_rx_irq(struct txgbe_q_vector *q_vector, + struct txgbe_ring *rx_ring, + int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0, xdp_xmit = 0; + u16 cleaned_count = txgbe_desc_unused(rx_ring); + struct txgbe_adapter *adapter = q_vector->adapter; + unsigned int offset = txgbe_rx_offset(rx_ring); +#if IS_ENABLED(CONFIG_FCOE) + int ddp_bytes; + unsigned int mss = 0; +#endif /* CONFIG_FCOE */ + struct xdp_buff xdp; + xdp.data_end = NULL; + xdp.data = NULL; +#ifdef HAVE_XDP_BUFF_RXQ + xdp.rxq = &rx_ring->xdp_rxq; +#endif + + /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ +#ifdef HAVE_XDP_BUFF_FRAME_SZ +#if (PAGE_SIZE < 8192) + xdp.frame_sz = txgbe_rx_frame_truesize(rx_ring, 0); +#endif +#endif + + while (likely(total_rx_packets < budget)) { + union txgbe_rx_desc *rx_desc; + struct txgbe_rx_buffer *rx_buffer; + struct sk_buff *skb; + unsigned int size; + + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= TXGBE_RX_BUFFER_WRITE) { + txgbe_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = TXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); +#if 0 + if (!txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_DD)) + break; +#endif + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + rx_buffer = txgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size); + + /* retrieve a buffer from the ring */ + if (!skb) { + xdp.data = page_address(rx_buffer->page) + + rx_buffer->page_offset; +#ifdef HAVE_XDP_BUFF_DATA_META + xdp.data_meta = xdp.data; +#endif + xdp.data_hard_start = xdp.data - offset; + xdp.data_end = xdp.data + size; + +#ifdef HAVE_XDP_BUFF_FRAME_SZ +#if (PAGE_SIZE > 4096) + /* At larger PAGE_SIZE, frame_sz depend on len size */ + xdp.frame_sz = txgbe_rx_frame_truesize(rx_ring, size); +#endif +#endif + //skb = txgbe_run_xdp(adapter, rx_ring, &xdp); + skb = txgbe_run_xdp(adapter, rx_ring, rx_buffer, &xdp); + } + + if (IS_ERR(skb)) { + unsigned int xdp_res = -PTR_ERR(skb); + + if (xdp_res & (TXGBE_XDP_TX | TXGBE_XDP_REDIR)) { + xdp_xmit |= xdp_res; + txgbe_rx_buffer_flip(rx_ring, rx_buffer, size); 
+ } else { + rx_buffer->pagecnt_bias++; + } + total_rx_packets++; + total_rx_bytes += size; + } else if (skb) { + txgbe_add_rx_frag(rx_ring, rx_buffer, skb, size); +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC + } else if (ring_uses_build_skb(rx_ring)) { + skb = txgbe_build_skb(rx_ring, rx_buffer, + &xdp, rx_desc); +#endif + } else { + skb = txgbe_construct_skb(rx_ring, rx_buffer, + &xdp, rx_desc); + } + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + txgbe_put_rx_buffer(rx_ring, rx_buffer, skb); + cleaned_count++; + + /* place incomplete frames back on ring for completion */ + if (txgbe_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + /* verify the packet layout is correct */ + if (txgbe_cleanup_headers(rx_ring, rx_desc, skb)) + continue; + + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + txgbe_process_skb_fields(rx_ring, rx_desc, skb); + +#if IS_ENABLED(CONFIG_FCOE) + /* if ddp, not passing to ULD unless for FCP_RSP or error */ + if (txgbe_rx_is_fcoe(rx_ring, rx_desc)) { + ddp_bytes = txgbe_fcoe_ddp(adapter, rx_desc, skb); + /* include DDPed FCoE data */ + if (ddp_bytes > 0) { + if (!mss) { + mss = rx_ring->netdev->mtu - + sizeof(struct fcoe_hdr) - + sizeof(struct fc_frame_header) - + sizeof(struct fcoe_crc_eof); + if (mss > 512) + mss &= ~511; + } + total_rx_bytes += ddp_bytes; + total_rx_packets += + DIV_ROUND_UP(ddp_bytes, mss); + } + if (!ddp_bytes) { + dev_kfree_skb_any(skb); +#ifndef NETIF_F_GRO + rx_ring->netdev->last_rx = jiffies; +#endif + continue; + } + } +#endif /* CONFIG_FCOE */ + + txgbe_rx_skb(q_vector, rx_ring, rx_desc, skb); + + /* update budget accounting */ + total_rx_packets++; + } + +#ifdef HAVE_XDP_SUPPORT + if (xdp_xmit & TXGBE_XDP_TX) { + struct txgbe_ring *ring = adapter->xdp_ring[smp_processor_id() % adapter->num_xdp_queues]; + if (static_branch_unlikely(&txgbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + wmb(); + writel(ring->next_to_use, ring->tail); + if (static_branch_unlikely(&txgbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); + } + if (xdp_xmit & TXGBE_XDP_REDIR) + xdp_do_flush_map(); +#endif + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + +#ifndef TXGBE_NO_LRO + txgbe_lro_flush_all(q_vector); +#endif + return total_rx_packets; +} + +#else /* CONFIG_TXGBE_DISABLE_PACKET_SPLIT */ +/** + * txgbe_clean_rx_irq - Clean completed descriptors from Rx ring - legacy + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a legacy approach to Rx interrupt + * handling. This version will perform better on systems with a low cost + * dma mapping API. + * + * Returns amount of work completed. 
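Both Rx cleanup paths turn a DDPed FCoE byte count into an estimated frame count by dividing by an approximate MSS, and the mss &= ~511 step just rounds that MSS down to a multiple of 512 before the division. A tiny numeric illustration; the starting MSS of 2112 is made up:

#include <stdio.h>

int main(void)
{
        unsigned int mss = 2112;        /* hypothetical MTU minus FCoE overheads   */
        unsigned int ddp_bytes = 65536; /* bytes placed directly by the DDP engine */

        if (mss > 512)
                mss &= ~511u;           /* round down to a multiple of 512         */

        /* open-coded DIV_ROUND_UP(ddp_bytes, mss): prints mss=2048 packets=32 */
        printf("mss=%u packets=%u\n", mss, (ddp_bytes + mss - 1) / mss);
        return 0;
}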
+ **/ +static int txgbe_clean_rx_irq(struct txgbe_q_vector *q_vector, + struct txgbe_ring *rx_ring, + int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; +#if IS_ENABLED(CONFIG_FCOE) + struct txgbe_adapter *adapter = q_vector->adapter; + int ddp_bytes; + unsigned int mss = 0; +#endif /* CONFIG_FCOE */ + u16 len = 0; + u16 cleaned_count = txgbe_desc_unused(rx_ring); + + do { + struct txgbe_rx_buffer *rx_buffer; + union txgbe_rx_desc *rx_desc; + struct sk_buff *skb; + u16 ntc; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= TXGBE_RX_BUFFER_WRITE) { + txgbe_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + ntc = rx_ring->next_to_clean; + rx_desc = TXGBE_RX_DESC(rx_ring, ntc); + rx_buffer = &rx_ring->rx_buffer_info[ntc]; + + if (!txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_DD)) + break; + + /* + * This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * RXD_STAT_DD bit is set + */ + rmb(); + + skb = rx_buffer->skb; + + prefetch(skb->data); + + len = le16_to_cpu(rx_desc->wb.upper.length); + /* pull the header of the skb in */ + __skb_put(skb, len); + + /* + * Delay unmapping of the first packet. It carries the + * header information, HW may still access the header after + * the writeback. Only unmap it when EOP is reached + */ + if (!TXGBE_CB(skb)->head) { + TXGBE_CB(skb)->dma = rx_buffer->dma; + } else { + skb = txgbe_merge_active_tail(skb); + dma_unmap_single(rx_ring->dev, + rx_buffer->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + } + + /* clear skb reference in buffer info structure */ + rx_buffer->skb = NULL; + rx_buffer->dma = 0; + + cleaned_count++; + + if (txgbe_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + dma_unmap_single(rx_ring->dev, + TXGBE_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + + TXGBE_CB(skb)->dma = 0; + + if (txgbe_close_active_frag_list(skb) && + !TXGBE_CB(skb)->append_cnt) { + /* if we got here without RSC the packet is invalid */ + dev_kfree_skb_any(skb); + continue; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(txgbe_test_staterr(rx_desc, + TXGBE_RXD_ERR_FRAME_ERR_MASK))) { + dev_kfree_skb_any(skb); + continue; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + txgbe_process_skb_fields(rx_ring, rx_desc, skb); + +#if IS_ENABLED(CONFIG_FCOE) + /* if ddp, not passing to ULD unless for FCP_RSP or error */ + if (txgbe_rx_is_fcoe(rx_ring, rx_desc)) { + ddp_bytes = txgbe_fcoe_ddp(adapter, rx_desc, skb); + /* include DDPed FCoE data */ + if (ddp_bytes > 0) { + if (!mss) { + mss = rx_ring->netdev->mtu - + sizeof(struct fcoe_hdr) - + sizeof(struct fc_frame_header) - + sizeof(struct fcoe_crc_eof); + if (mss > 512) + mss &= ~511; + } + total_rx_bytes += ddp_bytes; + total_rx_packets += + DIV_ROUND_UP(ddp_bytes, mss); + } + if (!ddp_bytes) { + dev_kfree_skb_any(skb); +#ifndef NETIF_F_GRO + rx_ring->netdev->last_rx = jiffies; +#endif + continue; + } + } + +#endif /* CONFIG_FCOE */ + txgbe_rx_skb(q_vector, rx_ring, rx_desc, skb); + + /* update budget accounting */ + total_rx_packets++; + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; 
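The u64_stats_update_begin()/end() pair above is the writer half of the 64-bit counter protection; on 32-bit machines the reader has to loop until it sees a consistent snapshot. A sketch of the matching reader, assuming the stock u64_stats_sync API and a hypothetical sketch_ring holding just the two counters:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct sketch_ring {
        struct u64_stats_sync syncp;
        u64 packets;
        u64 bytes;
};

static void sketch_read_ring_stats(struct sketch_ring *ring,
                                   u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&ring->syncp);
                *packets = ring->packets;       /* re-read if a writer raced with us */
                *bytes = ring->bytes;
        } while (u64_stats_fetch_retry(&ring->syncp, start));
}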
+ + if (cleaned_count) + txgbe_alloc_rx_buffers(rx_ring, cleaned_count); + +#ifndef TXGBE_NO_LRO + txgbe_lro_flush_all(q_vector); + +#endif /* TXGBE_NO_LRO */ + return total_rx_packets; +} + +#endif /* CONFIG_TXGBE_DISABLE_PACKET_SPLIT */ +#ifdef HAVE_NDO_BUSY_POLL +/* must be called with local_bh_disable()d */ +static int txgbe_busy_poll_recv(struct napi_struct *napi) +{ + struct txgbe_q_vector *q_vector = + container_of(napi, struct txgbe_q_vector, napi); + struct txgbe_adapter *adapter = q_vector->adapter; + struct txgbe_ring *ring; + int found = 0; + + if (test_bit(__TXGBE_DOWN, &adapter->state)) + return LL_FLUSH_FAILED; + + if (!txgbe_qv_lock_poll(q_vector)) + return LL_FLUSH_BUSY; + + txgbe_for_each_ring(ring, q_vector->rx) { + found = txgbe_clean_rx_irq(q_vector, ring, 4); +#ifdef BP_EXTENDED_STATS + if (found) + ring->stats.cleaned += found; + else + ring->stats.misses++; +#endif + if (found) + break; + } + + txgbe_qv_unlock_poll(q_vector); + + return found; +} + +#endif /* HAVE_NDO_BUSY_POLL */ +/** + * txgbe_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + * + * txgbe_configure_msix sets up the hardware to properly generate MSI-X + * interrupts. + **/ +static void txgbe_configure_msix(struct txgbe_adapter *adapter) +{ + u16 v_idx; + + /* Populate MSIX to EITR Select */ + if (adapter->num_vfs >= 32) { + u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; + wr32(&adapter->hw, TXGBE_PX_ITRSEL, eitrsel); + } else { + wr32(&adapter->hw, TXGBE_PX_ITRSEL, 0); + } + + /* + * Populate the IVAR table and set the ITR values to the + * corresponding register. + */ + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { + struct txgbe_q_vector *q_vector = adapter->q_vector[v_idx]; + struct txgbe_ring *ring; + + txgbe_for_each_ring(ring, q_vector->rx) + txgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx); + + txgbe_for_each_ring(ring, q_vector->tx) + txgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx); + + txgbe_write_eitr(q_vector); + } + + txgbe_set_ivar(adapter, -1, 0, v_idx); + + wr32(&adapter->hw, TXGBE_PX_ITR(v_idx), 1950); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +#if 0 +/** + * txgbe_update_itr - update the dynamic ITR value based on statistics + * @q_vector: structure containing interrupt and ring information + * @ring_container: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * this functionality is controlled by the InterruptThrottleRate module + * parameter (see txgbe_param.c) + **/ +static void txgbe_update_itr(struct txgbe_q_vector *q_vector, + struct txgbe_ring_container *ring_container) +{ + int bytes = ring_container->total_bytes; + int packets = ring_container->total_packets; + u32 timepassed_us; + u64 bytes_perint; + u8 itr_setting = ring_container->itr; + + if (packets == 0) + return; + + /* simple throttlerate management + * 0-10MB/s lowest (100000 ints/s) + * 10-20MB/s low (20000 ints/s) + * 20-1249MB/s bulk (12000 ints/s) + */ + /* what was last interrupt timeslice? 
*/ + timepassed_us = q_vector->itr >> 2; + if (timepassed_us == 0) + return; + bytes_perint = bytes / timepassed_us; /* bytes/usec */ + + switch (itr_setting) { + case lowest_latency: + if (bytes_perint > 10) { + itr_setting = low_latency; + } + break; + case low_latency: + if (bytes_perint > 20) { + itr_setting = bulk_latency; + } else if (bytes_perint <= 10) { + itr_setting = lowest_latency; + } + break; + case bulk_latency: + if (bytes_perint <= 20) { + itr_setting = low_latency; + } + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itr_setting; +} +#endif + +static inline bool txgbe_container_is_rx(struct txgbe_q_vector *q_vector, + struct txgbe_ring_container *rc) +{ + return &q_vector->rx == rc; +} +/** + * ixgbe_update_itr - update the dynamic ITR value based on statistics + * @q_vector: structure containing interrupt and ring information + * @ring_container: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + **/ +static void txgbe_update_itr(struct txgbe_q_vector *q_vector, + struct txgbe_ring_container *ring_container) +{ + unsigned int itr = TXGBE_ITR_ADAPTIVE_MIN_USECS | + TXGBE_ITR_ADAPTIVE_LATENCY; + unsigned int avg_wire_size, packets, bytes; + unsigned long next_update = jiffies; + + /* If we don't have any rings just leave ourselves set for maximum + * possible latency so we take ourselves out of the equation. + */ + if (!ring_container->ring) + return; + + /* If we didn't update within up to 1 - 2 jiffies we can assume + * that either packets are coming in so slow there hasn't been + * any work, or that there is so much work that NAPI is dealing + * with interrupt moderation and we don't need to do anything. + */ + if (time_after(next_update, ring_container->next_update)) + goto clear_counts; + + packets = ring_container->total_packets; + bytes = ring_container->total_bytes; + + if (txgbe_container_is_rx(q_vector, ring_container)) { + /* If Rx and there are 1 to 23 packets and bytes are less than + * 12112 assume insufficient data to use bulk rate limiting + * approach. Instead we will focus on simply trying to target + * receiving 8 times as much data in the next interrupt. + */ + if (packets && packets < 24 && bytes < 12112) { + itr = TXGBE_ITR_ADAPTIVE_LATENCY; + avg_wire_size = (bytes + packets * 24) * 2; + avg_wire_size = clamp_t(unsigned int, + avg_wire_size, 2560, 12800); + goto adjust_for_speed; + } + } + + /* Less than 48 packets we can assume that our current interrupt delay + * is only slightly too low. As such we should increase it by a small + * fixed amount. + */ + if (packets < 48) { + itr = (q_vector->itr >> 2) + TXGBE_ITR_ADAPTIVE_MIN_INC; + if (itr > TXGBE_ITR_ADAPTIVE_MAX_USECS) + itr = TXGBE_ITR_ADAPTIVE_MAX_USECS; + + /* If sample size is 0 - 7 we should probably switch + * to latency mode instead of trying to control + * things as though we are in bulk. 
+ * + * Otherwise if the number of packets is less than 48 + * we should maintain whatever mode we are currently + * in. The range between 8 and 48 is the cross-over + * point between latency and bulk traffic. + */ + if (packets < 8) + itr += TXGBE_ITR_ADAPTIVE_LATENCY; + else + itr += ring_container->itr & TXGBE_ITR_ADAPTIVE_LATENCY; + goto clear_counts; + } + + /* Between 48 and 96 is our "goldilocks" zone where we are working + * out "just right". Just report that our current ITR is good for us. + */ + if (packets < 96) { + itr = q_vector->itr >> 2; + goto clear_counts; + } + + /* If packet count is 96 or greater we are likely looking at a slight + * overrun of the delay we want. Try halving our delay to see if that + * will cut the number of packets in half per interrupt. + */ + if (packets < 256) { + itr = q_vector->itr >> 3; + if (itr < TXGBE_ITR_ADAPTIVE_MIN_USECS) + itr = TXGBE_ITR_ADAPTIVE_MIN_USECS; + goto clear_counts; + } + + /* The paths below assume we are dealing with a bulk ITR since number + * of packets is 256 or greater. We are just going to have to compute + * a value and try to bring the count under control, though for smaller + * packet sizes there isn't much we can do as NAPI polling will likely + * be kicking in sooner rather than later. + */ + itr = TXGBE_ITR_ADAPTIVE_BULK; + + /* If packet counts are 256 or greater we can assume we have a gross + * overestimation of what the rate should be. Instead of trying to fine + * tune it just use the formula below to try and dial in an exact value + * give the current packet size of the frame. + */ + avg_wire_size = bytes / packets; + + /* The following is a crude approximation of: + * wmem_default / (size + overhead) = desired_pkts_per_int + * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate + * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value + * + * Assuming wmem_default is 212992 and overhead is 640 bytes per + * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the + * formula down to + * + * (170 * (size + 24)) / (size + 640) = ITR + * + * We first do some math on the packet size and then finally bitshift + * by 8 after rounding up. We also have to account for PCIe link speed + * difference as ITR scales based on this. + */ + if (avg_wire_size <= 60) { + /* Start at 50k ints/sec */ + avg_wire_size = 5120; + } else if (avg_wire_size <= 316) { + /* 50K ints/sec to 16K ints/sec */ + avg_wire_size *= 40; + avg_wire_size += 2720; + } else if (avg_wire_size <= 1084) { + /* 16K ints/sec to 9.2K ints/sec */ + avg_wire_size *= 15; + avg_wire_size += 11452; + } else if (avg_wire_size <= 1980) { + /* 9.2K ints/sec to 8K ints/sec */ + avg_wire_size *= 5; + avg_wire_size += 22420; + } else { + /* plateau at a limit of 8K ints/sec */ + avg_wire_size = 32256; + } + +adjust_for_speed: + /* Resultant value is 256 times larger than it needs to be. This + * gives us room to adjust the value as needed to either increase + * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc. + * + * Use addition as we have already recorded the new latency flag + * for the ITR value. 
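+ *
+ * As a rough worked example: a 1500 byte average frame falls into the
+ * 1084-1980 bucket above and gives 1500 * 5 + 22420 = 29920, which is
+ * about 29920 / 256 = 117 usecs once scaled for a 10G link, i.e.
+ * roughly 8.5K ints/sec, consistent with the 8K ints/sec plateau
+ * noted above.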
+ */ + switch (q_vector->adapter->link_speed) { + case TXGBE_LINK_SPEED_25GB_FULL: + itr += DIV_ROUND_UP(avg_wire_size, + TXGBE_ITR_ADAPTIVE_MIN_INC * 512) * + TXGBE_ITR_ADAPTIVE_MIN_INC; + break; + case TXGBE_LINK_SPEED_10GB_FULL: + case TXGBE_LINK_SPEED_100_FULL: + default: + itr += DIV_ROUND_UP(avg_wire_size, + TXGBE_ITR_ADAPTIVE_MIN_INC * 256) * + TXGBE_ITR_ADAPTIVE_MIN_INC; + break; + case TXGBE_LINK_SPEED_1GB_FULL: + case TXGBE_LINK_SPEED_10_FULL: + itr += DIV_ROUND_UP(avg_wire_size, + TXGBE_ITR_ADAPTIVE_MIN_INC * 64) * + TXGBE_ITR_ADAPTIVE_MIN_INC; + break; + } + + /* In the case of a latency specific workload only allow us to + * reduce the ITR by at most 2us. By doing this we should dial + * in so that our number of interrupts is no more than 2x the number + * of packets for the least busy workload. So for example in the case + * of a TCP worload the ack packets being received would set the + * the interrupt rate as they are a latency specific workload. + */ + if ((itr & TXGBE_ITR_ADAPTIVE_LATENCY) && itr < ring_container->itr) + itr = ring_container->itr - TXGBE_ITR_ADAPTIVE_MIN_INC; +clear_counts: + /* write back value */ + ring_container->itr = itr; + + /* next update should occur within next jiffy */ + ring_container->next_update = next_update + 1; + + ring_container->total_bytes = 0; + ring_container->total_packets = 0; +} + +/** + * txgbe_write_eitr - write EITR register in hardware specific way + * @q_vector: structure containing interrupt and ring information + * + * This function is made to be called by ethtool and by the driver + * when it needs to update EITR registers at runtime. Hardware + * specific quirks/differences are taken care of here. + */ +void txgbe_write_eitr(struct txgbe_q_vector *q_vector) +{ + struct txgbe_adapter *adapter = q_vector->adapter; + struct txgbe_hw *hw = &adapter->hw; + int v_idx = q_vector->v_idx; + u32 itr_reg; + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + itr_reg = (q_vector->itr >> 3) & TXGBE_AMLITE_MAX_EITR; + else + itr_reg = q_vector->itr & TXGBE_MAX_EITR; + itr_reg |= TXGBE_PX_ITR_CNT_WDIS; + + wr32(hw, TXGBE_PX_ITR(v_idx), itr_reg); +} + +#if 0 +static void txgbe_set_itr(struct txgbe_q_vector *q_vector) +{ + u16 new_itr = q_vector->itr; + u8 current_itr; + + txgbe_update_itr(q_vector, &q_vector->tx); + txgbe_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = TXGBE_100K_ITR; + break; + case low_latency: + new_itr = TXGBE_20K_ITR; + break; + case bulk_latency: + new_itr = TXGBE_12K_ITR; + break; + default: + break; + } + + if (new_itr != q_vector->itr) { + /* do an exponential smoothing */ + new_itr = (10 * new_itr * q_vector->itr) / + ((9 * new_itr) + q_vector->itr); + + /* save the algorithm value here */ + q_vector->itr = new_itr; + + txgbe_write_eitr(q_vector); + } +} +#endif + +static void txgbe_set_itr(struct txgbe_q_vector *q_vector) +{ + u32 new_itr; + + txgbe_update_itr(q_vector, &q_vector->tx); + txgbe_update_itr(q_vector, &q_vector->rx); + + /* use the smallest value of new ITR delay calculations */ + new_itr = min(q_vector->rx.itr, q_vector->tx.itr); + + /* Clear latency flag if set, shift into correct position */ + new_itr &= TXGBE_ITR_ADAPTIVE_MASK_USECS; + new_itr <<= 2; + + if (new_itr != q_vector->itr) { + /* save the algorithm value here */ + q_vector->itr = new_itr; + + txgbe_write_eitr(q_vector); + } +} +/** + 
* txgbe_check_overtemp_subtask - check for over temperature + * @adapter: pointer to adapter + **/ +static void txgbe_check_overtemp_subtask(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 eicr = adapter->interrupt_event; + s32 temp_state; + u16 value = 0; +#ifdef HAVE_VIRTUAL_STATION + struct net_device *upper; + struct list_head *iter; +#endif + + if (test_bit(__TXGBE_DOWN, &adapter->state)) + return; + if (!(adapter->flags2 & TXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) + return; + /*when pci lose link, not check over heat*/ + pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &value); + if (value == TXGBE_FAILED_READ_CFG_WORD) + return ; + + if (!(adapter->flags3 & TXGBE_FLAG3_TEMP_SENSOR_INPROGRESS)) { + if (!(adapter->flags2 & TXGBE_FLAG2_TEMP_SENSOR_EVENT)) + return; + + adapter->flags2 &= ~TXGBE_FLAG2_TEMP_SENSOR_EVENT; + + /* + * Since the warning interrupt is for both ports + * we don't have to check if: + * - This interrupt wasn't for our port. + * - We may have missed the interrupt so always have to + * check if we got a LSC + */ + if (!(eicr & TXGBE_PX_MISC_IC_OVER_HEAT)) + return; + } + + temp_state = TCALL(hw, phy.ops.check_overtemp); + if (!temp_state || temp_state == TXGBE_NOT_IMPLEMENTED) + return; + + if (temp_state == TXGBE_ERR_UNDERTEMP && + test_bit(__TXGBE_HANGING, &adapter->state)) { + if (hw->mac.type == txgbe_mac_aml || + hw->mac.type == txgbe_mac_aml40) { + adapter->flags3 &= ~TXGBE_FLAG3_TEMP_SENSOR_INPROGRESS; + // re-enable over_heat misx itr + wr32m(&adapter->hw, TXGBE_PX_MISC_IEN, TXGBE_PX_MISC_IEN_OVER_HEAT, + TXGBE_PX_MISC_IEN_OVER_HEAT); + } + e_crit(drv, "%s\n", txgbe_underheat_msg); + + wr32m(&adapter->hw, TXGBE_RDB_PB_CTL, + TXGBE_RDB_PB_CTL_RXEN, TXGBE_RDB_PB_CTL_RXEN); + netif_carrier_on(adapter->netdev); +#ifdef HAVE_VIRTUAL_STATION + netdev_for_each_all_upper_dev_rcu(adapter->netdev, + upper, iter) { + if (!netif_is_macvlan(upper)) + continue; + netif_carrier_on(upper); + } +#endif + clear_bit(__TXGBE_HANGING, &adapter->state); + } else if (temp_state == TXGBE_ERR_OVERTEMP && + !test_and_set_bit(__TXGBE_HANGING, &adapter->state)) { + if (hw->mac.type == txgbe_mac_aml || + hw->mac.type == txgbe_mac_aml40) + adapter->flags3 |= TXGBE_FLAG3_TEMP_SENSOR_INPROGRESS; + e_crit(drv, "%s\n", txgbe_overheat_msg); + netif_carrier_off(adapter->netdev); +#ifdef HAVE_VIRTUAL_STATION + netdev_for_each_all_upper_dev_rcu(adapter->netdev, + upper, iter) { + if (!netif_is_macvlan(upper)) + continue; + netif_carrier_off(upper); + } +#endif + wr32m(&adapter->hw, TXGBE_RDB_PB_CTL, + TXGBE_RDB_PB_CTL_RXEN, 0); + } + + adapter->interrupt_event = 0; +} + +static void txgbe_check_overtemp_event(struct txgbe_adapter *adapter, u32 eicr) +{ + if (!(adapter->flags2 & TXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) + return; + + if (!(eicr & TXGBE_PX_MISC_IC_OVER_HEAT)) + return; + + if (!test_bit(__TXGBE_DOWN, &adapter->state)) { + adapter->interrupt_event = eicr; + adapter->flags2 |= TXGBE_FLAG2_TEMP_SENSOR_EVENT; + txgbe_service_event_schedule(adapter); + } +} + +static void txgbe_check_sfp_event(struct txgbe_adapter *adapter, u32 eicr) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 eicr_mask = TXGBE_PX_MISC_IC_GPIO; + u32 reg; + + if (hw->mac.type == txgbe_mac_aml40) { + if (eicr & eicr_mask) { + if (!test_bit(__TXGBE_DOWN, &adapter->state)) { + wr32(hw, TXGBE_GPIO_INTMASK, 0xFF); + reg = rd32(hw, TXGBE_GPIO_INTSTATUS); + if (reg & TXGBE_GPIO_INTSTATUS_4) { + adapter->flags2 |= TXGBE_FLAG2_SFP_NEEDS_RESET; + wr32(hw, TXGBE_GPIO_EOI, + TXGBE_GPIO_EOI_4); + 
adapter->sfp_poll_time = 0; + txgbe_service_event_schedule(adapter); + } + } + } + } else if (hw->mac.type == txgbe_mac_sp || hw->mac.type == txgbe_mac_aml) { + if (eicr & eicr_mask) { + if (!test_bit(__TXGBE_DOWN, &adapter->state)) { + wr32(hw, TXGBE_GPIO_INTMASK, 0xFF); + reg = rd32(hw, TXGBE_GPIO_INTSTATUS); + if (reg & TXGBE_GPIO_INTSTATUS_2) { + adapter->flags2 |= TXGBE_FLAG2_SFP_NEEDS_RESET; + wr32(hw, TXGBE_GPIO_EOI, + TXGBE_GPIO_EOI_2); + adapter->sfp_poll_time = 0; + txgbe_service_event_schedule(adapter); + } + if (reg & TXGBE_GPIO_INTSTATUS_3) { + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + wr32(hw, TXGBE_GPIO_EOI, + TXGBE_GPIO_EOI_3); + txgbe_service_event_schedule(adapter); + } + + if (reg & TXGBE_GPIO_INTSTATUS_6) { + wr32(hw, TXGBE_GPIO_EOI, + TXGBE_GPIO_EOI_6); + adapter->flags |= + TXGBE_FLAG_NEED_LINK_CONFIG; + txgbe_service_event_schedule(adapter); + } + } + } + } +} + +static void txgbe_check_lsc(struct txgbe_adapter *adapter) +{ + adapter->lsc_int++; + + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + + adapter->link_check_timeout = jiffies; + if (!test_bit(__TXGBE_DOWN, &adapter->state)) { + txgbe_service_event_schedule(adapter); + } +} + +static void txgbe_check_phy_event(struct txgbe_adapter *adapter) +{ + + adapter->flags3 |= TXGBE_FLAG3_PHY_EVENT; + + if (!test_bit(__TXGBE_DOWN, &adapter->state)) { + txgbe_service_event_schedule(adapter); + } + +} + +/** + * txgbe_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +void txgbe_irq_enable(struct txgbe_adapter *adapter, bool queues, bool flush) +{ + u32 mask = 0; + struct txgbe_hw *hw = &adapter->hw; + u8 device_type = hw->subsystem_device_id & 0xF0; + + if (hw->mac.type == txgbe_mac_aml40) { + mask = TXGBE_GPIO_INTTYPE_LEVEL_4; + } else if (device_type != TXGBE_ID_MAC_XAUI && + device_type != TXGBE_ID_MAC_SGMII) { + mask = TXGBE_GPIO_INTTYPE_LEVEL_2 | TXGBE_GPIO_INTTYPE_LEVEL_3 | + TXGBE_GPIO_INTTYPE_LEVEL_6; + } + wr32(&adapter->hw, TXGBE_GPIO_INTTYPE_LEVEL, mask); + + /* enable misc interrupt */ + mask = TXGBE_PX_MISC_IEN_MASK; + + if (hw->mac.type != txgbe_mac_sp) + mask &= ~TXGBE_PX_MISC_IEN_ETH_EVENT; + + if (adapter->flags2 & TXGBE_FLAG2_TEMP_SENSOR_CAPABLE) + mask |= TXGBE_PX_MISC_IEN_OVER_HEAT; + + if (adapter->flags3 & TXGBE_FLAG3_TEMP_SENSOR_INPROGRESS) + mask &= ~TXGBE_PX_MISC_IEN_OVER_HEAT; + + if ((adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE) && + !(adapter->flags2 & TXGBE_FLAG2_FDIR_REQUIRES_REINIT)) + mask |= TXGBE_PX_MISC_IEN_FLOW_DIR; + +#ifdef HAVE_PTP_1588_CLOCK + mask |= TXGBE_PX_MISC_IEN_TIMESYNC; +#endif /* HAVE_PTP_1588_CLOCK */ + + if (netif_msg_tx_err(adapter)) + mask |= TXGBE_PX_MISC_IEN_TXDESC; + + wr32(&adapter->hw, TXGBE_PX_MISC_IEN, mask); + /* unmask interrupt */ + txgbe_intr_enable(&adapter->hw, TXGBE_INTR_MISC(adapter)); + if (queues) + txgbe_intr_enable(&adapter->hw, TXGBE_INTR_QALL(adapter)); + + /* flush configuration */ + if (flush) + TXGBE_WRITE_FLUSH(&adapter->hw); + + /* enable gpio interrupt */ + if (hw->mac.type == txgbe_mac_aml40) { + mask = TXGBE_GPIO_INTEN_4; + } else if (device_type != TXGBE_ID_MAC_XAUI && + device_type != TXGBE_ID_MAC_SGMII) { + mask = TXGBE_GPIO_INTEN_2 | TXGBE_GPIO_INTEN_3 | + TXGBE_GPIO_INTEN_6; + } + wr32(&adapter->hw, TXGBE_GPIO_INTEN, mask); + +} + +static void txgbe_do_lan_reset(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_ring *tx_ring; + u32 reset = 0; + u32 i; + + for (i = 0; i < adapter->num_tx_queues; i++) + tx_ring = adapter->tx_ring[i]; + + 
usec_delay(1000); + if (hw->bus.lan_id == 0) { + reset = TXGBE_MIS_RST_LAN0_RST; + } else { + reset = TXGBE_MIS_RST_LAN1_RST; + } + + wr32(hw, TXGBE_MIS_RST, + reset | rd32(hw, TXGBE_MIS_RST)); + TXGBE_WRITE_FLUSH(hw); + usec_delay(10); +} + +static void txgbe_tx_ring_recovery(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 desc_error[4] = {0, 0, 0, 0}; + u32 i; + + /* check tdm fatal error */ + for (i = 0; i < 4; i++) { + desc_error[i] = rd32(hw, TXGBE_TDM_DESC_FATAL(i)); + if (desc_error[i] != 0) { + e_err(drv, "TDM fatal error queue\n"); + txgbe_tx_timeout_reset(adapter); + return; + } + } + + /* check tdm non-fatal error */ + for (i = 0; i < 4; i++) + desc_error[i] = rd32(hw, TXGBE_TDM_DESC_NONFATAL(i)); + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (desc_error[i / 32] & (1 << i % 32)) { + adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_Q_RESET; + e_err(drv, "TDM non-fatal error, queue[%d]", i); + } + } +} + +static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data) +{ + struct txgbe_adapter *adapter = data; + struct txgbe_hw *hw = &adapter->hw; + u32 eicr; + u32 ecc; + u32 value = 0; + u16 vid; + + eicr = txgbe_misc_isb(adapter, TXGBE_ISB_MISC); + if (eicr & TXGBE_PX_MISC_IC_ETH_AN) { + if (adapter->backplane_an) + txgbe_check_lsc(adapter); + } + + if(BOND_CHECK_LINK_MODE == 1){ + if (eicr & (TXGBE_PX_MISC_IC_ETH_LKDN)){ + value = rd32(hw, 0x14404); + value = value & 0x1; + if(value == 0){ + adapter->link_up = false; + adapter->flags2 |= TXGBE_FLAG2_LINK_DOWN; + txgbe_service_event_schedule(adapter); + } + } + } else { + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + if (eicr & TXGBE_PX_MISC_AML_ETH_LK_CHANGE) + txgbe_check_lsc(adapter); + if (eicr & TXGBE_PX_MISC_AML_ETH_PHY_EVENT) + txgbe_check_phy_event(adapter); + } else { + if (eicr & (TXGBE_PX_MISC_IC_ETH_LK | + TXGBE_PX_MISC_IC_ETH_LKDN | + TXGBE_PX_MISC_IC_ETH_EVENT)) + txgbe_check_lsc(adapter); + } + } + + if (eicr & TXGBE_PX_MISC_IC_VF_MBOX) + txgbe_msg_task(adapter); + + if (eicr & TXGBE_PX_MISC_IC_PCIE_REQ_ERR) { + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "lan id %d, PCIe request error founded.\n", hw->bus.lan_id); + pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &vid); + if (vid == TXGBE_FAILED_READ_CFG_WORD) { + ERROR_REPORT1(TXGBE_ERROR_POLLING, "PCIe link is lost.\n"); + /*when pci lose link, not check over heat*/ + if (hw->bus.lan_id == 0) { + adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; + txgbe_service_event_schedule(adapter); + } else + wr32(&adapter->hw, TXGBE_MIS_PF_SM, 1); + } else { + adapter->flags2 |= TXGBE_FLAG2_DMA_RESET_REQUESTED; + } + } + + if (eicr & TXGBE_PX_MISC_IC_INT_ERR) { + e_info(link, "Received unrecoverable ECC Err," + "initiating reset.\n"); + ecc = rd32(hw, TXGBE_MIS_ST); + if (((ecc & TXGBE_MIS_ST_LAN0_ECC) && (hw->bus.lan_id == 0)) || + ((ecc & TXGBE_MIS_ST_LAN1_ECC) && (hw->bus.lan_id == 1))) + adapter->flags2 |= TXGBE_FLAG2_PF_RESET_REQUESTED | + TXGBE_FLAG2_ECC_ERR_RESET; + + txgbe_service_event_schedule(adapter); + } + if (eicr & TXGBE_PX_MISC_IC_DEV_RST) { + value = rd32(hw, TXGBE_TSC_LSEC_PKTNUM1); //This reg is used by fw to tell drv not to drv rst + if(!(value & 0x1)){ + adapter->flags2 |= TXGBE_FLAG2_RESET_INTR_RECEIVED; + txgbe_service_event_schedule(adapter); + } + } + if (eicr & TXGBE_PX_MISC_IC_STALL) { + adapter->flags2 |= TXGBE_FLAG2_PF_RESET_REQUESTED; + txgbe_service_event_schedule(adapter); + } + + if (eicr & TXGBE_PX_MISC_IC_TXDESC) { + txgbe_tx_ring_recovery(adapter); + 
txgbe_service_event_schedule(adapter); + } + +#ifdef HAVE_TX_MQ + /* Handle Flow Director Full threshold interrupt */ + if (eicr & TXGBE_PX_MISC_IC_FLOW_DIR) { + int reinit_count = 0; + int i; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct txgbe_ring *ring = adapter->tx_ring[i]; + if (test_and_clear_bit(__TXGBE_TX_FDIR_INIT_DONE, + &ring->state)) + reinit_count++; + } + if (reinit_count) { + /* no more flow director interrupts until after init */ + wr32m(hw, TXGBE_PX_MISC_IEN, + TXGBE_PX_MISC_IEN_FLOW_DIR, 0); + adapter->flags2 |= + TXGBE_FLAG2_FDIR_REQUIRES_REINIT; + txgbe_service_event_schedule(adapter); + } + } +#endif + +#if 0 + /* amlite: add SWFW mbox int */ + if (hw->mac.type == txgbe_mac_aml && eicr & TXGBE_PX_MISC_IC_MNG_HOST_MBOX) { + adapter->flags |= TXGBE_FLAG_SWFW_MBOX_NOTIFY; + txgbe_service_event_schedule(adapter); + } +#endif + + txgbe_check_sfp_event(adapter, eicr); + txgbe_check_overtemp_event(adapter, eicr); + +#ifdef HAVE_PTP_1588_CLOCK + if (unlikely(eicr & TXGBE_PX_MISC_IC_TIMESYNC)) + txgbe_ptp_check_pps_event(adapter); +#endif + + /* re-enable the original interrupt state, no lsc, no queues */ + if (!test_bit(__TXGBE_DOWN, &adapter->state)) + txgbe_irq_enable(adapter, false, false); + + wr32(hw, TXGBE_GPIO_INTMASK, 0x0); + + return IRQ_HANDLED; +} + +static irqreturn_t txgbe_msix_clean_rings(int __always_unused irq, void *data) +{ + struct txgbe_q_vector *q_vector = data; + + /* EIAM disabled interrupts (on this vector) for us */ + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * txgbe_poll - NAPI polling RX/TX cleanup routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean all queues associated with a q_vector. + **/ +int txgbe_poll(struct napi_struct *napi, int budget) +{ + struct txgbe_q_vector *q_vector = + container_of(napi, struct txgbe_q_vector, napi); + struct txgbe_adapter *adapter = q_vector->adapter; + struct txgbe_ring *ring; + int per_ring_budget; + bool clean_complete = true; + +#if IS_ENABLED(CONFIG_TPH) + if (adapter->flags & TXGBE_FLAG_TPH_ENABLED) + txgbe_update_tph(q_vector); +#endif + + txgbe_for_each_ring(ring, q_vector->tx) { +#ifdef HAVE_AF_XDP_ZC_SUPPORT + bool wd = ring->xsk_pool ? + txgbe_clean_xdp_tx_irq(q_vector, ring) : + txgbe_clean_tx_irq(q_vector, ring, budget); + if (!wd) +#else + if (!txgbe_clean_tx_irq(q_vector, ring, budget)) +#endif + clean_complete = false; + } + +#ifdef HAVE_NDO_BUSY_POLL + if (test_bit(NAPI_STATE_NPSVC, &napi->state)) + return budget; + + if (!txgbe_qv_lock_napi(q_vector)) + return budget; +#endif + /* Exit if we are called by netpoll */ + if (budget <= 0) + return budget; + + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling */ + if (q_vector->rx.count > 1) + per_ring_budget = max(budget/q_vector->rx.count, 1); + else + per_ring_budget = budget; + + txgbe_for_each_ring(ring, q_vector->rx) { +#ifdef HAVE_AF_XDP_ZC_SUPPORT + int cleaned = ring->xsk_pool ? 
+ txgbe_clean_rx_irq_zc(q_vector, ring, + per_ring_budget) : + txgbe_clean_rx_irq(q_vector, ring, + per_ring_budget); +#else + int cleaned = txgbe_clean_rx_irq(q_vector, ring, + per_ring_budget); +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ + + if (cleaned >= per_ring_budget) + clean_complete = false; + } +#ifdef HAVE_NDO_BUSY_POLL + txgbe_qv_unlock_napi(q_vector); +#endif + +#ifndef HAVE_NETDEV_NAPI_LIST + if (!netif_running(adapter->netdev)) + clean_complete = true; + +#endif + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* all work done, exit the polling mode */ + napi_complete(napi); + if (adapter->rx_itr_setting == 1) + txgbe_set_itr(q_vector); + if (!test_bit(__TXGBE_DOWN, &adapter->state)) + txgbe_intr_enable(&adapter->hw, + TXGBE_INTR_Q(q_vector->v_idx)); + + return 0; +} + +/** + * txgbe_request_msix_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * + * txgbe_request_msix_irqs allocates MSI-X vectors and requests + * interrupts from the kernel. + **/ +static int txgbe_request_msix_irqs(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int vector, err; + int ri = 0, ti = 0; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct txgbe_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-TxRx-%d", netdev->name, ri++); + ti++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-rx-%d", netdev->name, ri++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-tx-%d", netdev->name, ti++); + } else { + /* skip this unused q_vector */ + continue; + } + err = request_irq(entry->vector, &txgbe_msix_clean_rings, 0, + q_vector->name, q_vector); + if (err) { + e_err(probe, "request_irq failed for MSIX interrupt" + " '%s' Error: %d\n", q_vector->name, err); + goto free_queue_irqs; + } +#ifdef HAVE_IRQ_AFFINITY_HINT + /* If Flow Director is enabled, set interrupt affinity */ + if (adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE) { + /* assign the mask for this irq */ + irq_set_affinity_hint(entry->vector, + &q_vector->affinity_mask); + } +#endif /* HAVE_IRQ_AFFINITY_HINT */ + } + + err = request_irq(adapter->msix_entries[vector].vector, + txgbe_msix_other, 0, netdev->name, adapter); + if (err) { + e_err(probe, "request_irq for msix_other failed: %d\n", err); + goto free_queue_irqs; + } + + return 0; + +free_queue_irqs: + while (vector) { + vector--; +#ifdef HAVE_IRQ_AFFINITY_HINT + irq_set_affinity_hint(adapter->msix_entries[vector].vector, + NULL); +#endif + free_irq(adapter->msix_entries[vector].vector, + adapter->q_vector[vector]); + } + adapter->flags &= ~TXGBE_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + return err; +} + +/** + * txgbe_intr - legacy mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t txgbe_intr(int __always_unused irq, void *data) +{ + struct txgbe_adapter *adapter = data; + struct txgbe_q_vector *q_vector = adapter->q_vector[0]; + struct txgbe_hw *hw = &adapter->hw; + u32 eicr_misc; + u32 value ; + u16 pci_value; + + if (!(adapter->flags & TXGBE_FLAG_MSI_ENABLED)) { + pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_value); + if 
(!(pci_value & PCI_STATUS_INTERRUPT)) + return IRQ_HANDLED; /* Not our interrupt */ + wr32(&(adapter->hw), TXGBE_PX_INTA, 1); + } + + eicr_misc = txgbe_misc_isb(adapter, TXGBE_ISB_MISC); + if (eicr_misc & TXGBE_PX_MISC_IC_ETH_AN) { + if (adapter->backplane_an) + txgbe_service_event_schedule(adapter); + } + + if(BOND_CHECK_LINK_MODE == 1){ + if (eicr_misc & (TXGBE_PX_MISC_IC_ETH_LKDN)){ + value = rd32(hw, 0x14404); + value = value & 0x1; + if(value == 0){ + adapter->link_up = false; + adapter->flags2 |= TXGBE_FLAG2_LINK_DOWN; + txgbe_service_event_schedule(adapter); + } + } + } else { + if (eicr_misc & (TXGBE_PX_MISC_IC_ETH_LK | TXGBE_PX_MISC_IC_ETH_LKDN)) + txgbe_check_lsc(adapter); + } + + if (eicr_misc & TXGBE_PX_MISC_IC_INT_ERR) { + e_info(link, "Received unrecoverable ECC Err," + "initiating reset.\n"); + adapter->flags2 |= TXGBE_FLAG2_GLOBAL_RESET_REQUESTED | + TXGBE_FLAG2_ECC_ERR_RESET; + txgbe_service_event_schedule(adapter); + } + + if (eicr_misc & TXGBE_PX_MISC_IC_DEV_RST) { + value = rd32(hw, TXGBE_TSC_LSEC_PKTNUM1); //This reg is used by fw to tell drv not to drv rst + if(!(value & 0x1)){ + adapter->flags2 |= TXGBE_FLAG2_RESET_INTR_RECEIVED; + txgbe_service_event_schedule(adapter); + } + } + txgbe_check_sfp_event(adapter, eicr_misc); + txgbe_check_overtemp_event(adapter, eicr_misc); + +#ifdef HAVE_PTP_1588_CLOCK + if (unlikely(eicr_misc & TXGBE_PX_MISC_IC_TIMESYNC)) + txgbe_ptp_check_pps_event(adapter); +#endif + adapter->isb_mem[TXGBE_ISB_MISC] = 0; + /* would disable interrupts here but it is auto disabled */ + napi_schedule_irqoff(&q_vector->napi); + + /* + * re-enable link(maybe) and non-queue interrupts, no flush. + * txgbe_poll will re-enable the queue interrupts + */ + if (!test_bit(__TXGBE_DOWN, &adapter->state)) + txgbe_irq_enable(adapter, false, false); + + return IRQ_HANDLED; +} + +/** + * txgbe_request_irq - initialize interrupts + * @adapter: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
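+ * MSI-X is tried first (one vector per q_vector plus a final vector
+ * for the misc/other handler), then MSI, and finally a shared legacy
+ * interrupt as the fallback.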
+ **/ +static int txgbe_request_irq(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) + err = txgbe_request_msix_irqs(adapter); + else if (adapter->flags & TXGBE_FLAG_MSI_ENABLED) + err = request_irq(adapter->pdev->irq, &txgbe_intr, 0, + netdev->name, adapter); + else + err = request_irq(adapter->pdev->irq, &txgbe_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + e_err(probe, "request_irq failed, Error %d\n", err); + + return err; +} + +static void txgbe_free_irq(struct txgbe_adapter *adapter) +{ + int vector; + + if (!(adapter->flags & TXGBE_FLAG_MSIX_ENABLED)) { + free_irq(adapter->pdev->irq, adapter); + return; + } + + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct txgbe_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + /* free only the irqs that were actually requested */ + if (!q_vector->rx.ring && !q_vector->tx.ring) + continue; + +#ifdef HAVE_IRQ_AFFINITY_HINT + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(entry->vector, NULL); + +#endif + free_irq(entry->vector, q_vector); + } + + free_irq(adapter->msix_entries[vector++].vector, adapter); +} + +/** + * txgbe_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +void txgbe_irq_disable(struct txgbe_adapter *adapter) +{ + wr32(&adapter->hw, TXGBE_PX_MISC_IEN, 0); + txgbe_intr_disable(&adapter->hw, TXGBE_INTR_ALL); + + TXGBE_WRITE_FLUSH(&adapter->hw); + if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) { + int vector; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) + synchronize_irq(adapter->msix_entries[vector].vector); + + synchronize_irq(adapter->msix_entries[vector++].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +/** + * txgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts + * + **/ +static void txgbe_configure_msi_and_legacy(struct txgbe_adapter *adapter) +{ + struct txgbe_q_vector *q_vector = adapter->q_vector[0]; + struct txgbe_ring *ring; + + txgbe_write_eitr(q_vector); + + txgbe_for_each_ring(ring, q_vector->rx) + txgbe_set_ivar(adapter, 0, ring->reg_idx, 0); + + txgbe_for_each_ring(ring, q_vector->tx) + txgbe_set_ivar(adapter, 1, ring->reg_idx, 0); + + txgbe_set_ivar(adapter, -1, 0, 1); + + e_info(hw, "Legacy interrupt IVAR setup done\n"); +} + +/* amlite: tx header wb */ +#ifdef TXGBE_TXHEAD_WB +static int txgbe_setup_headwb_resources(struct txgbe_ring *ring) +{ + struct txgbe_adapter *adapter; + struct txgbe_hw *hw; + struct device *dev = ring->dev; + u8 headwb_size = 0; + + if (ring->q_vector) { + adapter = ring->q_vector->adapter; + hw = &adapter->hw; + if (hw->mac.type == txgbe_mac_sp) + return 0; + } else { + return 0; + } + + if (TXGBE_TXHEAD_WB == 1) + headwb_size = 16; + else if (TXGBE_TXHEAD_WB == 2) + headwb_size = 16; + else + headwb_size = 1; + + ring->headwb_mem = dma_alloc_coherent(dev, + sizeof(u32) * headwb_size, + &ring->headwb_dma, + GFP_KERNEL); + if (!ring->headwb_mem) { + e_err(drv, "txgbe_setup_headwb_resources no mem\n"); + return -ENOMEM; + } + memset(ring->headwb_mem, 0, sizeof(u32) * headwb_size); + + return 0; +} +#endif + +/** + * txgbe_configure_tx_ring - Configure Tx ring after Reset + * @adapter: board private structure + * @ring: structure containing ring specific data + * + * Configure the Tx descriptor ring after a reset. 
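+ * The queue is first disabled and flushed, the descriptor base
+ * address, ring size and write-back threshold are programmed, the
+ * head/tail pointers are reset, and the queue is then re-enabled and
+ * polled until TXGBE_PX_TR_CFG_ENABLE reads back as set.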
+ **/ +void txgbe_configure_tx_ring(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + u64 tdba = ring->dma; + int wait_loop = 10; + u32 txdctl = TXGBE_PX_TR_CFG_ENABLE; + u8 reg_idx = ring->reg_idx; +#ifdef HAVE_AF_XDP_ZC_SUPPORT + ring->xsk_pool = NULL; + if (ring_is_xdp(ring)) { + ring->xsk_pool = txgbe_xsk_umem(adapter, ring); + } +#endif + /* disable queue to avoid issues while updating state */ + wr32(hw, TXGBE_PX_TR_CFG(reg_idx), TXGBE_PX_TR_CFG_SWFLSH); + TXGBE_WRITE_FLUSH(hw); + + wr32(hw, TXGBE_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32)); + wr32(hw, TXGBE_PX_TR_BAH(reg_idx), tdba >> 32); + + /* reset head and tail pointers */ + wr32(hw, TXGBE_PX_TR_RP(reg_idx), 0); + wr32(hw, TXGBE_PX_TR_WP(reg_idx), 0); + ring->tail = adapter->io_addr + TXGBE_PX_TR_WP(reg_idx); + + /* reset ntu and ntc to place SW in sync with hardwdare */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + ring->next_to_free = 0; + + txdctl |= TXGBE_RING_SIZE(ring) << TXGBE_PX_TR_CFG_TR_SIZE_SHIFT; + + /* + * set WTHRESH to encourage burst writeback, it should not be set + * higher than 1 when: + * - ITR is 0 as it could cause false TX hangs + * - ITR is set to > 100k int/sec and BQL is enabled + * + * In order to avoid issues WTHRESH + PTHRESH should always be equal + * to or less than the number of on chip descriptors, which is + * currently 40. + */ + + txdctl |= 0x20 << TXGBE_PX_TR_CFG_WTHRESH_SHIFT; + + /* reinitialize flowdirector state */ + if (adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE) { + ring->atr_sample_rate = adapter->atr_sample_rate; + ring->atr_count = 0; + set_bit(__TXGBE_TX_FDIR_INIT_DONE, &ring->state); + } else { + ring->atr_sample_rate = 0; + } + + /* initialize XPS */ + if (!test_and_set_bit(__TXGBE_TX_XPS_INIT_DONE, &ring->state)) { + struct txgbe_q_vector *q_vector = ring->q_vector; + + if (q_vector) + netif_set_xps_queue(adapter->netdev, + &q_vector->affinity_mask, + ring->queue_index); + } + + clear_bit(__TXGBE_HANG_CHECK_ARMED, &ring->state); + +#ifdef TXGBE_TXHEAD_WB + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + wr32(hw, TXGBE_PX_TR_HEAD_ADDRL(reg_idx), + ring->headwb_dma & DMA_BIT_MASK(32)); + wr32(hw, TXGBE_PX_TR_HEAD_ADDRH(reg_idx), ring->headwb_dma >> 32); + + if (TXGBE_TXHEAD_WB == 1) + txdctl |= TXGBE_PX_TR_CFG_HEAD_WB | TXGBE_PX_TR_CFG_HEAD_WB_64BYTE; + else + txdctl |= TXGBE_PX_TR_CFG_HEAD_WB; + } +#endif + + /* enable queue */ + wr32(hw, TXGBE_PX_TR_CFG(reg_idx), txdctl); + + /* poll to verify queue is enabled */ + do { + msleep(1); + txdctl = rd32(hw, TXGBE_PX_TR_CFG(reg_idx)); + } while (--wait_loop && !(txdctl & TXGBE_PX_TR_CFG_ENABLE)); + if (!wait_loop) + e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); +} + +/** + * txgbe_configure_tx - Configure Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. 
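+ * TDM_CTL.TE is set before any of the rings are brought up, and the
+ * MAC transmitter itself is enabled last.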
+ **/ +static void txgbe_configure_tx(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 i; + +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + if (adapter->num_tx_queues > 1) + adapter->netdev->features |= NETIF_F_MULTI_QUEUE; + else + adapter->netdev->features &= ~NETIF_F_MULTI_QUEUE; +#endif + + /* TDM_CTL.TE must be before Tx queues are enabled */ + wr32m(hw, TXGBE_TDM_CTL, + TXGBE_TDM_CTL_TE, TXGBE_TDM_CTL_TE); + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < adapter->num_tx_queues; i++) + txgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + txgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]); + wr32m(hw, TXGBE_TSC_BUF_AE, 0x3FF, 0x10); + + /* enable mac transmitter */ + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + TCALL(hw, mac.ops.enable_sec_tx_path); + + /* enable mac transmitter */ + wr32m(hw, TXGBE_MAC_TX_CFG, + TXGBE_MAC_TX_CFG_TE, TXGBE_MAC_TX_CFG_TE); +} + +static void txgbe_enable_rx_drop(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + u16 reg_idx = ring->reg_idx; + + u32 srrctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx)); + + srrctl |= TXGBE_PX_RR_CFG_DROP_EN; + + wr32(hw, TXGBE_PX_RR_CFG(reg_idx), srrctl); +} + +static void txgbe_disable_rx_drop(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + u16 reg_idx = ring->reg_idx; + + u32 srrctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx)); + + srrctl &= ~TXGBE_PX_RR_CFG_DROP_EN; + + wr32(hw, TXGBE_PX_RR_CFG(reg_idx), srrctl); +} + +void txgbe_set_rx_drop_en(struct txgbe_adapter *adapter) +{ + int i; + bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; + +#ifdef HAVE_DCBNL_IEEE + if (adapter->txgbe_ieee_pfc) + pfc_en |= !!(adapter->txgbe_ieee_pfc->pfc_en); + +#endif + /* + * We should set the drop enable bit if: + * SR-IOV is enabled + * or + * Number of Rx queues > 1 and flow control is disabled + * + * This allows us to avoid head of line blocking for security + * and performance reasons. + */ + if (adapter->num_vfs || (adapter->num_rx_queues > 1 && + !(adapter->hw.fc.current_mode & txgbe_fc_tx_pause) && !pfc_en)) { + for (i = 0; i < adapter->num_rx_queues; i++) + txgbe_enable_rx_drop(adapter, adapter->rx_ring[i]); + } else { + for (i = 0; i < adapter->num_rx_queues; i++) + txgbe_disable_rx_drop(adapter, adapter->rx_ring[i]); + } +} + +static void txgbe_configure_srrctl(struct txgbe_adapter *adapter, + struct txgbe_ring *rx_ring) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 srrctl; + u16 reg_idx = rx_ring->reg_idx; + + srrctl = rd32m(hw, TXGBE_PX_RR_CFG(reg_idx), + ~(TXGBE_PX_RR_CFG_RR_HDR_SZ | + TXGBE_PX_RR_CFG_RR_BUF_SZ | + TXGBE_PX_RR_CFG_SPLIT_MODE)); + /* configure header buffer length, needed for RSC */ + srrctl |= TXGBE_RX_HDR_SIZE << TXGBE_PX_RR_CFG_BSIZEHDRSIZE_SHIFT; + + /* configure the packet buffer length */ +#ifdef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> + TXGBE_PX_RR_CFG_BSIZEPKT_SHIFT; +#else +#if 0 + def HAVE_AF_XDP_ZC_SUPPORT + if (rx_ring->xsk_pool) { +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + u32 xsk_buf_len = rx_ring->xsk_pool->chunk_size_nohr - + XDP_PACKET_HEADROOM; +#else + u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool); +#endif /* HAVE_MEM_TYPE_XSK_BUFF_POOL */ + /* If the MAC support setting RXDCTL.RLPML, the + * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and + * RXDCTL.RLPML is set to the actual UMEM buffer + * size. 
If not, then we are stuck with a 1k buffer + * size resolution. In this case frames larger than + * the UMEM buffer size viewed in a 1k resolution will + * be dropped. + */ + srrctl |= xsk_buf_len >> TXGBE_PX_RR_CFG_BSIZEPKT_SHIFT; + } else { +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ + //srrctl |= txgbe_rx_bufsz(rx_ring) >> TXGBE_PX_RR_CFG_BSIZEPKT_SHIFT; + if (test_bit(__TXGBE_RX_3K_BUFFER, &rx_ring->state)) { + srrctl |= TXGBE_RXBUFFER_3K >> TXGBE_PX_RR_CFG_BSIZEPKT_SHIFT; + } else { + srrctl |= TXGBE_RXBUFFER_2K >> TXGBE_PX_RR_CFG_BSIZEPKT_SHIFT; + } + if (ring_is_hs_enabled(rx_ring)) + srrctl |= TXGBE_PX_RR_CFG_SPLIT_MODE; +#if 0 + def HAVE_AF_XDP_ZC_SUPPORT +} +#endif +#endif + + wr32(hw, TXGBE_PX_RR_CFG(reg_idx), srrctl); +} + +/** + * Return a number of entries in the RSS indirection table + * + * @adapter: device handle + * + */ +u32 txgbe_rss_indir_tbl_entries(struct txgbe_adapter *adapter) +{ + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) + return 64; + else + return 128; +} + +/** + * Write the RETA table to HW + * + * @adapter: device handle + * + * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. + */ +void txgbe_store_reta(struct txgbe_adapter *adapter) +{ + u32 i, reta_entries = txgbe_rss_indir_tbl_entries(adapter); + struct txgbe_hw *hw = &adapter->hw; + u32 reta = 0; + u8 *indir_tbl = adapter->rss_indir_tbl; + + /* Fill out the redirection table as follows: + * - 8 bit wide entries containing 4 bit RSS index + */ + + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + reta |= indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + wr32(hw, TXGBE_RDB_RSSTBL(i >> 2), reta); + reta = 0; + } + } +} + +void txgbe_store_vfreta(struct txgbe_adapter *adapter) +{ + u32 reta_entries = txgbe_rss_indir_tbl_entries(adapter); + unsigned int pf_pool = adapter->num_vfs; + u8 *indir_tbl = adapter->rss_indir_tbl; + struct txgbe_hw *hw = &adapter->hw; + u32 reta = 0; + u32 i; + + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + reta |= indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + wr32(hw, TXGBE_RDB_VMRSSTBL(i >> 2, pf_pool), reta); + reta = 0; + } + } +} + +void txgbe_setup_reta(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 i, j; + u32 reta_entries = txgbe_rss_indir_tbl_entries(adapter); + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + + /* + * Program table for at least 4 queues w/ SR-IOV so that VFs can + * make full use of any rings they may have. We will use the + * PSRTYPE register to control how many rings we use within the PF. 
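+ *
+ * The table itself is filled round-robin below: with an rss_i of 8
+ * the entries simply cycle through 0,1,...,7, and txgbe_store_reta()
+ * packs them four 8-bit entries per 32-bit TXGBE_RDB_RSSTBL register.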
+ */ + if ((adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2)) + rss_i = 2; + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + wr32(hw, TXGBE_RDB_RSSRK(i), adapter->rss_key[i]); + + /* Fill out redirection table */ + memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); + + for (i = 0, j = 0; i < reta_entries; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + } + + txgbe_store_reta(adapter); +} + +static void txgbe_setup_vfreta(struct txgbe_adapter *adapter) +{ + u32 reta_entries = txgbe_rss_indir_tbl_entries(adapter); + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + unsigned int pf_pool = adapter->num_vfs; + struct txgbe_hw *hw = &adapter->hw; + u32 i, j; + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + wr32(hw, TXGBE_RDB_VMRSSRK(i, pf_pool), *(adapter->rss_key + i)); + + for (i = 0, j = 0; i < reta_entries; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + } + + txgbe_store_vfreta(adapter); +} + +static void txgbe_setup_mrqc(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 rss_field = 0; + + /* VT, DCB and RSS do not coexist at the same time */ + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED && + adapter->flags & TXGBE_FLAG_DCB_ENABLED) + return; + + /* Disable indicating checksum in descriptor, enables RSS hash */ + wr32m(hw, TXGBE_PSR_CTL, + TXGBE_PSR_CTL_PCSD, TXGBE_PSR_CTL_PCSD); + + /* Perform hash on these packet types */ + rss_field = TXGBE_RDB_RA_CTL_RSS_IPV4 | + TXGBE_RDB_RA_CTL_RSS_IPV4_TCP | + TXGBE_RDB_RA_CTL_RSS_IPV6 | + TXGBE_RDB_RA_CTL_RSS_IPV6_TCP; + + if (adapter->flags2 & TXGBE_FLAG2_RSS_FIELD_IPV4_UDP) + rss_field |= TXGBE_RDB_RA_CTL_RSS_IPV4_UDP; + if (adapter->flags2 & TXGBE_FLAG2_RSS_FIELD_IPV6_UDP) + rss_field |= TXGBE_RDB_RA_CTL_RSS_IPV6_UDP; + + netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); + + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) { + unsigned int pool = adapter->num_vfs; + u32 vfmrqc; + + /* Setup RSS through the VF registers */ + txgbe_setup_vfreta(adapter); + + vfmrqc = rd32(hw , TXGBE_RDB_PL_CFG(pool)); + vfmrqc &= ~TXGBE_RDB_PL_CFG_RSS_MASK; + vfmrqc |= rss_field | TXGBE_RDB_PL_CFG_RSS_EN; + wr32(hw, TXGBE_RDB_PL_CFG(pool), vfmrqc); + + /* Enable VF RSS mode */ + rss_field |= TXGBE_RDB_RA_CTL_MULTI_RSS; + } else { + txgbe_setup_reta(adapter); + } + + if (adapter->flags2 & TXGBE_FLAG2_RSS_ENABLED) + rss_field |= TXGBE_RDB_RA_CTL_RSS_EN; + + wr32(hw, TXGBE_RDB_RA_CTL, rss_field); +} + +/** + * txgbe_clear_rscctl - disable RSC for the indicated ring + * @adapter: address of board private structure + * @ring: structure containing ring specific data + **/ +void txgbe_clear_rscctl(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + u8 reg_idx = ring->reg_idx; + + wr32m(hw, TXGBE_PX_RR_CFG(reg_idx), + TXGBE_PX_RR_CFG_RSC, 0); + + clear_ring_rsc_enabled(ring); +} + +/** + * txgbe_configure_rscctl - enable RSC for the indicated ring + * @adapter: address of board private structure + * @ring: structure containing ring specific data + **/ +void txgbe_configure_rscctl(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 rscctrl; + u8 reg_idx = ring->reg_idx; + + if (!ring_is_rsc_enabled(ring)) + return; + + rscctrl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx)); + rscctrl |= TXGBE_PX_RR_CFG_RSC; + /* + * we must limit the number of descriptors so that the + * total size of max desc * buf_len is not greater + 
* than 65536 + */ +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT +#if (MAX_SKB_FRAGS >= 16) + rscctrl |= TXGBE_PX_RR_CFG_MAX_RSCBUF_16; +#elif (MAX_SKB_FRAGS >= 8) + rscctrl |= TXGBE_PX_RR_CFG_MAX_RSCBUF_8; +#elif (MAX_SKB_FRAGS >= 4) + rscctrl |= TXGBE_PX_RR_CFG_MAX_RSCBUF_4; +#else + rscctrl |= TXGBE_PX_RR_CFG_MAX_RSCBUF_1; +#endif +#else /* CONFIG_TXGBE_DISABLE_PACKET_SPLIT */ + if (ring->rx_buf_len <= TXGBE_RXBUFFER_4K) + rscctrl |= TXGBE_PX_RR_CFG_MAX_RSCBUF_16; + else if (ring->rx_buf_len <= TXGBE_RXBUFFER_8K) + rscctrl |= TXGBE_PX_RR_CFG_MAX_RSCBUF_8; + else + rscctrl |= TXGBE_PX_RR_CFG_MAX_RSCBUF_4; +#endif /* !CONFIG_TXGBE_DISABLE_PACKET_SPLIT */ + wr32(hw, TXGBE_PX_RR_CFG(reg_idx), rscctrl); +} + +static void txgbe_rx_desc_queue_enable(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + int wait_loop = TXGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (TXGBE_REMOVED(hw->hw_addr)) + return; + + do { + msleep(1); + rxdctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx)); + } while (--wait_loop && !(rxdctl & TXGBE_PX_RR_CFG_RR_EN)); + + if (!wait_loop) { + e_err(drv, "RXDCTL.ENABLE on Rx queue %d " + "not set within the polling period\n", reg_idx); + } +} +/* disable the specified rx ring/queue */ +void txgbe_disable_rx_queue(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + int wait_loop = TXGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (TXGBE_REMOVED(hw->hw_addr)) + return; + + /* write value back with RXDCTL.ENABLE bit cleared */ + wr32m(hw, TXGBE_PX_RR_CFG(reg_idx), + TXGBE_PX_RR_CFG_RR_EN, 0); + + /* the hardware may take up to 100us to really disable the rx queue */ + do { + udelay(10); + rxdctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx)); + } while (--wait_loop && (rxdctl & TXGBE_PX_RR_CFG_RR_EN)); + + if (!wait_loop) { + e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within " + "the polling period\n", reg_idx); + } +} + +void txgbe_configure_rx_ring(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u64 rdba = ring->dma; + u32 rxdctl; + u16 reg_idx = ring->reg_idx; +#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) || defined(NETIF_F_HW_VLAN_STAG_FILTER) || defined(NETIF_F_HW_VLAN_FILTER) + netdev_features_t features = netdev->features; +#endif + + /* disable queue to avoid issues while updating state */ + rxdctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx)); + txgbe_disable_rx_queue(adapter, ring); +#ifdef HAVE_AF_XDP_ZC_SUPPORT + if(ring->q_vector) + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + ring->xsk_pool = txgbe_xsk_umem(adapter, ring); + if (ring->xsk_pool) { +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + ring->zca.free = txgbe_zca_free; +#endif + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + MEM_TYPE_ZERO_COPY, + &ring->zca)); +#else + MEM_TYPE_XSK_BUFF_POOL, + NULL)); + xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); +#endif /* HAVE_MEM_TYPE_XSK_BUFF_POOL */ + + } else { + if(ring->q_vector) + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL)); + } + +#endif + wr32(hw, TXGBE_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32)); + wr32(hw, TXGBE_PX_RR_BAH(reg_idx), rdba >> 32); + + if (ring->count == TXGBE_MAX_RXD) + rxdctl |= 0 << TXGBE_PX_RR_CFG_RR_SIZE_SHIFT; + else + rxdctl |= (ring->count / 128) << TXGBE_PX_RR_CFG_RR_SIZE_SHIFT; + +#if (defined NETIF_F_HW_VLAN_CTAG_RX) + if (features 
& NETIF_F_HW_VLAN_CTAG_RX) +#elif (defined NETIF_F_HW_VLAN_STAG_RX) + if (features & NETIF_F_HW_VLAN_STAG_RX) +#else + if (features & NETIF_F_HW_VLAN_RX) +#endif + rxdctl |= TXGBE_PX_RR_CFG_VLAN; + else + rxdctl &= ~TXGBE_PX_RR_CFG_VLAN; + + rxdctl |= 0x1 << TXGBE_PX_RR_CFG_RR_THER_SHIFT; + +#ifdef TXGBE_TXHEAD_WB + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + rxdctl |= TXGBE_PX_RR_CFG_DESC_MERGE; +#endif + + wr32(hw, TXGBE_PX_RR_CFG(reg_idx), rxdctl); + + /* reset head and tail pointers */ + wr32(hw, TXGBE_PX_RR_RP(reg_idx), 0); + wr32(hw, TXGBE_PX_RR_WP(reg_idx), 0); + ring->tail = adapter->io_addr + TXGBE_PX_RR_WP(reg_idx); + + /* reset ntu and ntc to place SW in sync with hardwdare */ + ring->next_to_clean = 0; + ring->next_to_use = 0; +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + ring->next_to_alloc = 0; + ring->rx_offset = txgbe_rx_offset(ring); +#endif + + txgbe_configure_srrctl(adapter, ring); + /* In ESX, RSCCTL configuration is done by on demand */ + txgbe_configure_rscctl(adapter, ring); + + /* enable receive descriptor ring */ + wr32m(hw, TXGBE_PX_RR_CFG(reg_idx), + TXGBE_PX_RR_CFG_RR_EN, TXGBE_PX_RR_CFG_RR_EN); + + txgbe_rx_desc_queue_enable(adapter, ring); +#ifdef HAVE_AF_XDP_ZC_SUPPORT + if (ring->xsk_pool) + txgbe_alloc_rx_buffers_zc(ring, txgbe_desc_unused(ring)); + else + txgbe_alloc_rx_buffers(ring, txgbe_desc_unused(ring)); +#else + txgbe_alloc_rx_buffers(ring, txgbe_desc_unused(ring)); +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +} + +static void txgbe_setup_psrtype(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int rss_i = adapter->ring_feature[RING_F_RSS].indices; + int pool; + + /* PSRTYPE must be initialized in adapters */ + u32 psrtype = TXGBE_RDB_PL_CFG_L4HDR | + TXGBE_RDB_PL_CFG_L3HDR | + TXGBE_RDB_PL_CFG_L2HDR | + TXGBE_RDB_PL_CFG_TUN_OUTER_L2HDR | + TXGBE_RDB_PL_CFG_TUN_TUNHDR; + + + if (rss_i > 3) + psrtype |= 2 << 29; + else if (rss_i > 1) + psrtype |= 1 << 29; + + for_each_set_bit(pool, &adapter->fwd_bitmask, TXGBE_MAX_MACVLANS) + wr32(hw, TXGBE_RDB_PL_CFG(VMDQ_P(pool)), psrtype); +} + +/** + * txgbe_configure_bridge_mode - common settings for configuring bridge mode + * @adapter - the private structure + * + * This function's purpose is to remove code duplication and configure some + * settings require to switch bridge modes. + **/ +static void txgbe_configure_bridge_mode(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + unsigned int p; + + if (adapter->flags & TXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE) { + /* disable Tx loopback, rely on switch hairpin mode */ + wr32m(hw, TXGBE_PSR_CTL, + TXGBE_PSR_CTL_SW_EN, 0); + + /* enable Rx source address pruning. Note, this requires + * replication to be enabled or else it does nothing. + */ + for (p = 0; p < adapter->num_vfs; p++) { + TCALL(hw, mac.ops.set_source_address_pruning, true, p); + } + + for_each_set_bit(p, &adapter->fwd_bitmask, TXGBE_MAX_MACVLANS) { + TCALL(hw, mac.ops.set_source_address_pruning, true, VMDQ_P(p)); + } + } else { + /* enable Tx loopback for internal VF/PF communication */ + wr32m(hw, TXGBE_PSR_CTL, + TXGBE_PSR_CTL_SW_EN, TXGBE_PSR_CTL_SW_EN); + + /* disable Rx source address pruning, since we don't expect to + * be receiving external loopback of our transmitted frames. 
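+ * Tx loopback (TXGBE_PSR_CTL_SW_EN) is enabled above in this mode,
+ * so VF/PF traffic is switched internally instead.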
+ */ + for (p = 0; p < adapter->num_vfs; p++) { + TCALL(hw, mac.ops.set_source_address_pruning, false, p); + } + + for_each_set_bit(p, &adapter->fwd_bitmask, TXGBE_MAX_MACVLANS) { + TCALL(hw, mac.ops.set_source_address_pruning, false, VMDQ_P(p)); + } + } +} + +static void txgbe_configure_virtualization(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 reg_offset, vf_shift; + u32 i; + + if (!(adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)) { + if (hw->mac.ops.set_ethertype_anti_spoofing) { + wr32(hw, + TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_LLDP), 0); + wr32(hw, + TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_FC), 0); + } + return; + } + + wr32m(hw, TXGBE_PSR_VM_CTL, + TXGBE_PSR_VM_CTL_POOL_MASK | + TXGBE_PSR_VM_CTL_REPLEN, + VMDQ_P(0) << TXGBE_PSR_VM_CTL_POOL_SHIFT | + TXGBE_PSR_VM_CTL_REPLEN); + + for_each_set_bit(i, &adapter->fwd_bitmask, TXGBE_MAX_MACVLANS) { + /* accept untagged packets until a vlan tag is + * specifically set for the VMDQ queue/pool + */ + wr32m(hw, TXGBE_PSR_VM_L2CTL(i), + TXGBE_PSR_VM_L2CTL_AUPE, TXGBE_PSR_VM_L2CTL_AUPE); + } + + vf_shift = VMDQ_P(0) % 32; + reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0; + + /* Enable only the PF pools for Tx/Rx */ + wr32(hw, TXGBE_RDM_VF_RE(reg_offset), (~0) << vf_shift); + wr32(hw, TXGBE_RDM_VF_RE(reg_offset ^ 1), reg_offset - 1); + wr32(hw, TXGBE_TDM_VF_TE(reg_offset), (~0) << vf_shift); + wr32(hw, TXGBE_TDM_VF_TE(reg_offset ^ 1), reg_offset - 1); + + adapter->flags2 &= ~TXGBE_FLAG2_VLAN_PROMISC; + + if (!(adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)) { + return; + } + + /* configure default bridge settings */ + txgbe_configure_bridge_mode(adapter); + + /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be + * calling set_ethertype_anti_spoofing for each VF in loop below. 
+ */
+ if (hw->mac.ops.set_ethertype_anti_spoofing) {
+ wr32(hw,
+ TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_LLDP),
+ (TXGBE_PSR_ETYPE_SWC_FILTER_EN | /* enable filter */
+ TXGBE_PSR_ETYPE_SWC_TX_ANTISPOOF |
+ TXGBE_ETH_P_LLDP)); /* LLDP eth protocol type */
+
+ wr32(hw,
+ TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_FC),
+ (TXGBE_PSR_ETYPE_SWC_FILTER_EN |
+ TXGBE_PSR_ETYPE_SWC_TX_ANTISPOOF |
+ ETH_P_PAUSE));
+ }
+
+ for (i = 0; i < adapter->num_vfs; i++) {
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+ if (!adapter->vfinfo[i].spoofchk_enabled)
+ txgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
+#endif
+ /* enable ethertype anti spoofing if hw supports it */
+ TCALL(hw, mac.ops.set_ethertype_anti_spoofing, true, i);
+ }
+}
+
+static void txgbe_set_rx_buffer_len(struct txgbe_adapter *adapter)
+{
+ struct txgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ struct txgbe_ring *rx_ring;
+ int i;
+ u32 mhadd;
+#ifdef CONFIG_TXGBE_DISABLE_PACKET_SPLIT
+ u16 rx_buf_len;
+#endif
+
+#if IS_ENABLED(CONFIG_FCOE)
+ /* adjust max frame to be able to do baby jumbo for FCoE */
+ if ((adapter->flags & TXGBE_FLAG_FCOE_ENABLED) &&
+ (max_frame < TXGBE_FCOE_JUMBO_FRAME_SIZE))
+ max_frame = TXGBE_FCOE_JUMBO_FRAME_SIZE;
+#endif /* CONFIG_FCOE */
+
+ /* adjust max frame to be at least the size of a standard frame */
+ if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+ max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
+
+ mhadd = rd32(hw, TXGBE_PSR_MAX_SZ);
+ if (max_frame != mhadd) {
+ wr32(hw, TXGBE_PSR_MAX_SZ, max_frame);
+ }
+
+#ifdef CONFIG_TXGBE_DISABLE_PACKET_SPLIT
+ /* MHADD will allow an extra 4 bytes past for vlan tagged frames */
+ max_frame += VLAN_HLEN;
+
+ if (!(adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) &&
+ (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)) {
+ rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+ /*
+ * Make best use of allocation by using all but 1K of a
+ * power of 2 allocation that will be used for skb->head.
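+ * (Hence the 3K, 7K and 15K buckets below: each is 1K short of the
+ * 4K, 8K or 16K power-of-two allocation it comes from.)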
+ */ + } else if (max_frame <= TXGBE_RXBUFFER_3K) { + rx_buf_len = TXGBE_RXBUFFER_3K; + } else if (max_frame <= TXGBE_RXBUFFER_7K) { + rx_buf_len = TXGBE_RXBUFFER_7K; + } else if (max_frame <= TXGBE_RXBUFFER_15K) { + rx_buf_len = TXGBE_RXBUFFER_15K; + } else { + rx_buf_len = TXGBE_MAX_RXBUFFER; + } +#endif /* CONFIG_TXGBE_DISABLE_PACKET_SPLIT */ + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_ring = adapter->rx_ring[i]; +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + clear_bit(__TXGBE_RX_3K_BUFFER, &rx_ring->state); +#endif + if (adapter->flags & TXGBE_FLAG_RX_HS_ENABLED) { + rx_ring->rx_buf_len = TXGBE_RX_HDR_SIZE; + set_ring_hs_enabled(rx_ring); + } else + clear_ring_hs_enabled(rx_ring); + + if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) + set_ring_rsc_enabled(rx_ring); + else + clear_ring_rsc_enabled(rx_ring); + +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + clear_bit(__TXGBE_RX_3K_BUFFER, &rx_ring->state); + clear_bit(__TXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); +#if IS_ENABLED(CONFIG_FCOE) + + if (test_bit(__TXGBE_RX_FCOE, &rx_ring->state)) + set_bit(__TXGBE_RX_3K_BUFFER, &rx_ring->state); +#endif + +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC + if (adapter->flags2 & TXGBE_FLAG2_RX_LEGACY) + continue; + + set_bit(__TXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); + +#if (PAGE_SIZE < 8192) + if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) + set_bit(__TXGBE_RX_3K_BUFFER, &rx_ring->state); + + if (TXGBE_2K_TOO_SMALL_WITH_PADDING || + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) + set_bit(__TXGBE_RX_3K_BUFFER, &rx_ring->state); +#endif +#else /* !HAVE_SWIOTLB_SKIP_CPU_SYNC */ + adapter->flags2 |= TXGBE_FLAG2_RX_LEGACY; + adapter->eth_priv_flags |= TXGBE_ETH_PRIV_FLAG_LEGACY_RX; +#endif /* !HAVE_SWIOTLB_SKIP_CPU_SYNC */ +#else /* CONFIG_TXGBE_DISABLE_PACKET_SPLIT */ + + rx_ring->rx_buf_len = rx_buf_len; +#if IS_ENABLED(CONFIG_FCOE) + if (test_bit(__TXGBE_RX_FCOE, &rx_ring->state) && + (rx_buf_len < TXGBE_FCOE_JUMBO_FRAME_SIZE)) + rx_ring->rx_buf_len = TXGBE_FCOE_JUMBO_FRAME_SIZE; +#endif /* CONFIG_FCOE */ +#endif /* CONFIG_TXGBE_DISABLE_PACKET_SPLIT */ + } +} + +/** + * txgbe_configure_rx - Configure Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
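+ * Receives are disabled while PSRTYPE, RSC, RSS and the per-ring
+ * buffer sizes are programmed; each Rx ring is then configured and
+ * the receive path is re-enabled at the end.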
+ **/ +static void txgbe_configure_rx(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i; + u32 rxctrl, psrctl; + + /* disable receives while setting up the descriptors */ + TCALL(hw, mac.ops.disable_rx); + + txgbe_setup_psrtype(adapter); + + /* enable hw crc stripping */ + wr32m(hw, TXGBE_RSC_CTL, + TXGBE_RSC_CTL_CRC_STRIP, TXGBE_RSC_CTL_CRC_STRIP); + + /* RSC Setup */ + psrctl = rd32m(hw, TXGBE_PSR_CTL, ~TXGBE_PSR_CTL_RSC_DIS); + psrctl |= TXGBE_PSR_CTL_RSC_ACK; /* Disable RSC for ACK packets */ + if (!(adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED)) + psrctl |= TXGBE_PSR_CTL_RSC_DIS; + wr32(hw, TXGBE_PSR_CTL, psrctl); + + /* Program registers for the distribution of queues */ + txgbe_setup_mrqc(adapter); + + /* set_rx_buffer_len must be called before ring initialization */ + txgbe_set_rx_buffer_len(adapter); + + wr32(hw, TXGBE_RDM_DCACHE_CTL, TXGBE_RDM_DCACHE_CTL_EN); + wr32m(hw, TXGBE_RDM_RSC_CTL, TXGBE_RDM_RSC_CTL_FREE_CTL, + TXGBE_RDM_RSC_CTL_FREE_CTL); + wr32m(hw, TXGBE_RDM_RSC_CTL, TXGBE_RDM_RSC_CTL_FREE_CNT_DIS, + ~TXGBE_RDM_RSC_CTL_FREE_CNT_DIS); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + txgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); + + rxctrl = rd32(hw, TXGBE_RDB_PB_CTL); + + /* enable all receives */ + rxctrl |= TXGBE_RDB_PB_CTL_RXEN; + TCALL(hw, mac.ops.enable_rx_dma, rxctrl); +} + +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#if defined(NETIF_F_HW_VLAN_CTAG_TX) || defined(NETIF_F_HW_VLAN_STAG_TX) +static int txgbe_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +#else +static int txgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +#endif /* NETIF_F_HW_VLAN_CTAG_TX || NETIF_F_HW_VLAN_STAG_TX*/ +#else /* !HAVE_INT_NDO_VLAN_RX_ADD_VID */ +static void txgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + int pool_ndx = VMDQ_P(0); + + /* add VID to filter table */ + if (hw->mac.ops.set_vfta) { +#ifndef HAVE_VLAN_RX_REGISTER + if (vid < VLAN_N_VID) + set_bit(vid, adapter->active_vlans); +#endif + if (!vid || !(adapter->flags2 & TXGBE_FLAG2_VLAN_PROMISC)) + TCALL(hw, mac.ops.set_vfta, vid, pool_ndx, true); + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) { + int i; + /* enable vlan id for all pools */ + for_each_set_bit(i, &adapter->fwd_bitmask, + TXGBE_MAX_MACVLANS) + TCALL(hw, mac.ops.set_vfta, vid, + VMDQ_P(i), true); + } + } +#ifndef HAVE_NETDEV_VLAN_FEATURES + + /* + * Copy feature flags from netdev to the vlan netdev for this vid. + * This allows things like TSO to bubble down to our vlan device. + * Some vlans, such as VLAN 0 for DCB will not have a v_netdev so + * we will not have a netdev that needs updating. 
+ */ + if (adapter->vlgrp) { + struct vlan_group *vlgrp = adapter->vlgrp; + struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid); + if (v_netdev) { + v_netdev->features |= netdev->features; + vlan_group_set_device(vlgrp, vid, v_netdev); + } + } +#endif /* HAVE_NETDEV_VLAN_FEATURES */ +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; +#endif +} + +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#if (defined NETIF_F_HW_VLAN_CTAG_RX) || (defined NETIF_F_HW_VLAN_STAG_RX) +static int txgbe_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +#else /* !NETIF_F_HW_VLAN_CTAG_RX */ +static int txgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +#endif /* NETIF_F_HW_VLAN_CTAG_RX */ +#else +static void txgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +#endif +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + int pool_ndx = VMDQ_P(0); + + /* User is not allowed to remove vlan ID 0 */ + if (!vid) +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; +#else + return; +#endif + +#ifdef HAVE_VLAN_RX_REGISTER + if (!test_bit(__TXGBE_DOWN, &adapter->state)) + txgbe_irq_disable(adapter); + + vlan_group_set_device(adapter->vlgrp, vid, NULL); + + if (!test_bit(__TXGBE_DOWN, &adapter->state)) + txgbe_irq_enable(adapter, true, true); + +#endif /* HAVE_VLAN_RX_REGISTER */ + /* remove VID from filter table */ + if (hw->mac.ops.set_vfta) { + if (vid && !(adapter->flags2 & TXGBE_FLAG2_VLAN_PROMISC)) + TCALL(hw, mac.ops.set_vfta, vid, pool_ndx, false); + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) { + int i; + /* remove vlan id from all pools */ + for_each_set_bit(i, &adapter->fwd_bitmask, + TXGBE_MAX_MACVLANS) + TCALL(hw, mac.ops.set_vfta, vid, + VMDQ_P(i), false); + } + } +#ifndef HAVE_VLAN_RX_REGISTER + + clear_bit(vid, adapter->active_vlans); +#endif +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; +#endif +} + +#ifdef HAVE_8021P_SUPPORT +/** + * txgbe_vlan_strip_disable - helper to disable vlan tag stripping + * @adapter: driver data + */ +void txgbe_vlan_strip_disable(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i, j; + + /* leave vlan tag stripping enabled for DCB */ + if (adapter->flags & TXGBE_FLAG_DCB_ENABLED) + return; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct txgbe_ring *ring = adapter->rx_ring[i]; + if (ring->accel) + continue; + j = ring->reg_idx; + wr32m(hw, TXGBE_PX_RR_CFG(j), + TXGBE_PX_RR_CFG_VLAN, 0); + } +} + +#endif +/** + * txgbe_vlan_strip_enable - helper to enable vlan tag stripping + * @adapter: driver data + */ +void txgbe_vlan_strip_enable(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i, j; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct txgbe_ring *ring = adapter->rx_ring[i]; + if (ring->accel) + continue; + j = ring->reg_idx; + wr32m(hw, TXGBE_PX_RR_CFG(j), + TXGBE_PX_RR_CFG_VLAN, TXGBE_PX_RR_CFG_VLAN); + } +} + +#ifdef HAVE_VLAN_RX_REGISTER +static void txgbe_vlan_mode(struct net_device *netdev, struct vlan_group *grp) +#else +void txgbe_vlan_mode(struct net_device *netdev, u32 features) +#endif +{ +#if defined(HAVE_VLAN_RX_REGISTER) || defined(HAVE_8021P_SUPPORT) + struct txgbe_adapter *adapter = netdev_priv(netdev); +#endif +#ifdef HAVE_8021P_SUPPORT + bool enable; +#endif + +#ifdef HAVE_VLAN_RX_REGISTER + if (!test_bit(__TXGBE_DOWN, &adapter->state)) + txgbe_irq_disable(adapter); + + adapter->vlgrp = grp; + + if (!test_bit(__TXGBE_DOWN, &adapter->state)) + txgbe_irq_enable(adapter, true, true); 
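+	/* the vlgrp pointer is swapped with interrupts masked so the legacy
+	 * VLAN receive path never observes a half-updated group
+	 */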
+#endif
+#ifdef HAVE_8021P_SUPPORT
+#ifdef HAVE_VLAN_RX_REGISTER
+	enable = (grp || (adapter->flags & TXGBE_FLAG_DCB_ENABLED));
+#else
+#if (defined NETIF_F_HW_VLAN_CTAG_RX) && (defined NETIF_F_HW_VLAN_STAG_RX)
+	enable = !!(features & (NETIF_F_HW_VLAN_CTAG_RX |
+				NETIF_F_HW_VLAN_STAG_RX));
+#elif (defined NETIF_F_HW_VLAN_CTAG_RX)
+	enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
+#elif (defined NETIF_F_HW_VLAN_STAG_RX)
+	enable = !!(features & NETIF_F_HW_VLAN_STAG_RX);
+#else
+	enable = !!(features & NETIF_F_HW_VLAN_RX);
+#endif /* NETIF_F_HW_VLAN_CTAG_RX */
+#endif /* HAVE_VLAN_RX_REGISTER */
+	if (enable)
+		/* enable VLAN tag insert/strip */
+		txgbe_vlan_strip_enable(adapter);
+	else
+		/* disable VLAN tag insert/strip */
+		txgbe_vlan_strip_disable(adapter);
+
+#endif /* HAVE_8021P_SUPPORT */
+}
+
+static void txgbe_restore_vlan(struct txgbe_adapter *adapter)
+{
+#ifdef HAVE_VLAN_RX_REGISTER
+	txgbe_vlan_mode(adapter->netdev, adapter->vlgrp);
+
+	/*
+	 * add vlan ID 0 and enable vlan tag stripping so we
+	 * always accept priority-tagged traffic
+	 */
+#if (defined NETIF_F_HW_VLAN_CTAG_RX) || (defined NETIF_F_HW_VLAN_STAG_RX)
+	txgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
+#else
+	txgbe_vlan_rx_add_vid(adapter->netdev, 0);
+#endif
+#ifndef HAVE_8021P_SUPPORT
+	txgbe_vlan_strip_enable(adapter);
+#endif
+	if (adapter->vlgrp) {
+		u16 vid;
+		for (vid = 0; vid < VLAN_N_VID; vid++) {
+			if (!vlan_group_get_device(adapter->vlgrp, vid))
+				continue;
+#if (defined NETIF_F_HW_VLAN_CTAG_RX) || (defined NETIF_F_HW_VLAN_STAG_RX)
+			txgbe_vlan_rx_add_vid(adapter->netdev,
+					      htons(ETH_P_8021Q), vid);
+#else
+			txgbe_vlan_rx_add_vid(adapter->netdev, vid);
+#endif
+		}
+	}
+#else
+	struct net_device *netdev = adapter->netdev;
+	u16 vid;
+
+	txgbe_vlan_mode(netdev, netdev->features);
+
+	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+#if (defined NETIF_F_HW_VLAN_CTAG_RX) || (defined NETIF_F_HW_VLAN_STAG_RX)
+		txgbe_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
+#else
+		txgbe_vlan_rx_add_vid(netdev, vid);
+#endif
+#endif
+}
+
+#endif
+static u8 *txgbe_addr_list_itr(struct txgbe_hw __maybe_unused *hw,
+			       u8 **mc_addr_ptr, u32 *vmdq)
+{
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+	struct netdev_hw_addr *mc_ptr;
+#else
+	struct dev_mc_list *mc_ptr;
+#endif
+#ifdef CONFIG_PCI_IOV
+	struct txgbe_adapter *adapter = hw->back;
+#endif /* CONFIG_PCI_IOV */
+	u8 *addr = *mc_addr_ptr;
+
+	/* VMDQ_P implicitly uses the adapter struct when CONFIG_PCI_IOV is
+	 * defined, so we have to wrap the pointer above correctly to prevent
+	 * a warning.
+	 */
+	*vmdq = VMDQ_P(0);
+
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+	mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]);
+	if (mc_ptr->list.next) {
+		struct netdev_hw_addr *ha;
+
+		ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list);
+		*mc_addr_ptr = ha->addr;
+	}
+#else
+	mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
+	if (mc_ptr->next)
+		*mc_addr_ptr = mc_ptr->next->dmi_addr;
+#endif
+	else
+		*mc_addr_ptr = NULL;
+
+	return addr;
+}
+
+/**
+ * txgbe_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
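+ * An empty netdev list programs the table with zero entries so stale
+ * hash bits are cleared rather than left behind.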
+ * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +int txgbe_write_mc_addr_list(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; +#ifdef NETDEV_HW_ADDR_T_MULTICAST + struct netdev_hw_addr *ha; +#endif + u8 *addr_list = NULL; + int addr_count = 0; + + if (!hw->mac.ops.update_mc_addr_list) + return -ENOMEM; + + if (!netif_running(netdev)) + return 0; + + + if (netdev_mc_empty(netdev)) { + TCALL(hw, mac.ops.update_mc_addr_list, NULL, 0, + txgbe_addr_list_itr, true); + } else { +#ifdef NETDEV_HW_ADDR_T_MULTICAST + ha = list_first_entry(&netdev->mc.list, + struct netdev_hw_addr, list); + addr_list = ha->addr; +#else + addr_list = netdev->mc_list->dmi_addr; +#endif + addr_count = netdev_mc_count(netdev); + + TCALL(hw, mac.ops.update_mc_addr_list, addr_list, addr_count, + txgbe_addr_list_itr, true); + } + +#ifdef CONFIG_PCI_IOV + txgbe_restore_vf_multicasts(adapter); +#endif + return addr_count; +} + + +void txgbe_full_sync_mac_table(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i; + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & TXGBE_MAC_STATE_IN_USE) { + TCALL(hw, mac.ops.set_rar, i, + adapter->mac_table[i].addr, + adapter->mac_table[i].pools, + TXGBE_PSR_MAC_SWC_AD_H_AV); + } else { + TCALL(hw, mac.ops.clear_rar, i); + } + adapter->mac_table[i].state &= ~(TXGBE_MAC_STATE_MODIFIED); + } +} + +static void txgbe_sync_mac_table(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i; + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & TXGBE_MAC_STATE_MODIFIED) { + if (adapter->mac_table[i].state & + TXGBE_MAC_STATE_IN_USE) { + TCALL(hw, mac.ops.set_rar, i, + adapter->mac_table[i].addr, + adapter->mac_table[i].pools, + TXGBE_PSR_MAC_SWC_AD_H_AV); + } else { + TCALL(hw, mac.ops.clear_rar, i); + } + adapter->mac_table[i].state &= + ~(TXGBE_MAC_STATE_MODIFIED); + } + } +} + +int txgbe_available_rars(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 i, count = 0; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state == 0) + count++; + } + return count; +} + +/* this function destroys the first RAR entry */ +static void txgbe_mac_set_default_filter(struct txgbe_adapter *adapter, + u8 *addr) +{ + struct txgbe_hw *hw = &adapter->hw; + + memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN); + adapter->mac_table[0].pools = 1ULL << VMDQ_P(0); + adapter->mac_table[0].state = (TXGBE_MAC_STATE_DEFAULT | + TXGBE_MAC_STATE_IN_USE); + TCALL(hw, mac.ops.set_rar, 0, adapter->mac_table[0].addr, + adapter->mac_table[0].pools, + TXGBE_PSR_MAC_SWC_AD_H_AV); +} + +int txgbe_add_mac_filter(struct txgbe_adapter *adapter, const u8 *addr, u16 pool) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & TXGBE_MAC_STATE_IN_USE) { + if (ether_addr_equal(addr, adapter->mac_table[i].addr)) { + if (adapter->mac_table[i].pools != (1ULL << pool)) { + adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED; + memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN); + adapter->mac_table[i].pools |= (1ULL << pool); + txgbe_sync_mac_table(adapter); + return i; + } + } + } + } + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & TXGBE_MAC_STATE_IN_USE) { + continue; 
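+			/* slot occupied by another address, keep scanning
+			 * for a free RAR entry below
+			 */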
+ } + adapter->mac_table[i].state |= (TXGBE_MAC_STATE_MODIFIED | + TXGBE_MAC_STATE_IN_USE); + memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN); + adapter->mac_table[i].pools |= (1ULL << pool); + txgbe_sync_mac_table(adapter); + return i; + } + return -ENOMEM; +} + +static void txgbe_flush_sw_mac_table(struct txgbe_adapter *adapter) +{ + u32 i; + struct txgbe_hw *hw = &adapter->hw; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED; + adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + adapter->mac_table[i].pools = 0; + } + txgbe_sync_mac_table(adapter); +} + +int txgbe_del_mac_filter(struct txgbe_adapter *adapter, const u8 *addr, u16 pool) +{ + /* search table for addr, if found, set to 0 and sync */ + u32 i; + struct txgbe_hw *hw = &adapter->hw; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (ether_addr_equal(addr, adapter->mac_table[i].addr)){ + if (adapter->mac_table[i].pools & (1ULL << pool)) { + adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED; + if (adapter->mac_table[i].pools == (1ULL << pool)) + adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE; + + adapter->mac_table[i].pools &= ~(1ULL << pool) ; + txgbe_sync_mac_table(adapter); + } + return 0; + } + + if (adapter->mac_table[i].pools != (1 << pool)) + continue; + if ( !ether_addr_equal(addr, adapter->mac_table[i].addr)) + continue; + + adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED; + adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + adapter->mac_table[i].pools = 0; + txgbe_sync_mac_table(adapter); + return 0; + + + } + return -ENOMEM; +} + +#ifdef HAVE_SET_RX_MODE +/** + * txgbe_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list to the RAR table. 
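+ * Each address is removed and re-added so its pool binding is refreshed
+ * on every sync.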
+ * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +int txgbe_write_uc_addr_list(struct net_device *netdev, int pool) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + int count = 0; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > txgbe_available_rars(adapter)) + return -ENOMEM; + + if (!netdev_uc_empty(netdev)) { +#ifdef NETDEV_HW_ADDR_T_UNICAST + struct netdev_hw_addr *ha; +#else + struct dev_mc_list *ha; +#endif + netdev_for_each_uc_addr(ha, netdev) { +#ifdef NETDEV_HW_ADDR_T_UNICAST + txgbe_del_mac_filter(adapter, ha->addr, pool); + txgbe_add_mac_filter(adapter, ha->addr, pool); +#else + txgbe_del_mac_filter(adapter, ha->da_addr, pool); + txgbe_add_mac_filter(adapter, ha->da_addr, pool); +#endif + count++; + } + } + return count; +} + +static int txgbe_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + int ret; + + ret = txgbe_add_mac_filter(adapter, addr, VMDQ_P(0)); + + return min_t(int, ret, 0); +} + +static int txgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + txgbe_del_mac_filter(adapter, addr, VMDQ_P(0)); + + return 0; +} + +#endif + +static int txgbe_add_cloud_switcher(struct txgbe_adapter *adapter, + u32 key, u16 pool) +{ + struct txgbe_hw *hw = &adapter->hw; + + UNREFERENCED_PARAMETER(pool); + + wr32(hw, TXGBE_PSR_CL_SWC_IDX, 0); + wr32(hw, TXGBE_PSR_CL_SWC_KEY, key); + wr32(hw, TXGBE_PSR_CL_SWC_CTL, + TXGBE_PSR_CL_SWC_CTL_VLD | TXGBE_PSR_CL_SWC_CTL_DST_MSK); + wr32(hw, TXGBE_PSR_CL_SWC_VM_L, 0x1); + wr32(hw, TXGBE_PSR_CL_SWC_VM_H, 0x0); + + return 0; +} + +#ifndef HAVE_VLAN_RX_REGISTER +#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) || defined(NETIF_F_HW_VLAN_STAG_FILTER) +static void txgbe_vlan_promisc_enable(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 vlnctrl, i; + u32 vind; + u32 bits; + + vlnctrl = rd32(hw, TXGBE_PSR_VLAN_CTL); + + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) { + /* we need to keep the VLAN filter on in SRIOV */ + vlnctrl |= TXGBE_PSR_VLAN_CTL_VFE; + wr32(hw, TXGBE_PSR_VLAN_CTL, vlnctrl); + } else { + vlnctrl &= ~TXGBE_PSR_VLAN_CTL_VFE; + wr32(hw, TXGBE_PSR_VLAN_CTL, vlnctrl); + return; + } + + + /* We are already in VLAN promisc, nothing to do */ + if (adapter->flags2 & TXGBE_FLAG2_VLAN_PROMISC) + return; + + /* Set flag so we don't redo unnecessary work */ + adapter->flags2 |= TXGBE_FLAG2_VLAN_PROMISC; + + /* Add PF to all active pools */ + vind = VMDQ_P(0); + for (i = TXGBE_PSR_VLAN_SWC_ENTRIES; --i;) { + wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, i); + + if (vind < 32) { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_L); + bits |= (1 << vind); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_L, + bits); + } else { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + bits |= (1 << (vind - 32)); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_H, + bits); + } + } + + /* Set all bits in the VLAN filter table array */ + for (i = 0; i < hw->mac.vft_size; i++) { + wr32(hw, TXGBE_PSR_VLAN_TBL(i), ~0U); + } +} + +static void txgbe_scrub_vfta(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 i, vid, bits; + u32 vfta; + u32 vind; + u32 vlvf; + + for (i = TXGBE_PSR_VLAN_SWC_ENTRIES; --i;) { + wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, i); + vlvf = rd32(hw, TXGBE_PSR_VLAN_SWC); + + /* pull VLAN ID from VLVF */ + vid = vlvf & ~TXGBE_PSR_VLAN_SWC_VIEN; + + if (vlvf 
& TXGBE_PSR_VLAN_SWC_VIEN) { + /* if PF is part of this then continue */ + if (test_bit(vid, adapter->active_vlans)) + continue; + } + + /* remove PF from the pool */ + vind = VMDQ_P(0); + if (vind < 32) { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_L); + bits &= ~(1 << vind); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_L, + bits); + } else { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + bits &= ~(1 << (vind - 32)); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_H, + bits); + } + } + + /* extract values from vft_shadow and write back to VFTA */ + for (i = 0; i < hw->mac.vft_size; i++) { + vfta = hw->mac.vft_shadow[i]; + wr32(hw, TXGBE_PSR_VLAN_TBL(i), vfta); + } +} + +static void txgbe_vlan_promisc_disable(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 vlnctrl; + + /* configure vlan filtering */ + vlnctrl = rd32(hw, TXGBE_PSR_VLAN_CTL); + vlnctrl |= TXGBE_PSR_VLAN_CTL_VFE; + wr32(hw, TXGBE_PSR_VLAN_CTL, vlnctrl); + + /* We are not in VLAN promisc, nothing to do */ + if (!(adapter->flags2 & TXGBE_FLAG2_VLAN_PROMISC)) + return; + + /* Set flag so we don't redo unnecessary work */ + adapter->flags2 &= ~TXGBE_FLAG2_VLAN_PROMISC; + + txgbe_scrub_vfta(adapter); +} +#endif +#endif + +/** + * txgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_method entry point is called whenever the unicast/multicast + * address list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast and + * promiscuous mode. + **/ +void txgbe_set_rx_mode(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u32 fctrl, vmolr, vlnctrl; + int count; +#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) || defined(NETIF_F_HW_VLAN_STAG_FILTER) || defined(NETIF_F_HW_VLAN_FILTER) + netdev_features_t features = netdev->features; +#endif + + /* Check for Promiscuous and All Multicast modes */ + fctrl = rd32m(hw, TXGBE_PSR_CTL, + ~(TXGBE_PSR_CTL_UPE | TXGBE_PSR_CTL_MPE)); + vmolr = rd32m(hw, TXGBE_PSR_VM_L2CTL(VMDQ_P(0)), + ~(TXGBE_PSR_VM_L2CTL_UPE | + TXGBE_PSR_VM_L2CTL_MPE | + TXGBE_PSR_VM_L2CTL_ROPE | + TXGBE_PSR_VM_L2CTL_ROMPE)); + vlnctrl = rd32m(hw, TXGBE_PSR_VLAN_CTL, + ~(TXGBE_PSR_VLAN_CTL_VFE | + TXGBE_PSR_VLAN_CTL_CFIEN)); + + /* set all bits that we expect to always be set */ + fctrl |= TXGBE_PSR_CTL_BAM | TXGBE_PSR_CTL_MFE; + vmolr |= TXGBE_PSR_VM_L2CTL_BAM | + TXGBE_PSR_VM_L2CTL_AUPE | + TXGBE_PSR_VM_L2CTL_VACC; + +/* +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) + vlnctrl |= TXGBE_PSR_VLAN_CTL_VFE; +#endif +*/ + hw->addr_ctrl.user_set_promisc = false; + if (netdev->flags & IFF_PROMISC) { + hw->addr_ctrl.user_set_promisc = true; + fctrl |= (TXGBE_PSR_CTL_UPE | TXGBE_PSR_CTL_MPE); + /* pf don't want packets routing to vf, so clear UPE */ + vmolr |= TXGBE_PSR_VM_L2CTL_MPE; + if ((adapter->flags & (TXGBE_FLAG_VMDQ_ENABLED | + TXGBE_FLAG_SRIOV_ENABLED))) + vlnctrl |= TXGBE_PSR_VLAN_CTL_VFE; +#ifdef NETIF_F_HW_VLAN_CTAG_FILTER + features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; +#endif +#ifdef NETIF_F_HW_VLAN_STAG_FILTER + features &= ~NETIF_F_HW_VLAN_STAG_FILTER; +#endif +#ifdef NETIF_F_HW_VLAN_FILTER + features &= ~NETIF_F_HW_VLAN_FILTER; +#endif + } + + if (netdev->flags & IFF_ALLMULTI) { + fctrl |= TXGBE_PSR_CTL_MPE; + vmolr |= TXGBE_PSR_VM_L2CTL_MPE; + } + + /* This is useful for sniffing bad packets. 
*/ + if (netdev->features & NETIF_F_RXALL) { + vmolr |= (TXGBE_PSR_VM_L2CTL_UPE | TXGBE_PSR_VM_L2CTL_MPE); + vlnctrl &= ~TXGBE_PSR_VLAN_CTL_VFE; + /* receive bad packets */ + wr32m(hw, TXGBE_RSC_CTL, + TXGBE_RSC_CTL_SAVE_MAC_ERR, + TXGBE_RSC_CTL_SAVE_MAC_ERR); +#ifdef NETIF_F_HW_VLAN_CTAG_FILTER + features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; +#endif +#ifdef NETIF_F_HW_VLAN_STAG_FILTER + features &= ~NETIF_F_HW_VLAN_STAG_FILTER; +#endif +#ifdef NETIF_F_HW_VLAN_FILTER + features &= ~NETIF_F_HW_VLAN_FILTER; +#endif + } else { + vmolr |= TXGBE_PSR_VM_L2CTL_ROPE | TXGBE_PSR_VM_L2CTL_ROMPE; + } + + /* + * Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + if (__dev_uc_sync(netdev, txgbe_uc_sync, txgbe_uc_unsync)) { + vmolr &= ~TXGBE_PSR_VM_L2CTL_ROPE; + fctrl |= TXGBE_PSR_CTL_UPE; + e_dev_warn("netdev uc count is %d, hw available mac entry count is %d," + "enable promisc mode\n", + netdev_uc_count(netdev), txgbe_available_rars(adapter)); + } + + /* + * Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = txgbe_write_mc_addr_list(netdev); + if (count < 0) { + vmolr &= ~TXGBE_PSR_VM_L2CTL_ROMPE; + vmolr |= TXGBE_PSR_VM_L2CTL_MPE; + } + +// wr32(hw, TXGBE_PSR_VLAN_CTL, vlnctrl); + wr32(hw, TXGBE_PSR_CTL, fctrl); + wr32(hw, TXGBE_PSR_VM_L2CTL(VMDQ_P(0)), vmolr); + +#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + txgbe_vlan_promisc_disable(adapter); + else + txgbe_vlan_promisc_enable(adapter); +#elif defined(NETIF_F_HW_VLAN_STAG_FILTER) + if (features & NETIF_F_HW_VLAN_STAG_FILTER) + txgbe_vlan_promisc_disable(adapter); + else + txgbe_vlan_promisc_enable(adapter); +#else + wr32(hw, TXGBE_PSR_VLAN_CTL, vlnctrl); +#endif /* NETIF_F_HW_VLAN_CTAG_FILTER */ + + /* enable cloud switch */ + if (adapter->flags2 & TXGBE_FLAG2_CLOUD_SWITCH_ENABLED) { + txgbe_add_cloud_switcher(adapter, 0x10, 0); + } +} + +static void txgbe_napi_enable_all(struct txgbe_adapter *adapter) +{ + struct txgbe_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; +#ifdef HAVE_NDO_BUSY_POLL + txgbe_qv_init_lock(adapter->q_vector[q_idx]); +#endif + napi_enable(&q_vector->napi); + } +} + +static void txgbe_napi_disable_all(struct txgbe_adapter *adapter) +{ + struct txgbe_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; + napi_disable(&q_vector->napi); +#ifdef HAVE_NDO_BUSY_POLL + while (!txgbe_qv_disable(adapter->q_vector[q_idx])) { + pr_info("QV %d locked\n", q_idx); + usleep_range(1000, 20000); + } +#endif + } +} + +#ifdef HAVE_DCBNL_IEEE +s32 txgbe_dcb_hw_ets(struct txgbe_hw *hw, struct ieee_ets *ets, int max_frame) +{ + __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS]; + __u8 prio_type[IEEE_8021QAZ_MAX_TCS]; + int i; + + /* naively give each TC a bwg to map onto CEE hardware */ + __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7}; + + /* Map TSA onto CEE prio type */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + prio_type[i] = 2; + break; + case IEEE_8021QAZ_TSA_ETS: + prio_type[i] = 0; + break; + default: + /* Hardware only supports priority strict or + * ETS transmission selection algorithms if + * we receive some other value from 
dcbnl + * throw an error + */ + return -EINVAL; + } + } + + txgbe_dcb_calculate_tc_credits(ets->tc_tx_bw, refill, max, max_frame); + return txgbe_dcb_hw_config(hw, refill, max, + bwg_id, prio_type, ets->prio_tc); +} +#endif + +void txgbe_clear_vxlan_port(struct txgbe_adapter *adapter) +{ +#ifdef HAVE_VXLAN_CHECKS + adapter->vxlan_port = 0; +#endif /* HAVE_VXLAN_CHECKS */ + if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + /* Do not need to clear vxlan_port reg*/ + //wr32(&adapter->hw, TXGBE_CFG_VXLAN, 0); +} + + +/* NETIF_F_GSO_IPXIP4/6 may not be defined in all distributions */ + +#ifndef NETIF_F_GSO_GRE +#define NETIF_F_GSO_GRE 0 +#endif + +#ifndef NETIF_F_GSO_GRE_CSUM +#define NETIF_F_GSO_GRE_CSUM 0 +#endif +#ifndef NETIF_F_GSO_UDP_TUNNEL +#define NETIF_F_GSO_UDP_TUNNEL 0 +#endif + +#ifndef NETIF_F_GSO_IPXIP4 +#define NETIF_F_GSO_IPXIP4 0 +#endif +#ifndef NETIF_F_GSO_IPXIP6 +#define NETIF_F_GSO_IPXIP6 0 +#endif +#define TXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + +static inline unsigned long txgbe_tso_features(void) +{ + unsigned long features = 0; + +#ifdef NETIF_F_TSO + features |= NETIF_F_TSO; +#endif /* NETIF_F_TSO */ +#ifdef NETIF_F_TSO6 + features |= NETIF_F_TSO6; +#endif /* NETIF_F_TSO6 */ +#ifdef NETIF_F_GSO_PARTIAL + features |= NETIF_F_GSO_PARTIAL | TXGBE_GSO_PARTIAL_FEATURES; +#else + features |= TXGBE_GSO_PARTIAL_FEATURES; +#endif + + + return features; +} + +/* + * txgbe_configure_dcb - Configure DCB hardware support + * @adapter: txgbe adapter struct + * + * Called when the driver opens or needs to reconfigure DCB related bits. + */ +static void txgbe_configure_dcb(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + u32 msb = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1; + + if (!(adapter->flags & TXGBE_FLAG_DCB_ENABLED)) + return; + + +#if IS_ENABLED(CONFIG_FCOE) + if (netdev->features & NETIF_F_FCOE_MTU) + max_frame = max_t(int, max_frame, + TXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif /* CONFIG_FCOE */ + +#ifdef HAVE_DCBNL_IEEE + if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) { + if (adapter->txgbe_ieee_ets) + txgbe_dcb_hw_ets(&adapter->hw, + adapter->txgbe_ieee_ets, + max_frame); + + if (adapter->txgbe_ieee_pfc && adapter->txgbe_ieee_ets) { + struct ieee_pfc *pfc = adapter->txgbe_ieee_pfc; + u8 *tc = adapter->txgbe_ieee_ets->prio_tc; + + txgbe_dcb_config_pfc(&adapter->hw, pfc->pfc_en, tc); + } + } else +#endif /* HAVE_DCBNL_IEEE */ + { + txgbe_dcb_calculate_tc_credits_cee(hw, + &adapter->dcb_cfg, + max_frame, + TXGBE_DCB_TX_CONFIG); + txgbe_dcb_calculate_tc_credits_cee(hw, + &adapter->dcb_cfg, + max_frame, + TXGBE_DCB_RX_CONFIG); + txgbe_dcb_hw_config_cee(hw, &adapter->dcb_cfg); + } + + /* Enable RSS Hash per TC */ + while (rss_i) { + msb++; + rss_i >>= 1; + } + + /* write msb to all 8 TCs in one write */ + wr32(hw, TXGBE_RDB_RSS_TC, msb * 0x11111111); +} + +#ifndef TXGBE_NO_LLI +static void txgbe_configure_lli(struct txgbe_adapter *adapter) +{ + /* lli should only be enabled with MSI-X and MSI */ + if (!(adapter->flags & TXGBE_FLAG_MSI_ENABLED) && + !(adapter->flags & TXGBE_FLAG_MSIX_ENABLED)) + return; + + if (adapter->lli_etype) { + wr32(&adapter->hw, TXGBE_RDB_5T_CTL1(0), + (TXGBE_RDB_5T_CTL1_LLI | + TXGBE_RDB_5T_CTL1_SIZE_BP)); + wr32(&adapter->hw, 
TXGBE_RDB_ETYPE_CLS(0),
+		       TXGBE_RDB_ETYPE_CLS_LLI);
+		wr32(&adapter->hw, TXGBE_PSR_ETYPE_SWC(0),
+		       (adapter->lli_etype |
+			TXGBE_PSR_ETYPE_SWC_FILTER_EN));
+	}
+
+	if (adapter->lli_port) {
+		wr32(&adapter->hw, TXGBE_RDB_5T_CTL1(0),
+		       (TXGBE_RDB_5T_CTL1_LLI |
+			TXGBE_RDB_5T_CTL1_SIZE_BP));
+		wr32(&adapter->hw, TXGBE_RDB_5T_CTL0(0),
+		       (TXGBE_RDB_5T_CTL0_POOL_MASK_EN |
+			(TXGBE_RDB_5T_CTL0_PRIORITY_MASK <<
+			 TXGBE_RDB_5T_CTL0_PRIORITY_SHIFT) |
+			(TXGBE_RDB_5T_CTL0_DEST_PORT_MASK <<
+			 TXGBE_RDB_5T_CTL0_5TUPLE_MASK_SHIFT)));
+
+		wr32(&adapter->hw, TXGBE_RDB_5T_SDP(0),
+			(adapter->lli_port << 16));
+	}
+
+	if (adapter->lli_size) {
+		wr32(&adapter->hw, TXGBE_RDB_5T_CTL1(0),
+			TXGBE_RDB_5T_CTL1_LLI);
+		wr32m(&adapter->hw, TXGBE_RDB_LLI_THRE,
+			TXGBE_RDB_LLI_THRE_SZ(~0), adapter->lli_size);
+		wr32(&adapter->hw, TXGBE_RDB_5T_CTL0(0),
+			(TXGBE_RDB_5T_CTL0_POOL_MASK_EN |
+			 (TXGBE_RDB_5T_CTL0_PRIORITY_MASK <<
+			  TXGBE_RDB_5T_CTL0_PRIORITY_SHIFT) |
+			 (TXGBE_RDB_5T_CTL0_5TUPLE_MASK_MASK <<
+			  TXGBE_RDB_5T_CTL0_5TUPLE_MASK_SHIFT)));
+	}
+
+	if (adapter->lli_vlan_pri) {
+		wr32m(&adapter->hw, TXGBE_RDB_LLI_THRE,
+			TXGBE_RDB_LLI_THRE_PRIORITY_EN |
+			TXGBE_RDB_LLI_THRE_UP(~0),
+			TXGBE_RDB_LLI_THRE_PRIORITY_EN |
+			(adapter->lli_vlan_pri << TXGBE_RDB_LLI_THRE_UP_SHIFT));
+	}
+}
+
+#endif /* TXGBE_NO_LLI */
+/* Additional bittime to account for TXGBE framing */
+#define TXGBE_ETH_FRAMING 20
+
+/*
+ * txgbe_hpbthresh - calculate high water mark for flow control
+ *
+ * @adapter: board private structure to calculate for
+ * @pb - packet buffer to calculate
+ */
+static int txgbe_hpbthresh(struct txgbe_adapter *adapter, int pb)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	struct net_device *dev = adapter->netdev;
+	int link, tc, kb, marker;
+	u32 dv_id, rx_pba;
+
+	/* Calculate max LAN frame size */
+	tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + TXGBE_ETH_FRAMING;
+
+#if IS_ENABLED(CONFIG_FCOE)
+	/* FCoE traffic class uses FCOE jumbo frames */
+	if ((dev->features & NETIF_F_FCOE_MTU) &&
+	    (tc < TXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+	    (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
+		tc = TXGBE_FCOE_JUMBO_FRAME_SIZE;
+#endif /* CONFIG_FCOE */
+
+	/* Calculate delay value for device */
+	dv_id = TXGBE_DV(link, tc);
+
+	/* Loopback switch introduces additional latency */
+	if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)
+		dv_id += TXGBE_B2BT(tc);
+
+	/* Delay value is calculated in bit times convert to KB */
+	kb = TXGBE_BT2KB(dv_id);
+	rx_pba = rd32(hw, TXGBE_RDB_PB_SZ(pb))
+		 >> TXGBE_RDB_PB_SZ_SHIFT;
+
+	marker = rx_pba - kb;
+
+	/* It is possible that the packet buffer is not large enough
+	 * to provide required headroom. In this case throw an error
+	 * to the user and do the best we can.
+	 */
+	if (marker < 0) {
+		e_warn(drv, "Packet Buffer(%i) can not provide enough "
+			    "headroom to support flow control. "
+ "Decrease MTU or number of traffic classes\n", pb); + marker = tc + 1; + } + + return marker; +} + +/* + * txgbe_lpbthresh - calculate low water mark for for flow control + * + * @adapter: board private structure to calculate for + * @pb - packet buffer to calculate + */ +static int txgbe_lpbthresh(struct txgbe_adapter *adapter, int __maybe_unused pb) +{ + struct net_device *dev = adapter->netdev; + int tc; + u32 dv_id; + + /* Calculate max LAN frame size */ + tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; + +#if IS_ENABLED(CONFIG_FCOE) + /* FCoE traffic class uses FCOE jumbo frames */ + if ((dev->features & NETIF_F_FCOE_MTU) && + (tc < TXGBE_FCOE_JUMBO_FRAME_SIZE) && + (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up))) + tc = TXGBE_FCOE_JUMBO_FRAME_SIZE; +#endif /* CONFIG_FCOE */ + + /* Calculate delay value for device */ + dv_id = TXGBE_LOW_DV(tc); + + /* Delay value is calculated in bit times convert to KB */ + return TXGBE_BT2KB(dv_id); +} + +/* + * txgbe_pbthresh_setup - calculate and setup high low water marks + */ +static void txgbe_pbthresh_setup(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int num_tc = netdev_get_num_tc(adapter->netdev); + int i; + + if (!num_tc) + num_tc = 1; + + + for (i = 0; i < num_tc; i++) { + hw->fc.high_water[i] = txgbe_hpbthresh(adapter, i); + hw->fc.low_water[i] = txgbe_lpbthresh(adapter, i); + + /* Low water marks must not be larger than high water marks */ + if (hw->fc.low_water[i] > hw->fc.high_water[i]) + hw->fc.low_water[i] = 0; + } + + for (; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) + hw->fc.high_water[i] = 0; +} + +static void txgbe_configure_pb(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int hdrm; + int tc = netdev_get_num_tc(adapter->netdev); + + if (adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE || + adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE) + hdrm = 32 << adapter->fdir_pballoc; + else + hdrm = 0; + + TCALL(hw, mac.ops.setup_rxpba, tc, hdrm, PBA_STRATEGY_EQUAL); + txgbe_pbthresh_setup(adapter); +} + +static void txgbe_ethertype_filter_restore(struct txgbe_adapter *adapter) +{ + struct txgbe_etype_filter_info *filter_info = &adapter->etype_filter_info; + struct txgbe_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS; i++) { + if (filter_info->ethertype_mask & (1 << i)) { + wr32(hw, TXGBE_PSR_ETYPE_SWC(i), + filter_info->etype_filters[i].etqf); + wr32(hw, TXGBE_RDB_ETYPE_CLS(i), + filter_info->etype_filters[i].etqs); + TXGBE_WRITE_FLUSH(hw); + } + } +} + +static void txgbe_fdir_filter_restore(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct hlist_node *node; + struct txgbe_fdir_filter *filter; + u8 queue = 0; + + spin_lock(&adapter->fdir_perfect_lock); + + if (!hlist_empty(&adapter->fdir_filter_list)) + txgbe_fdir_set_input_mask(hw, &adapter->fdir_mask, + adapter->cloud_mode); + + hlist_for_each_entry_safe(filter, node, + &adapter->fdir_filter_list, fdir_node) { + if (filter->action == TXGBE_RDB_FDIR_DROP_QUEUE) { + queue = TXGBE_RDB_FDIR_DROP_QUEUE; + } else { + u32 ring = ethtool_get_flow_spec_ring(filter->action); + u8 vf = ethtool_get_flow_spec_ring_vf(filter->action); + + if (!vf && ring >= adapter->num_rx_queues) { + e_err(drv, + "FDIR restore failed w/o vf, ring:%u\n", + ring); + continue; + } else if (vf && + ((vf > adapter->num_vfs) || + ring >= adapter->num_rx_queues_per_pool)) { + e_err(drv, + "FDIR restore failed vf:%hhu, ring:%u\n", + vf, ring); + continue; + } + + /* Map the ring onto the absolute queue 
index */ + if (!vf) + queue = adapter->rx_ring[ring]->reg_idx; + else + queue = ((vf - 1) * + adapter->num_rx_queues_per_pool) + ring; + } + + txgbe_fdir_write_perfect_filter(hw, + &filter->filter, + filter->sw_idx, + queue, + adapter->cloud_mode); + } + + spin_unlock(&adapter->fdir_perfect_lock); +} + +void txgbe_configure_isb(struct txgbe_adapter *adapter) +{ + /* set ISB Address */ + struct txgbe_hw *hw = &adapter->hw; + + wr32(hw, TXGBE_PX_ISB_ADDR_L, + adapter->isb_dma & DMA_BIT_MASK(32)); + wr32(hw, TXGBE_PX_ISB_ADDR_H, adapter->isb_dma >> 32); +} + +static void txgbe_configure_port(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 value, i; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) { + if (tcs > 4) + /* 8 TCs */ + value = TXGBE_CFG_PORT_CTL_NUM_TC_8 | + TXGBE_CFG_PORT_CTL_NUM_VT_16 | + TXGBE_CFG_PORT_CTL_DCB_EN; + else if (tcs > 1) + /* 4 TCs */ + value = TXGBE_CFG_PORT_CTL_NUM_TC_4 | + TXGBE_CFG_PORT_CTL_NUM_VT_32 | + TXGBE_CFG_PORT_CTL_DCB_EN; + else if (adapter->ring_feature[RING_F_RSS].mask == TXGBE_RSS_4Q_MASK) + value = TXGBE_CFG_PORT_CTL_NUM_VT_32; + else /* adapter->ring_feature[RING_F_RSS].indices <= 2 */ + value = TXGBE_CFG_PORT_CTL_NUM_VT_64; + } else { + if (tcs > 4) + value = TXGBE_CFG_PORT_CTL_NUM_TC_8 | + TXGBE_CFG_PORT_CTL_DCB_EN; + else if (tcs > 1) + value = TXGBE_CFG_PORT_CTL_NUM_TC_4 | + TXGBE_CFG_PORT_CTL_DCB_EN; + else + value = 0; + } + + value |= TXGBE_CFG_PORT_CTL_D_VLAN | TXGBE_CFG_PORT_CTL_QINQ; + wr32m(hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_NUM_TC_MASK | + TXGBE_CFG_PORT_CTL_NUM_VT_MASK | + TXGBE_CFG_PORT_CTL_DCB_EN | + TXGBE_CFG_PORT_CTL_D_VLAN | + TXGBE_CFG_PORT_CTL_QINQ, + value); + if (adapter->tx_unidir_mode) + wr32m(hw, TXGBE_CFG_PORT_CTL, TXGBE_CFG_PORT_CTL_FORCE_LKUP, + TXGBE_CFG_PORT_CTL_FORCE_LKUP); + + wr32(hw, TXGBE_CFG_TAG_TPID(0), + ETH_P_8021Q | ETH_P_8021AD << 16); + adapter->hw.tpid[0] = ETH_P_8021Q; + adapter->hw.tpid[1] = ETH_P_8021AD; + for (i = 1; i < 4; i++) + wr32(hw, TXGBE_CFG_TAG_TPID(i), + ETH_P_8021Q | ETH_P_8021Q << 16); + for (i = 2; i < 8; i++) + adapter->hw.tpid[i] = ETH_P_8021Q; +} + +#ifdef HAVE_VIRTUAL_STATION +static void txgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool, + struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 vmolr; + + /* No unicast promiscuous support for VMDQ devices. 
*/ + vmolr = rd32m(hw, TXGBE_PSR_VM_L2CTL(pool), + ~(TXGBE_PSR_VM_L2CTL_UPE | + TXGBE_PSR_VM_L2CTL_MPE | + TXGBE_PSR_VM_L2CTL_ROPE | + TXGBE_PSR_VM_L2CTL_ROMPE)); + + /* set all bits that we expect to always be set */ + vmolr |= TXGBE_PSR_VM_L2CTL_ROPE | + TXGBE_PSR_VM_L2CTL_BAM | TXGBE_PSR_VM_L2CTL_AUPE; + + if (dev->flags & IFF_ALLMULTI) { + vmolr |= TXGBE_PSR_VM_L2CTL_MPE; + } else { + vmolr |= TXGBE_PSR_VM_L2CTL_ROMPE; + txgbe_write_mc_addr_list(dev); + } + + txgbe_write_uc_addr_list(adapter->netdev, pool); + wr32(hw, TXGBE_PSR_VM_L2CTL(pool), vmolr); +} + +static void txgbe_fwd_psrtype(struct txgbe_fwd_adapter *accel) +{ + struct txgbe_adapter *adapter = accel->adapter; + int rss_i = adapter->queues_per_pool; + struct txgbe_hw *hw = &adapter->hw; + u32 psrtype = TXGBE_RDB_PL_CFG_L4HDR | + TXGBE_RDB_PL_CFG_L3HDR | + TXGBE_RDB_PL_CFG_L2HDR | + TXGBE_RDB_PL_CFG_TUN_OUTER_L2HDR | + TXGBE_RDB_PL_CFG_TUN_TUNHDR; + + if (rss_i > 3) + psrtype |= 2 << 29; + else if (rss_i > 1) + psrtype |= 1 << 29; + + wr32(hw, TXGBE_RDB_PL_CFG(VMDQ_P(accel->index)), psrtype); +} + +static void txgbe_disable_fwd_ring(struct txgbe_fwd_adapter *accel, + struct txgbe_ring *rx_ring) +{ + struct txgbe_adapter *adapter = accel->adapter; + int index = rx_ring->queue_index + accel->rx_base_queue; + + /* shutdown specific queue receive and wait for dma to settle */ + txgbe_disable_rx_queue(adapter, rx_ring); + usleep_range(10000, 20000); + txgbe_intr_disable(&adapter->hw, TXGBE_INTR_Q(index)); + txgbe_clean_rx_ring(rx_ring); + rx_ring->accel = NULL; +} + +static void txgbe_enable_fwd_ring(struct txgbe_fwd_adapter *accel, + struct txgbe_ring *rx_ring) +{ + struct txgbe_adapter *adapter = accel->adapter; + int index = rx_ring->queue_index + accel->rx_base_queue; + + txgbe_intr_enable(&adapter->hw, TXGBE_INTR_Q(index)); +} + +static int txgbe_fwd_ring_down(struct net_device *vdev, + struct txgbe_fwd_adapter *accel) +{ + struct txgbe_adapter *adapter = accel->adapter; + unsigned int rxbase = accel->rx_base_queue; + unsigned int txbase = accel->tx_base_queue; + int i; + + netif_tx_stop_all_queues(vdev); + + for (i = 0; i < adapter->queues_per_pool; i++) { + txgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); + adapter->rx_ring[rxbase + i]->netdev = adapter->netdev; + } + + for (i = 0; i < adapter->queues_per_pool; i++) { + adapter->tx_ring[txbase + i]->accel = NULL; + adapter->tx_ring[txbase + i]->netdev = adapter->netdev; + } + + return 0; +} + +static int txgbe_fwd_ring_up(struct net_device *vdev, + struct txgbe_fwd_adapter *accel) +{ + struct txgbe_adapter *adapter = accel->adapter; + unsigned int rxbase, txbase, queues; + int i, baseq, err = 0; + + if (!test_bit(accel->index, &adapter->fwd_bitmask)) + return 0; + + baseq = VMDQ_P(accel->index) * adapter->queues_per_pool; + netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", + accel->index, adapter->num_vmdqs, + baseq, baseq + adapter->queues_per_pool, + adapter->fwd_bitmask); + + accel->vdev = vdev; + accel->rx_base_queue = rxbase = baseq; + accel->tx_base_queue = txbase = baseq; + + for (i = 0; i < adapter->queues_per_pool; i++) + txgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); + + for (i = 0; i < adapter->queues_per_pool; i++) { + adapter->rx_ring[rxbase + i]->netdev = vdev; + adapter->rx_ring[rxbase + i]->accel = accel; + txgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]); + } + + for (i = 0; i < adapter->queues_per_pool; i++) { + adapter->tx_ring[txbase + i]->netdev = vdev; + adapter->tx_ring[txbase + i]->accel = accel; + 
} + + queues = min_t(unsigned int, + adapter->queues_per_pool, vdev->num_tx_queues); + err = netif_set_real_num_tx_queues(vdev, queues); + if (err) + goto fwd_queue_err; + + err = netif_set_real_num_rx_queues(vdev, queues); + if (err) + goto fwd_queue_err; + + if (is_valid_ether_addr(vdev->dev_addr)) + txgbe_add_mac_filter(adapter, vdev->dev_addr, + VMDQ_P(accel->index)); + + txgbe_fwd_psrtype(accel); + txgbe_macvlan_set_rx_mode(vdev, VMDQ_P(accel->index), adapter); + + for (i = 0; i < adapter->queues_per_pool; i++) + txgbe_enable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); + + return err; +fwd_queue_err: + txgbe_fwd_ring_down(vdev, accel); + return err; +} + +static void txgbe_configure_dfwd(struct txgbe_adapter *adapter) +{ + struct net_device *upper; + struct list_head *iter; + int err; + + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *dfwd = netdev_priv(upper); + struct txgbe_fwd_adapter *accel = dfwd->fwd_priv; + + if (accel) { + err = txgbe_fwd_ring_up(upper, accel); + if (err) + continue; + } + } + } +} +#endif /*HAVE_VIRTUAL_STATION*/ + +static void txgbe_configure_desc_chk(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i; + + if (!netif_msg_tx_err(adapter)) + return; + + for (i = 0; i < 4; i++) + wr32(hw, TXGBE_TDM_DESC_CHK(i), 0xFFFFFFFF); + + e_info(drv, "enable desc check\n"); +} + +static void txgbe_configure(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + txgbe_configure_pb(adapter); + txgbe_configure_dcb(adapter); + + /* + * We must restore virtualization before VLANs or else + * the VLVF registers will not be populated + */ + txgbe_configure_virtualization(adapter); + txgbe_configure_port(adapter); + + txgbe_set_rx_mode(adapter->netdev); +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) + txgbe_restore_vlan(adapter); +#endif + + TCALL(hw, mac.ops.disable_sec_rx_path); + + txgbe_ethertype_filter_restore(adapter); + if (adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE) { + txgbe_init_fdir_signature(&adapter->hw, + adapter->fdir_pballoc); + } else if (adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE) { + txgbe_init_fdir_perfect(&adapter->hw, + adapter->fdir_pballoc, + adapter->cloud_mode); + txgbe_fdir_filter_restore(adapter); + } + + TCALL(hw, mac.ops.enable_sec_rx_path); + + TCALL(hw, mac.ops.setup_eee, + (adapter->flags2 & TXGBE_FLAG2_EEE_CAPABLE) && + (adapter->flags2 & TXGBE_FLAG2_EEE_ENABLED)); + +#if IS_ENABLED(CONFIG_TPH) + /* configure TPH */ + if (adapter->flags & TXGBE_FLAG_TPH_CAPABLE) + txgbe_setup_tph(adapter); +#endif + +#if IS_ENABLED(CONFIG_FCOE) + /* configure FCoE L2 filters, redirection table, and Rx control */ + txgbe_configure_fcoe(adapter); +#endif /* CONFIG_FCOE */ + + txgbe_configure_tx(adapter); + txgbe_configure_rx(adapter); + txgbe_configure_desc_chk(adapter); + txgbe_configure_isb(adapter); +#ifdef HAVE_VIRTUAL_STATION + txgbe_configure_dfwd(adapter); +#endif +} + +static bool txgbe_is_sfp(struct txgbe_hw *hw) +{ + switch (TCALL(hw, mac.ops.get_media_type)) { + case txgbe_media_type_fiber_qsfp: + case txgbe_media_type_fiber: + return true; + default: + return false; + } +} + +/** + * txgbe_sfp_link_config - set up SFP+ link + * @adapter: pointer to private adapter struct + **/ +static void txgbe_sfp_link_config(struct txgbe_adapter *adapter) +{ + /* + * We are assuming the worst case scenerio here, and that + * is that an SFP was inserted/removed after the 
reset + * but before SFP detection was enabled. As such the best + * solution is to just start searching as soon as we start + */ + + adapter->flags2 |= TXGBE_FLAG2_SFP_NEEDS_RESET; + adapter->sfp_poll_time = 0; +} + +/** + * txgbe_non_sfp_link_config - set up non-SFP+ link + * @hw: pointer to private hardware struct + * + * Returns 0 on success, negative on failure + **/ +static int txgbe_non_sfp_link_config(struct txgbe_hw *hw) +{ + u32 speed; + bool autoneg, link_up = false; + u32 ret = TXGBE_ERR_LINK_SETUP; + + ret = TCALL(hw, mac.ops.check_link, &speed, &link_up, false); + + if (ret) + goto link_cfg_out; + + if (link_up) + return 0; + + if ((hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI) { + /* setup external PHY Mac Interface */ + mtdSetMacInterfaceControl(&hw->phy_dev, hw->phy.addr, MTD_MAC_TYPE_XAUI, + MTD_FALSE, MTD_MAC_SNOOP_OFF, + 0, MTD_MAC_SPEED_1000_MBPS, + MTD_MAX_MAC_SPEED_LEAVE_UNCHANGED, + MTD_TRUE, MTD_TRUE); + + speed = hw->phy.autoneg_advertised; + if (!speed) + ret = TCALL(hw, mac.ops.get_link_capabilities, &speed, + &autoneg); + if (ret) + goto link_cfg_out; + } else { + speed = TXGBE_LINK_SPEED_10GB_FULL; + autoneg = false; + } + + ret = TCALL(hw, mac.ops.setup_link, speed, false); + +link_cfg_out: + return ret; +} + +static void txgbe_setup_gpie(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 gpie = 0; + + if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) { + gpie = TXGBE_PX_GPIE_MODEL; + /* + * use EIAM to auto-mask when MSI-X interrupt is asserted + * this saves a register write for every interrupt + */ + } else { + /* legacy interrupts, use EIAM to auto-mask when reading EICR, + * specifically only auto mask tx and rx interrupts */ + } + + wr32(hw, TXGBE_PX_GPIE, gpie); +} + +static void reinit_gpio_int(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 reg; + + wr32(hw, TXGBE_GPIO_INTMASK, 0xFF); + reg = rd32(hw, TXGBE_GPIO_INTSTATUS); + if (reg & TXGBE_GPIO_INTSTATUS_2) { + adapter->flags2 |= TXGBE_FLAG2_SFP_NEEDS_RESET; + wr32(hw, TXGBE_GPIO_EOI, + TXGBE_GPIO_EOI_2); + adapter->sfp_poll_time = 0; + txgbe_service_event_schedule(adapter); + } + if (reg & TXGBE_GPIO_INTSTATUS_3) { + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + wr32(hw, TXGBE_GPIO_EOI, + TXGBE_GPIO_EOI_3); + txgbe_service_event_schedule(adapter); + } + + if (reg & TXGBE_GPIO_INTSTATUS_6) { + wr32(hw, TXGBE_GPIO_EOI, + TXGBE_GPIO_EOI_6); + adapter->flags |= + TXGBE_FLAG_NEED_LINK_CONFIG; + txgbe_service_event_schedule(adapter); + } + wr32(hw, TXGBE_GPIO_INTMASK, 0x0); + +} + +static void txgbe_up_complete(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int err; + u32 links_reg; + u16 value; + + txgbe_get_hw_control(adapter); + txgbe_setup_gpie(adapter); + if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) + txgbe_configure_msix(adapter); + else + txgbe_configure_msi_and_legacy(adapter); + + /* enable the optics for SFP+ fiber + * or power up mv phy + */ + if (hw->phy.media_type == txgbe_media_type_fiber || + hw->phy.media_type == txgbe_media_type_fiber_qsfp) + TCALL(hw, mac.ops.enable_tx_laser); + if(!((( hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || + adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LLDP)) { + if (hw->phy.media_type == txgbe_media_type_copper && + (hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI) + txgbe_external_phy_resume(hw); + } + + smp_mb__before_atomic(); + clear_bit(__TXGBE_DOWN, &adapter->state); + txgbe_napi_enable_all(adapter); +#ifndef TXGBE_NO_LLI + 
txgbe_configure_lli(adapter); +#endif + + if (txgbe_is_sfp(hw)) { + txgbe_sfp_link_config(adapter); + } else if (txgbe_is_backplane(hw)) { + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + txgbe_service_event_schedule(adapter); + } else { + err = txgbe_non_sfp_link_config(hw); + if (err) + e_err(probe, "link_config FAILED %d\n", err); + } + + if (hw->mac.type == txgbe_mac_aml40) { + hw->mac.ops.clear_hw_cntrs(hw); + links_reg = rd32(hw, TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) { + if (links_reg & TXGBE_CFG_PORT_ST_AML_LINK_40G) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | + TXGBE_MAC_TX_CFG_AML_SPEED_40G); + } + } + + wr32(hw, TXGBE_GPIO_DDR, + TXGBE_GPIO_DDR_0 | TXGBE_GPIO_DDR_1 | TXGBE_GPIO_DDR_3); + wr32(hw, TXGBE_GPIO_DR, TXGBE_GPIO_DR_1); + } else if (hw->mac.type == txgbe_mac_aml) { + hw->mac.ops.clear_hw_cntrs(hw); + links_reg = rd32(hw, TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) { + if (links_reg & TXGBE_CFG_PORT_ST_AML_LINK_25G) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | + TXGBE_MAC_TX_CFG_AML_SPEED_25G); + } else if (links_reg & TXGBE_CFG_PORT_ST_AML_LINK_10G) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | + TXGBE_MAC_TX_CFG_AML_SPEED_10G); + } + } + + wr32(hw, TXGBE_GPIO_INT_POLARITY, 0x0); + wr32(hw, TXGBE_GPIO_DDR, + TXGBE_GPIO_DDR_0 | TXGBE_GPIO_DDR_1 | TXGBE_GPIO_DDR_4 | TXGBE_GPIO_DDR_5); + wr32(hw, TXGBE_GPIO_DR, TXGBE_GPIO_DR_4 | TXGBE_GPIO_DR_5); + + msleep(10); + wr32(hw, TXGBE_GPIO_DR, TXGBE_GPIO_DR_0); + } else { + links_reg = rd32(hw, TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) { + if (links_reg & TXGBE_CFG_PORT_ST_LINK_10G) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_SPEED_MASK) | + TXGBE_MAC_TX_CFG_SPEED_10G); + } else if (links_reg & (TXGBE_CFG_PORT_ST_LINK_1G | TXGBE_CFG_PORT_ST_LINK_100M)) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_SPEED_MASK) | + TXGBE_MAC_TX_CFG_SPEED_1G); + } + } + } + + /* workaround gpio int lost in lldp-on condition */ + reinit_gpio_int(adapter); + + /* clear any pending interrupts, may auto mask */ + rd32(hw, TXGBE_PX_IC(0)); + rd32(hw, TXGBE_PX_IC(1)); + rd32(hw, TXGBE_PX_MISC_IC); + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) + wr32(hw, TXGBE_GPIO_EOI, TXGBE_GPIO_EOI_6); + txgbe_irq_enable(adapter, true, true); + /* enable external PHY interrupt */ + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) { + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 0x03, 0x8011, &value); + /* only enable T unit int */ + txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xf043, 0x1); + /* active high */ + txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xf041, 0x0); + /* enable AN complete and link status change int */ + txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 0x03, 0x8010, 0xc00); + } + /* enable transmits */ + netif_tx_start_all_queues(adapter->netdev); + /* bring the link up in the watchdog, this could race with our first + * link up interrupt but shouldn't be a problem */ + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + hw->f2c_mod_status = false; + mod_timer(&adapter->service_timer, jiffies); +#ifdef POLL_LINK_STATUS + mod_timer(&adapter->link_check_timer,jiffies); +#endif + adapter->flags2 |= TXGBE_FLAG2_SERVICE_RUNNING; + + /* PCIE recovery: record lan status */ + if 
(hw->bus.lan_id == 0) { + wr32m(hw, TXGBE_MIS_PRB_CTL, + TXGBE_MIS_PRB_CTL_LAN0_UP, TXGBE_MIS_PRB_CTL_LAN0_UP); + } + else if (hw->bus.lan_id == 1) { + wr32m(hw, TXGBE_MIS_PRB_CTL, + TXGBE_MIS_PRB_CTL_LAN1_UP, TXGBE_MIS_PRB_CTL_LAN1_UP); + } + else + e_err(probe, "txgbe_up_complete:invalid bus lan id %d\n", hw->bus.lan_id); + + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + wr32m(hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_PFRSTD, TXGBE_CFG_PORT_CTL_PFRSTD); + /* update setting rx tx for all active vfs */ + txgbe_set_all_vfs(adapter); + + /* clear ecc reset flag if set */ + if (adapter->flags2 & TXGBE_FLAG2_ECC_ERR_RESET) + adapter->flags2 &= ~TXGBE_FLAG2_ECC_ERR_RESET; +} + +void txgbe_reinit_locked(struct txgbe_adapter *adapter) +{ + if (adapter->flags2 & TXGBE_FLAG2_KR_PRO_REINIT) { + return; + } + + adapter->flags2 |= TXGBE_FLAG2_KR_PRO_REINIT; + WARN_ON(in_interrupt()); + /* put off any impending NetWatchDogTimeout */ +#ifdef HAVE_NETIF_TRANS_UPDATE + netif_trans_update(adapter->netdev); +#else + adapter->netdev->trans_start = jiffies; +#endif + + while (test_and_set_bit(__TXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + txgbe_down(adapter); + /* + * If SR-IOV enabled then wait a bit before bringing the adapter + * back up to give the VFs time to respond to the reset. The + * two second wait is based upon the watchdog timer cycle in + * the VF driver. + */ + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) + msleep(2000); + txgbe_up(adapter); + clear_bit(__TXGBE_RESETTING, &adapter->state); + adapter->flags2 &= ~TXGBE_FLAG2_KR_PRO_REINIT; +} + +static void txgbe_reinit_locked_dma_reset(struct txgbe_adapter *adapter) +{ +#ifdef TXGBE_DMA_RESET + struct txgbe_hw *hw = &adapter->hw; + int i; +#endif + + if (adapter->flags2 & TXGBE_FLAG2_KR_PRO_REINIT) { + return; + } + + adapter->flags2 |= TXGBE_FLAG2_KR_PRO_REINIT; + + WARN_ON(in_interrupt()); + /* put off any impending NetWatchDogTimeout */ +#ifdef HAVE_NETIF_TRANS_UPDATE + netif_trans_update(adapter->netdev); +#else + adapter->netdev->trans_start = jiffies; +#endif + + while (test_and_set_bit(__TXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + txgbe_down(adapter); + +#ifdef TXGBE_DMA_RESET + if (TXGBE_DMA_RESET == 1) { + e_info(probe, "dma reset\n"); + + if (rd32(hw, PX_PF_PEND) & 0x3) { + e_dev_err("PX_PF_PEND case dma reset exit\n"); + goto skip_dma_rst; + } + + for (i = 0; i < 4; i++) { + if (rd32(hw, PX_VF_PEND(i))) { + e_dev_err("PX_VF_PEND case dma reset exit\n"); + goto skip_dma_rst; + } + } + wr32(hw, TXGBE_MIS_RST, + 1 << 4); + TXGBE_WRITE_FLUSH(hw); + msleep(1000); + + /* amlite: bme */ + wr32(hw, PX_PF_BME, 0x1); + } +skip_dma_rst: +#endif + /* + * If SR-IOV enabled then wait a bit before bringing the adapter + * back up to give the VFs time to respond to the reset. The + * two second wait is based upon the watchdog timer cycle in + * the VF driver. 
+ */ + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) + msleep(2000); + txgbe_up(adapter); + clear_bit(__TXGBE_RESETTING, &adapter->state); + adapter->flags2 &= ~TXGBE_FLAG2_KR_PRO_REINIT; +} + +void txgbe_up(struct txgbe_adapter *adapter) +{ + /* hardware has been reset, we need to reload some things */ + txgbe_configure(adapter); + + txgbe_up_complete(adapter); +} + +void txgbe_reset(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int err; + u8 old_addr[ETH_ALEN]; + + if (TXGBE_REMOVED(hw->hw_addr)) + return; + /* lock SFP init bit to prevent race conditions with the watchdog */ + while (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + + /* clear all SFP and link config related flags while holding SFP_INIT */ + adapter->flags2 &= ~(TXGBE_FLAG2_SEARCH_FOR_SFP | + TXGBE_FLAG2_SFP_NEEDS_RESET); + adapter->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG; + + err = TCALL(hw, mac.ops.init_hw); + switch (err) { + case 0: + case TXGBE_ERR_SFP_NOT_PRESENT: + case TXGBE_ERR_SFP_NOT_SUPPORTED: + break; + case TXGBE_ERR_MASTER_REQUESTS_PENDING: + e_dev_err("master disable timed out\n"); + txgbe_tx_timeout_dorecovery(adapter); + break; + case TXGBE_ERR_EEPROM_VERSION: + /* We are running on a pre-production device, log a warning */ + e_dev_warn("This device is a pre-production adapter/LOM. " + "Please be aware there may be issues associated " + "with your hardware. If you are experiencing " + "problems please contact your hardware " + "representative who provided you with this " + "hardware.\n"); + break; + default: + e_dev_err("Hardware Error: %d\n", err); + } + + clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); + /* do not flush user set addresses */ + memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len); + txgbe_flush_sw_mac_table(adapter); + txgbe_mac_set_default_filter(adapter, old_addr); + + /* update SAN MAC vmdq pool selection */ + TCALL(hw, mac.ops.set_vmdq_san_mac, VMDQ_P(0)); + + /* Clear saved DMA coalescing values except for watchdog_timer */ + hw->mac.dmac_config.fcoe_en = false; + hw->mac.dmac_config.link_speed = 0; + hw->mac.dmac_config.fcoe_tc = 0; + hw->mac.dmac_config.num_tcs = 0; + +#ifdef HAVE_PTP_1588_CLOCK + if (test_bit(__TXGBE_PTP_RUNNING, &adapter->state)) + txgbe_ptp_reset(adapter); +#endif +} + +/** + * txgbe_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +void txgbe_clean_rx_ring(struct txgbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + unsigned long size; + u16 i; + +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif + +#ifdef HAVE_AF_XDP_ZC_SUPPORT + if (rx_ring->xsk_pool) { + txgbe_xsk_clean_rx_ring(rx_ring); + goto skip_free; + } + +#endif + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_buffer_info) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct txgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; + if (rx_buffer->dma) { + dma_unmap_single(dev, + rx_buffer->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_buffer->dma = 0; + } + + if (rx_buffer->skb) { + struct sk_buff *skb = rx_buffer->skb; +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + if (TXGBE_CB(skb)->page_released) + dma_unmap_page_attrs(rx_ring->dev, + TXGBE_CB(skb)->dma, + txgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#if 
defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + TXGBE_RX_DMA_ATTR); +#endif +#else + /* We need to clean up RSC frag lists */ + skb = txgbe_merge_active_tail(skb); + if (txgbe_close_active_frag_list(skb)) + dma_unmap_single(dev, + TXGBE_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + TXGBE_CB(skb)->dma = 0; +#endif /* CONFIG_TXGBE_DISABLE_PACKET_SPLIT */ + dev_kfree_skb(skb); + rx_buffer->skb = NULL; + } + +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + if (!rx_buffer->page) + continue; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->page_dma, + rx_buffer->page_offset, + txgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->page_dma, + txgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + TXGBE_RX_DMA_ATTR); +#endif + + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + rx_buffer->page = NULL; +#endif + } + + size = sizeof(struct txgbe_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + +#ifdef HAVE_AF_XDP_ZC_SUPPORT +skip_free: +#endif +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +#endif +} + +/** + * txgbe_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +void txgbe_clean_tx_ring(struct txgbe_ring *tx_ring) +{ + struct txgbe_tx_buffer *tx_buffer_info; + unsigned long size; + u16 i; +#ifdef HAVE_AF_XDP_ZC_SUPPORT + if (tx_ring->xsk_pool) { + txgbe_xsk_clean_tx_ring(tx_ring); + return; + } + +#endif + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_buffer_info) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; +#ifdef HAVE_XDP_SUPPORT + if(ring_is_xdp(tx_ring)){ +#ifdef HAVE_XDP_FRAME_STRUCT + if(tx_buffer_info->xdpf) + xdp_return_frame(tx_buffer_info->xdpf); +#else + if(tx_buffer_info->data) + page_frag_free(tx_buffer_info->data); +#endif + } +#endif + txgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); + } + + if (!ring_is_xdp(tx_ring)) + netdev_tx_reset_queue(txring_txq(tx_ring)); + + size = sizeof(struct txgbe_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); +} + +/** + * txgbe_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void txgbe_clean_all_rx_rings(struct txgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + txgbe_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * txgbe_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void txgbe_clean_all_tx_rings(struct txgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + txgbe_clean_tx_ring(adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + txgbe_clean_tx_ring(adapter->xdp_ring[i]); +} + +static void txgbe_fdir_filter_exit(struct txgbe_adapter *adapter) +{ + struct hlist_node *node; + struct txgbe_fdir_filter *filter; + + spin_lock(&adapter->fdir_perfect_lock); + + 
hlist_for_each_entry_safe(filter, node, + &adapter->fdir_filter_list, fdir_node) { + hlist_del(&filter->fdir_node); + kfree(filter); + } + adapter->fdir_filter_count = 0; + + spin_unlock(&adapter->fdir_perfect_lock); +} + +static void txgbe_disable_device(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct txgbe_hw *hw = &adapter->hw; +#ifdef HAVE_VIRTUAL_STATION + struct net_device *upper; + struct list_head *iter; +#endif + u32 i; + + /* signal that we are down to the interrupt handler */ + if (test_and_set_bit(__TXGBE_DOWN, &adapter->state)) + return; /* do nothing if already down */ + + if (!(adapter->flags2 & TXGBE_FLAG2_ECC_ERR_RESET)) + txgbe_disable_pcie_master(hw); + + /* disable receives */ + TCALL(hw, mac.ops.disable_rx); + + /* disable all enabled rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) + /* this call also flushes the previous write */ + txgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); + + netif_tx_stop_all_queues(netdev); + + /* call carrier off first to avoid false dev_watchdog timeouts */ + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + /* synchronize_rcu() needed for pending XDP buffers to drain */ + if (adapter->xdp_ring[0]) + synchronize_rcu(); + +#ifdef HAVE_VIRTUAL_STATION + /* disable any upper devices */ + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *vlan = netdev_priv(upper); + + if (vlan->fwd_priv) { + netif_tx_stop_all_queues(upper); + netif_carrier_off(upper); + netif_tx_disable(upper); + } + } + } +#endif + txgbe_irq_disable(adapter); + + txgbe_napi_disable_all(adapter); + + adapter->flags2 &= ~(TXGBE_FLAG2_FDIR_REQUIRES_REINIT | + TXGBE_FLAG2_PF_RESET_REQUESTED | + TXGBE_FLAG2_DEV_RESET_REQUESTED | + TXGBE_FLAG2_GLOBAL_RESET_REQUESTED); + adapter->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE; + + del_timer_sync(&adapter->service_timer); +#ifdef POLL_LINK_STATUS + del_timer_sync(&adapter->link_check_timer); +#endif + adapter->flags2 &= ~TXGBE_FLAG2_SERVICE_RUNNING; + + hw->f2c_mod_status = false; + cancel_work_sync(&adapter->sfp_sta_task); + + /* PCIE recovery: record lan status, clear */ + if (hw->bus.lan_id == 0) + wr32m(hw, TXGBE_MIS_PRB_CTL, + TXGBE_MIS_PRB_CTL_LAN0_UP, 0); + else if (hw->bus.lan_id == 1) + wr32m(hw, TXGBE_MIS_PRB_CTL, + TXGBE_MIS_PRB_CTL_LAN1_UP, 0); + else + e_dev_err("txgbe_disable_device:invalid bus lan id %d\n", hw->bus.lan_id); + + if (adapter->num_vfs) { + /* Clear EITR Select mapping */ + wr32(&adapter->hw, TXGBE_PX_ITRSEL, 0); + + /* Mark all the VFs as inactive */ + for (i = 0 ; i < adapter->num_vfs; i++) + adapter->vfinfo[i].clear_to_send = 0; + + /* Disable all VFTE/VFRE TX/RX */ + txgbe_set_all_vfs(adapter); + } + + if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || + ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP) || + adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LLDP)) { + /* disable mac transmiter */ + wr32m(hw, TXGBE_MAC_TX_CFG, + TXGBE_MAC_TX_CFG_TE, 0); + } + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < adapter->num_tx_queues; i++) { + u8 reg_idx = adapter->tx_ring[i]->reg_idx; + wr32(hw, TXGBE_PX_TR_CFG(reg_idx), + TXGBE_PX_TR_CFG_SWFLSH); + } + + /* Disable the Tx DMA engine */ + wr32m(hw, TXGBE_TDM_CTL, TXGBE_TDM_CTL_TE, 0); + + /* workaround gpio int lost in lldp-on condition */ + reinit_gpio_int(adapter); +} + + +void txgbe_down(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + 
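+	/* Bring the interface fully down: quiesce DMA and interrupts in
+	 * txgbe_disable_device(), reset the MAC unless the PCI channel is
+	 * already offline, power down the SFP+ optics or the external copper
+	 * PHY where applicable, then release all Tx and Rx ring buffers.
+	 */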
+	txgbe_disable_device(adapter);
+
+#ifdef HAVE_PCI_ERS
+	if (!pci_channel_offline(adapter->pdev))
+#endif
+		txgbe_reset(adapter);
+
+	/* power down the optics for SFP+ fiber or mv phy */
+	if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) ||
+	      adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LLDP)) {
+		if (hw->phy.media_type == txgbe_media_type_fiber ||
+		    hw->phy.media_type == txgbe_media_type_fiber_qsfp)
+			TCALL(hw, mac.ops.disable_tx_laser);
+		else if (hw->phy.media_type == txgbe_media_type_copper &&
+			 (hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI)
+			txgbe_external_phy_suspend(hw);
+	}
+
+	txgbe_clean_all_tx_rings(adapter);
+	txgbe_clean_all_rx_rings(adapter);
+}
+
+/**
+ * txgbe_sw_init - Initialize general software structures (struct txgbe_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * txgbe_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static const u32 def_rss_key[10] = {
+	0xE291D73D, 0x1805EC6C, 0x2A94B30D,
+	0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
+	0x6A3E67EA, 0x14364D17, 0x3BED200D
+};
+
+static int __devinit txgbe_sw_init(struct txgbe_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	struct txgbe_hw *hw = &adapter->hw;
+	unsigned int fdir;
+	u32 ssid = 0;
+	int err;
+#if IS_ENABLED(CONFIG_DCB)
+	struct txgbe_dcb_tc_config *tc;
+	int j, bwg_pct;
+#endif /* CONFIG_DCB */
+	u32 fw_version;
+	u32 flash_header;
+	u32 flash_header_index;
+
+	/* PCI config space info */
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+	if (hw->revision_id == TXGBE_FAILED_READ_CFG_BYTE &&
+	    txgbe_check_cfg_remove(hw, pdev)) {
+		e_err(probe, "read of revision id failed\n");
+		err = -ENODEV;
+		goto out;
+	}
+
+	err = txgbe_init_shared_code(hw);
+	if (err) {
+		e_err(probe, "init_shared_code failed: %d\n", err);
+		goto out;
+	}
+
+	txgbe_flash_read_dword(hw, 0x0, &flash_header);
+	if (((flash_header >> 16) & 0xffff) == TXGBE_FLASH_HEADER_FLAG)
+		flash_header_index = 0x0;
+	else
+		flash_header_index = 0x1;
+
+	hw->oem_svid = pdev->subsystem_vendor;
+	hw->oem_ssid = pdev->subsystem_device;
+	if (pdev->subsystem_vendor == 0x8088) {
+		hw->subsystem_vendor_id = pdev->subsystem_vendor;
+		hw->subsystem_device_id = pdev->subsystem_device;
+	} else {
+		if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40)
+			txgbe_flash_read_dword(hw, (flash_header_index * 0x10000) + 0x302c, &ssid);
+		else
+			txgbe_flash_read_dword(hw, 0xfffdc, &ssid);
+
+		if (ssid == 0x1) {
+			e_err(probe, "read of internal subsystem device id failed\n");
+			err = -ENODEV;
+			goto out;
+		}
+		hw->subsystem_device_id = (u16)ssid;
+		hw->subsystem_device_id = hw->subsystem_device_id >> 8 |
+					  hw->subsystem_device_id << 8;
+	}
+
+	txgbe_flash_read_dword(hw, (flash_header_index * 0x10000) + 0x13a, &fw_version);
+	snprintf(adapter->fw_version, sizeof(adapter->fw_version),
+		 "0x%08x", fw_version);
+
+	adapter->mac_table = kzalloc(sizeof(struct txgbe_mac_addr) *
+				     hw->mac.num_rar_entries,
+				     GFP_ATOMIC);
+	if (!adapter->mac_table) {
+		err = TXGBE_ERR_OUT_OF_MEM;
+		e_err(probe, "mac_table allocation failed: %d\n", err);
+		goto out;
+	}
+
+	memcpy(adapter->rss_key, def_rss_key, sizeof(def_rss_key));
+#ifdef HAVE_AF_XDP_SUPPORT
+	adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL);
+	if (!adapter->af_xdp_zc_qps)
+		return -ENOMEM;
+#endif
+	/* Set common capability 
flags and settings */ +#if IS_ENABLED(CONFIG_TPH) + adapter->flags |= TXGBE_FLAG_TPH_CAPABLE; +#endif +#if IS_ENABLED(CONFIG_FCOE) + adapter->flags |= TXGBE_FLAG_FCOE_CAPABLE; + adapter->flags &= ~TXGBE_FLAG_FCOE_ENABLED; +#if IS_ENABLED(CONFIG_DCB) + /* Default traffic class to use for FCoE */ + adapter->fcoe.up = TXGBE_FCOE_DEFUP; + adapter->fcoe.up_set = TXGBE_FCOE_DEFUP; +#endif /* CONFIG_DCB */ +#endif /* CONFIG_FCOE */ + adapter->flags |= TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE; + adapter->flags2 |= TXGBE_FLAG2_RSC_CAPABLE; + fdir = min_t(int, TXGBE_MAX_FDIR_INDICES, num_online_cpus()); + adapter->ring_feature[RING_F_FDIR].limit = fdir; + adapter->max_q_vectors = TXGBE_MAX_MSIX_Q_VECTORS_SAPPHIRE; + + /* Set MAC specific capability flags and exceptions */ + adapter->flags |= TXGBE_FLAGS_SP_INIT; + adapter->flags2 |= TXGBE_FLAG2_TEMP_SENSOR_CAPABLE; + hw->phy.smart_speed = txgbe_smart_speed_off; + adapter->flags2 |= TXGBE_FLAG2_EEE_CAPABLE; + +#if IS_ENABLED(CONFIG_FCOE) + /* FCoE support exists, always init the FCoE lock */ + spin_lock_init(&adapter->fcoe.lock); +#endif /* CONFIG_FCOE */ + + /* n-tuple support exists, always init our spinlock */ + spin_lock_init(&adapter->fdir_perfect_lock); + + mutex_init(&adapter->e56_lock); + +#if IS_ENABLED(CONFIG_DCB) + + adapter->dcb_cfg.num_tcs.pg_tcs = 8; + adapter->dcb_cfg.num_tcs.pfc_tcs = 8; + + + /* Configure DCB traffic classes */ + bwg_pct = 100 / adapter->dcb_cfg.num_tcs.pg_tcs; + for (j = 0; j < adapter->dcb_cfg.num_tcs.pg_tcs; j++) { + tc = &adapter->dcb_cfg.tc_config[j]; + tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = 0; + tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwg_pct; + tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = 0; + tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwg_pct; + tc->pfc = txgbe_dcb_pfc_disabled; + } + + /* reset back to TC 0 */ + tc = &adapter->dcb_cfg.tc_config[0]; + + /* total of all TCs bandwidth needs to be 100 */ + bwg_pct += 100 % adapter->dcb_cfg.num_tcs.pg_tcs; + tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwg_pct; + tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwg_pct; + + /* Initialize default user to priority mapping, UPx->TC0 */ + tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; + tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; + + adapter->dcb_cfg.bw_percentage[TXGBE_DCB_TX_CONFIG][0] = 100; + adapter->dcb_cfg.bw_percentage[TXGBE_DCB_RX_CONFIG][0] = 100; + adapter->dcb_cfg.rx_pba_cfg = txgbe_dcb_pba_equal; + adapter->dcb_cfg.pfc_mode_enable = false; + adapter->dcb_cfg.round_robin_enable = false; + adapter->dcb_set_bitmap = 0x00; + adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; + memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, + sizeof(adapter->temp_dcb_cfg)); +#endif /* CONFIG_DCB */ + + TCALL(hw, mbx.ops.init_params); + + /* default flow control settings */ + hw->fc.requested_mode = txgbe_fc_full; + hw->fc.current_mode = txgbe_fc_full; /* init for ethtool output */ + + adapter->last_lfc_mode = hw->fc.current_mode; + hw->fc.pause_time = TXGBE_DEFAULT_FCPAUSE; + hw->fc.send_xon = true; + hw->fc.disable_fc_autoneg = false; + + hw->dac_sfp = false; + + /* set default ring sizes */ + adapter->tx_ring_count = TXGBE_DEFAULT_TXD; + adapter->rx_ring_count = TXGBE_DEFAULT_RXD; + + /* set default work limits */ + adapter->tx_work_limit = TXGBE_DEFAULT_TX_WORK; + adapter->rx_work_limit = TXGBE_DEFAULT_RX_WORK; + + adapter->tx_timeout_recovery_level = 0; + adapter->cmplt_to_dis = false; + + /* PF holds first pool slot */ + adapter->num_vmdqs = 1; + set_bit(0, &adapter->fwd_bitmask); + set_bit(__TXGBE_DOWN, 
&adapter->state); + + adapter->fec_link_mode = TXGBE_PHY_FEC_AUTO; + adapter->cur_fec_link = TXGBE_PHY_FEC_AUTO; + + adapter->link_valid = true; + + if (hw->mac.type == txgbe_mac_sp) + adapter->desc_reserved = DESC_RESERVED; + else + adapter->desc_reserved = DESC_RESERVED_AML; + + bitmap_zero(adapter->limited_vlans, 4096); + + memset(adapter->i2c_eeprom, 0, sizeof(u8)*512); +out: + return err; +} + +/** + * txgbe_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int txgbe_setup_tx_resources(struct txgbe_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = -1; + int size; + + size = sizeof(struct txgbe_tx_buffer) * tx_ring->count; + + if (tx_ring->q_vector) + numa_node = tx_ring->q_vector->numa_node; + + tx_ring->tx_buffer_info = vzalloc_node(size, numa_node); + if (!tx_ring->tx_buffer_info) + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union txgbe_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + set_dev_node(dev, numa_node); + tx_ring->desc = dma_alloc_coherent(dev, + tx_ring->size, + &tx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!tx_ring->desc) + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + +#ifdef TXGBE_TXHEAD_WB + txgbe_setup_headwb_resources(tx_ring); +#endif + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * txgbe_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
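+ * The XDP Tx rings are allocated after the regular Tx rings and are torn
+ * down the same way on failure.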
+ * + * Return 0 on success, negative on failure + **/ +static int txgbe_setup_all_tx_resources(struct txgbe_adapter *adapter) +{ + int i = 0, j = 0, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = txgbe_setup_tx_resources(adapter->tx_ring[i]); + if (!err) + continue; + + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + goto err_setup_tx; + } + for (j = 0; j < adapter->num_xdp_queues; j++) { + err = txgbe_setup_tx_resources(adapter->xdp_ring[j]); + if (!err) + continue; + + e_err(probe, "Allocation for Tx(XDP) Queue %u failed\n", j); + goto err_setup_tx; + } + + return 0; +err_setup_tx: + /* rewind the index freeing the rings as we go */ + while (i--) + txgbe_free_tx_resources(adapter->tx_ring[i]); + while (j--) + txgbe_free_tx_resources(adapter->xdp_ring[j]); + return err; +} + +/** + * txgbe_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int txgbe_setup_rx_resources(struct txgbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = -1; + int size; +#ifdef HAVE_XDP_BUFF_RXQ +#ifdef HAVE_XDP_FRAME_STRUCT +#ifndef HAVE_AF_XDP_ZC_SUPPORT + int err; +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#endif /* HAVE_XDP_FRAME_STRUCT */ +#endif /* HAVE_XDP_BUFF_RXQ */ + + size = sizeof(struct txgbe_rx_buffer) * rx_ring->count; + + if (rx_ring->q_vector) + numa_node = rx_ring->q_vector->numa_node; + + rx_ring->rx_buffer_info = vzalloc_node(size, numa_node); + if (!rx_ring->rx_buffer_info) + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union txgbe_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + set_dev_node(dev, numa_node); + rx_ring->desc = dma_alloc_coherent(dev, + rx_ring->size, + &rx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!rx_ring->desc) + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + if (!rx_ring->q_vector) + return 0; +#ifdef HAVE_XDP_BUFF_RXQ + /* XDP RX-queue info */ + if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, + rx_ring->queue_index, + rx_ring->q_vector->napi.napi_id < 0)) + goto err; +#ifndef HAVE_AF_XDP_ZC_SUPPORT +#ifdef HAVE_XDP_FRAME_STRUCT + err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL); + if (err) { + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + goto err; + } +#endif /* HAVE_XDP_FRAME_STRUCT */ +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#endif /* HAVE_XDP_BUFF_RXQ */ + rx_ring->xdp_prog = rx_ring->q_vector->adapter->xdp_prog; + return 0; +err: + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * txgbe_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
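+ * When FCoE support is compiled in, the FCoE DDP resources are allocated
+ * once all Rx rings have been set up.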
+ * + * Return 0 on success, negative on failure + **/ +static int txgbe_setup_all_rx_resources(struct txgbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = txgbe_setup_rx_resources(adapter->rx_ring[i]); + if (!err) { + continue; + } + + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + goto err_setup_rx; + } + +#if IS_ENABLED(CONFIG_FCOE) + err = txgbe_setup_fcoe_ddp_resources(adapter); + if (!err) +#endif + return 0; +err_setup_rx: + /* rewind the index freeing the rings as we go */ + while (i--) + txgbe_free_rx_resources(adapter->rx_ring[i]); + return err; +} + +/** + * txgbe_setup_isb_resources - allocate interrupt status resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int txgbe_setup_isb_resources(struct txgbe_adapter *adapter) +{ + struct device *dev = pci_dev_to_dev(adapter->pdev); + + adapter->isb_mem = dma_alloc_coherent(dev, + sizeof(u32) * TXGBE_ISB_MAX, + &adapter->isb_dma, + GFP_KERNEL); + if (!adapter->isb_mem) + return -ENOMEM; + memset(adapter->isb_mem, 0, sizeof(u32) * TXGBE_ISB_MAX); + return 0; +} + +/** + * txgbe_free_isb_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +void txgbe_free_isb_resources(struct txgbe_adapter *adapter) +{ + struct device *dev = pci_dev_to_dev(adapter->pdev); + + dma_free_coherent(dev, sizeof(u32) * TXGBE_ISB_MAX, + adapter->isb_mem, adapter->isb_dma); + adapter->isb_mem = NULL; +} + +#ifdef TXGBE_TXHEAD_WB +void txgbe_free_headwb_resources(struct txgbe_ring *ring) +{ + u8 headwb_size = 0; + struct txgbe_adapter *adapter; + struct txgbe_hw *hw; + + if (ring->q_vector) { + adapter = ring->q_vector->adapter; + hw = &adapter->hw; + if (hw->mac.type == txgbe_mac_sp) + return; + } else { + return; + } + + if (TXGBE_TXHEAD_WB == 1) + headwb_size = 16; + else if (TXGBE_TXHEAD_WB == 2) + headwb_size = 16; + else + headwb_size = 1; + + if (ring->headwb_mem) { + dma_free_coherent(ring->dev, sizeof(u32) * headwb_size, + ring->headwb_mem, ring->headwb_dma); + ring->headwb_mem = NULL; + } +} +#endif + +/** + * txgbe_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void txgbe_free_tx_resources(struct txgbe_ring *tx_ring) +{ + txgbe_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; + +#ifdef TXGBE_TXHEAD_WB + txgbe_free_headwb_resources(tx_ring); +#endif +} + +/** + * txgbe_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void txgbe_free_all_tx_resources(struct txgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + txgbe_free_tx_resources(adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + txgbe_free_tx_resources(adapter->xdp_ring[i]); +} + +/** + * txgbe_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void txgbe_free_rx_resources(struct txgbe_ring *rx_ring) +{ + txgbe_clean_rx_ring(rx_ring); + + rx_ring->xdp_prog = NULL; +#ifdef HAVE_XDP_BUFF_RXQ + if (rx_ring->q_vector) + 
xdp_rxq_info_unreg(&rx_ring->xdp_rxq); +#endif + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * txgbe_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void txgbe_free_all_rx_resources(struct txgbe_adapter *adapter) +{ + int i; + +#if IS_ENABLED(CONFIG_FCOE) + txgbe_free_fcoe_ddp_resources(adapter); +#endif + + for (i = 0; i < adapter->num_rx_queues; i++) + txgbe_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * txgbe_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int txgbe_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); +#ifndef HAVE_NETDEVICE_MIN_MAX_MTU + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; +#endif + +#ifndef HAVE_NETDEVICE_MIN_MAX_MTU + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < 68) || (max_frame > TXGBE_MAX_JUMBO_FRAME_SIZE)) + return -EINVAL; +#else + if ((new_mtu < 68) || (new_mtu > 9414)) + return -EINVAL; +#endif +#ifdef HAVE_XDP_SUPPORT + if (adapter->xdp_prog) { + int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + + VLAN_HLEN; + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct txgbe_ring *ring = adapter->rx_ring[i]; + + if (new_frame_size > txgbe_rx_bufsz(ring)) { + e_warn(probe, "Requested MTU size is not supported with XDP\n"); + return -EINVAL; + } + } + } +#endif + /* + * we cannot allow legacy VFs to enable their receive + * paths when MTU greater than 1500 is configured. So display a + * warning that legacy VFs will be disabled. + */ + if ((adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) && +#ifndef HAVE_NETDEVICE_MIN_MAX_MTU + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) +#else + (new_mtu > ETH_DATA_LEN)) +#endif + e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n"); + + e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + + /* must set new MTU before calling down or up */ + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + txgbe_reinit_locked(adapter); + + return 0; } /** * txgbe_open - Called when a network interface is made active * @netdev: network interface device structure * - * Returns 0 on success, negative value on failure + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
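+ * The Tx and Rx descriptor rings and the interrupt status block are
+ * allocated before the IRQs are requested; any failure unwinds these
+ * steps in reverse order and finishes with a device reset.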
+ **/ +int txgbe_open(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + int err; + + /*special for backplane flow*/ + adapter->flags2 &= ~TXGBE_FLAG2_KR_PRO_DOWN; + + /* disallow open during test */ + if (test_bit(__TXGBE_TESTING, &adapter->state)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = txgbe_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = txgbe_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + err = txgbe_setup_isb_resources(adapter); + if (err) + goto err_req_isb; + + txgbe_configure(adapter); + + err = txgbe_request_irq(adapter); + if (err) + goto err_req_irq; + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_vmdqs > 1 + ? adapter->queues_per_pool + : adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(netdev, adapter->num_vmdqs > 1 + ? adapter->queues_per_pool + : adapter->num_rx_queues); + if (err) + goto err_set_queues; + +#ifdef HAVE_PTP_1588_CLOCK + txgbe_ptp_init(adapter); +#endif + + txgbe_up_complete(adapter); + +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) && defined(HAVE_UDP_TUNNEL_NIC_INFO) + udp_tunnel_nic_reset_ntf(netdev); +#else +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) + txgbe_clear_vxlan_port(adapter); +#endif +#ifdef HAVE_UDP_ENC_RX_OFFLOAD + udp_tunnel_get_rx_info(netdev); +#elif defined(HAVE_VXLAN_RX_OFFLOAD) + vxlan_get_rx_port(netdev); +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ +#endif /* HAVE_UDP_ENC_RX_OFFLOAD && HAVE_UDP_TUNNEL_NIC_INFO */ + + return 0; + +err_set_queues: + txgbe_free_irq(adapter); +err_req_irq: + txgbe_free_isb_resources(adapter); +err_req_isb: + txgbe_free_all_rx_resources(adapter); + +err_setup_rx: + txgbe_free_all_tx_resources(adapter); +err_setup_tx: + txgbe_reset(adapter); + + return err; +} + +/** + * txgbe_close_suspend - actions necessary to both suspend and close flows + * @adapter: the private adapter struct + * + * This function should contain the necessary work common to both suspending + * and closing of the device. + */ +static void txgbe_close_suspend(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + +#ifdef HAVE_PTP_1588_CLOCK + txgbe_ptp_suspend(adapter); +#endif + txgbe_disable_device(adapter); + + /* power down the optics for SFP+ fiber or mv phy */ + if(!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || + adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LLDP)) { + if (hw->phy.media_type == txgbe_media_type_fiber || + hw->phy.media_type == txgbe_media_type_fiber_qsfp) + TCALL(hw, mac.ops.disable_tx_laser); + else if (hw->phy.media_type == txgbe_media_type_copper && + (hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI) + txgbe_external_phy_suspend(hw); + } + + txgbe_clean_all_tx_rings(adapter); + txgbe_clean_all_rx_rings(adapter); + + txgbe_free_irq(adapter); + + txgbe_free_isb_resources(adapter); + txgbe_free_all_rx_resources(adapter); + txgbe_free_all_tx_resources(adapter); +} + +/** + * txgbe_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. 
A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +int txgbe_close(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + + if (hw->subsystem_device_id == TXGBE_ID_WX1820_KR_KX_KX4 || + hw->subsystem_device_id == TXGBE_ID_SP1000_KR_KX_KX4 || + hw->dac_sfp) { + txgbe_bp_close_protect(adapter); + } + +#ifdef HAVE_PTP_1588_CLOCK + txgbe_ptp_stop(adapter); +#endif + + txgbe_down(adapter); + txgbe_free_irq(adapter); + + txgbe_free_isb_resources(adapter); + txgbe_free_all_rx_resources(adapter); + txgbe_free_all_tx_resources(adapter); + + txgbe_fdir_filter_exit(adapter); + memset(&adapter->ft_filter_info, 0, + sizeof(struct txgbe_5tuple_filter_info)); + + txgbe_release_hw_control(adapter); + + return 0; +} + +#ifdef CONFIG_PM +#ifndef USE_LEGACY_PM_SUPPORT +static int txgbe_resume(struct device *dev) +#else +static int txgbe_resume(struct pci_dev *pdev) +#endif /* USE_LEGACY_PM_SUPPORT */ +{ + struct txgbe_adapter *adapter; + struct net_device *netdev; + u32 err; +#ifndef USE_LEGACY_PM_SUPPORT + struct pci_dev *pdev = to_pci_dev(dev); +#endif + + adapter = pci_get_drvdata(pdev); + netdev = adapter->netdev; + adapter->hw.hw_addr = adapter->io_addr; + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* + * pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. + */ + pci_save_state(pdev); + + err = pci_enable_device_mem(pdev); + if (err) { + e_dev_err("Cannot enable PCI device from suspend\n"); + return err; + } + smp_mb__before_atomic(); + clear_bit(__TXGBE_DISABLED, &adapter->state); + pci_set_master(pdev); + + pci_wake_from_d3(pdev, false); + + txgbe_reset(adapter); + + rtnl_lock(); + + err = txgbe_init_interrupt_scheme(adapter); + if (!err && netif_running(netdev)) + err = txgbe_open(netdev); + + rtnl_unlock(); + + if (err) + return err; + + netif_device_attach(netdev); + + return 0; +} + +#ifndef USE_LEGACY_PM_SUPPORT +/** + * txgbe_freeze - quiesce the device (no IRQ's or DMA) + * @dev: The port's netdev + */ +static int txgbe_freeze(struct device *dev) +{ + struct txgbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev)); + struct net_device *netdev = adapter->netdev; + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + txgbe_down(adapter); + txgbe_free_irq(adapter); + } + + txgbe_reset_interrupt_capability(adapter); + + return 0; +} + +/** + * txgbe_thaw - un-quiesce the device + * @dev: The port's netdev + */ +static int txgbe_thaw(struct device *dev) +{ + struct txgbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev)); + struct net_device *netdev = adapter->netdev; + + txgbe_set_interrupt_capability(adapter); + + if (netif_running(netdev)) { + u32 err = txgbe_request_irq(adapter); + if (err) + return err; + + txgbe_up(adapter); + } + + netif_device_attach(netdev); + + return 0; +} +#endif /* USE_LEGACY_PM_SUPPORT */ +#endif /* CONFIG_PM */ + +/* + * __txgbe_shutdown is not used when power management + * is disabled on older kernels (<2.6.12). causes a compile + * warning/error, because it is defined and not used. 
+ */
+#if defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER)
+static int __txgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
+{
+	struct txgbe_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 wufc = adapter->wol;
+#ifdef CONFIG_PM
+	int retval = 0;
+#endif
+
+	netif_device_detach(netdev);
+	txgbe_mac_set_default_filter(adapter, hw->mac.perm_addr);
+
+	rtnl_lock();
+	if (netif_running(netdev))
+		txgbe_close_suspend(adapter);
+	rtnl_unlock();
+
+	txgbe_clear_interrupt_scheme(adapter);
+
+#ifdef CONFIG_PM
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+#endif
+
+	/* this won't stop link if manageability or WoL is enabled */
+	txgbe_stop_mac_link_on_d3(hw);
+
+	if (wufc) {
+		txgbe_set_rx_mode(netdev);
+		txgbe_configure_rx(adapter);
+		/* enable the optics for SFP+ fiber as we can WoL */
+		TCALL(hw, mac.ops.enable_tx_laser);
+
+		/* turn on all-multi mode if wake on multicast is enabled */
+		if (wufc & TXGBE_PSR_WKUP_CTL_MC) {
+			wr32m(hw, TXGBE_PSR_CTL,
+			      TXGBE_PSR_CTL_MPE, TXGBE_PSR_CTL_MPE);
+		}
+
+		pci_clear_master(adapter->pdev);
+		wr32(hw, TXGBE_PSR_WKUP_CTL, wufc);
+	} else {
+		wr32(hw, TXGBE_PSR_WKUP_CTL, 0);
+	}
+
+	pci_wake_from_d3(pdev, !!wufc);
+
+	*enable_wake = !!wufc;
+	txgbe_release_hw_control(adapter);
+
+	if (!test_and_set_bit(__TXGBE_DISABLED, &adapter->state))
+		pci_disable_device(pdev);
+
+	return 0;
+}
+#endif /* defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) */
+
+#ifdef CONFIG_PM
+#ifndef USE_LEGACY_PM_SUPPORT
+static int txgbe_suspend(struct device *dev)
+#else
+static int txgbe_suspend(struct pci_dev *pdev,
+			 pm_message_t __always_unused state)
+#endif /* USE_LEGACY_PM_SUPPORT */
+{
+	int retval;
+	bool wake;
+#ifndef USE_LEGACY_PM_SUPPORT
+	struct pci_dev *pdev = to_pci_dev(dev);
+#endif
+
+	retval = __txgbe_shutdown(pdev, &wake);
+	if (retval)
+		return retval;
+
+	if (wake) {
+		pci_prepare_to_sleep(pdev);
+	} else {
+		pci_wake_from_d3(pdev, false);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+#ifndef USE_REBOOT_NOTIFIER
+static void txgbe_shutdown(struct pci_dev *pdev)
+{
+	bool wake = false;
+
+	__txgbe_shutdown(pdev, &wake);
+
+	if (system_state == SYSTEM_POWER_OFF) {
+		pci_wake_from_d3(pdev, wake);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+}
+
+#endif
+#ifdef HAVE_NDO_GET_STATS64
+/**
+ * txgbe_get_stats64 - Get System Network Statistics
+ * @netdev: network interface device structure
+ * @stats: storage space for 64bit statistics
+ *
+ * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This
+ * function replaces txgbe_get_stats for kernels which support it. 
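+ * Per-ring packet and byte counters are read under u64_stats sync for the
+ * Rx, Tx and XDP rings; the remaining error counters come from the netdev
+ * stats that txgbe_watchdog_task() keeps up to date.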
+ */ +#ifdef HAVE_VOID_NDO_GET_STATS64 +static void txgbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#else +static struct rtnl_link_stats64 *txgbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#endif +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + int i; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct txgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, + start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct txgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, + start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct txgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, + start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + rcu_read_unlock(); + /* following stats updated by txgbe_watchdog_task() */ + stats->multicast = netdev->stats.multicast; + stats->rx_errors = netdev->stats.rx_errors; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_missed_errors = netdev->stats.rx_missed_errors; +#ifndef HAVE_VOID_NDO_GET_STATS64 + return stats; +#endif +} +#else +/** + * txgbe_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. + **/ +static struct net_device_stats *txgbe_get_stats(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + /* update the stats data */ + txgbe_update_stats(adapter); + +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + /* only return the current stats */ + return &netdev->stats; +#else + /* only return the current stats */ + return &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ +} +#endif + +/** + * txgbe_update_stats - Update the board statistics counters. 
+ * @adapter: board private structure + **/ +void txgbe_update_stats(struct txgbe_adapter *adapter) +{ +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *net_stats = &adapter->netdev->stats; +#else + struct net_device_stats *net_stats = &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_hw_stats *hwstats = &adapter->stats; + u64 total_mpc = 0; + u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff; + u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; + u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; + u64 bytes = 0, packets = 0, hw_csum_rx_error = 0; + u64 hw_csum_rx_good = 0; +#ifndef TXGBE_NO_LRO + u32 flushed = 0, coal = 0; +#endif + u8 pf_queue_offset = 0; + + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_RESETTING, &adapter->state)) + return; + + if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) { + u64 rsc_count = 0; + u64 rsc_flush = 0; + for (i = 0; i < adapter->num_rx_queues; i++) { + rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; + rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; + } + adapter->rsc_total_count = rsc_count; + adapter->rsc_total_flush = rsc_flush; + } + +#ifndef TXGBE_NO_LRO + for (i = 0; i < adapter->num_q_vectors; i++) { + struct txgbe_q_vector *q_vector = adapter->q_vector[i]; + if (!q_vector) + continue; + flushed += q_vector->lrolist.stats.flushed; + coal += q_vector->lrolist.stats.coal; + } + adapter->lro_stats.flushed = flushed; + adapter->lro_stats.coal = coal; + +#endif + for (i = 0; i < adapter->num_rx_queues; i++) { + struct txgbe_ring *rx_ring = adapter->rx_ring[i]; + non_eop_descs += rx_ring->rx_stats.non_eop_descs; + alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; + alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; + hw_csum_rx_error += rx_ring->rx_stats.csum_err; + hw_csum_rx_good += rx_ring->rx_stats.csum_good_cnt; + bytes += rx_ring->stats.bytes; + packets += rx_ring->stats.packets; + + } + adapter->non_eop_descs = non_eop_descs; + adapter->alloc_rx_page_failed = alloc_rx_page_failed; + adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; + adapter->hw_csum_rx_error = hw_csum_rx_error; + adapter->hw_csum_rx_good = hw_csum_rx_good; + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + bytes = 0; + packets = 0; + /* gather some stats to the adapter struct that are per queue */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct txgbe_ring *tx_ring = adapter->tx_ring[i]; + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + bytes += tx_ring->stats.bytes; + packets += tx_ring->stats.packets; + } + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct txgbe_ring *xdp_ring = adapter->xdp_ring[i]; + + restart_queue += xdp_ring->tx_stats.restart_queue; + tx_busy += xdp_ring->tx_stats.tx_busy; + bytes += xdp_ring->stats.bytes; + packets += xdp_ring->stats.packets; + } + adapter->restart_queue = restart_queue; + adapter->tx_busy = tx_busy; + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + + hwstats->crcerrs += rd32(hw, TXGBE_RX_CRC_ERROR_FRAMES_LOW); + + /* 8 register reads */ + for (i = 0; i < 8; i++) { + /* for packet buffers not used, the register should read 0 */ + mpc = rd32(hw, TXGBE_RDB_MPCNT(i)); + missed_rx += mpc; + hwstats->mpc[i] += mpc; + total_mpc += hwstats->mpc[i]; + hwstats->pxontxc[i] += rd32(hw, TXGBE_RDB_PXONTXC(i)); + hwstats->pxofftxc[i] += + rd32(hw, TXGBE_RDB_PXOFFTXC(i)); + hwstats->pxonrxc[i] += rd32(hw, 
TXGBE_MAC_PXONRXC(i)); + } + + hwstats->gprc += rd32(hw, TXGBE_PX_GPRC); + + txgbe_update_xoff_received(adapter); + + hwstats->o2bgptc += rd32(hw, TXGBE_TDM_OS2BMC_CNT); + if (txgbe_check_mng_access(&adapter->hw)) { + hwstats->o2bspc += rd32(hw, TXGBE_MNG_OS2BMC_CNT); + hwstats->b2ospc += rd32(hw, TXGBE_MNG_BMC2OS_CNT); + } + hwstats->b2ogprc += rd32(hw, TXGBE_RDM_BMC2OS_CNT); + hwstats->gorc += rd32(hw, TXGBE_PX_GORC_LSB); + hwstats->gorc += (u64)rd32(hw, TXGBE_PX_GORC_MSB) << 32; + + hwstats->gotc += rd32(hw, TXGBE_PX_GOTC_LSB); + hwstats->gotc += (u64)rd32(hw, TXGBE_PX_GOTC_MSB) << 32; + + + adapter->hw_rx_no_dma_resources += + rd32(hw, TXGBE_RDM_DRP_PKT); + hwstats->lxonrxc += rd32(hw, TXGBE_MAC_LXONRXC); +#ifdef HAVE_TX_MQ + hwstats->fdirmatch += rd32(hw, TXGBE_RDB_FDIR_MATCH); + hwstats->fdirmiss += rd32(hw, TXGBE_RDB_FDIR_MISS); +#endif /* HAVE_TX_MQ */ +#if IS_ENABLED(CONFIG_FCOE) + hwstats->fccrc += rd32(hw, TXGBE_FCCRC); + hwstats->fclast += rd32(hw, TXGBE_FCLAST); + hwstats->fcoerpdc += rd32(hw, TXGBE_FCOERPDC); + hwstats->fcoeprc += rd32(hw, TXGBE_FCOEPRC); + hwstats->fcoeptc += rd32(hw, TXGBE_FCOEPTC); + hwstats->fcoedwrc += rd32(hw, TXGBE_FCOEDWRC); + hwstats->fcoedwtc += rd32(hw, TXGBE_FCOEDWTC); + /* Add up per cpu counters for total ddp alloc fail */ + if (adapter->fcoe.ddp_pool) { + struct txgbe_fcoe *fcoe = &adapter->fcoe; + struct txgbe_fcoe_ddp_pool *ddp_pool; + unsigned int cpu; + u64 noddp = 0, noddp_ext_buff = 0; + for_each_possible_cpu(cpu) { + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); + noddp += ddp_pool->noddp; + noddp_ext_buff += ddp_pool->noddp_ext_buff; + } + hwstats->fcoe_noddp = noddp; + hwstats->fcoe_noddp_ext_buff = noddp_ext_buff; + } + +#endif /* CONFIG_FCOE */ + + bprc = rd32(hw, TXGBE_RX_BC_FRAMES_GOOD_LOW); + hwstats->bprc += bprc; + hwstats->mprc = 0; + hwstats->rdpc += rd32(hw, TXGBE_RDB_PKT_CNT); + hwstats->rddc += rd32(hw, TXGBE_RDB_DRP_CNT); + hwstats->psrpc += rd32(hw, TXGBE_PSR_PKT_CNT); + hwstats->psrdc += rd32(hw, TXGBE_PSR_DBG_DRP_CNT); + hwstats->untag += rd32(hw, TXGBE_RSEC_LSEC_UNTAG_PKT); + hwstats->tdmpc += rd32(hw, TXGBE_TDM_PKT_CNT); + hwstats->tdmdc += rd32(hw, TXGBE_TDM_DRP_CNT); + hwstats->tdbpc += rd32(hw, TXGBE_TDB_OUT_PKT_CNT); + + pf_queue_offset = adapter->ring_feature[RING_F_VMDQ].offset * + (adapter->ring_feature[RING_F_RSS].mask + 1); + + for (i = pf_queue_offset; i < 128; i++) + hwstats->mprc += rd32(hw, TXGBE_PX_MPRC(i)); + + hwstats->roc += rd32(hw, TXGBE_RX_OVERSIZE_FRAMES_GOOD); + hwstats->rlec += rd32(hw, TXGBE_RX_LEN_ERROR_FRAMES_LOW); + lxon = rd32(hw, TXGBE_RDB_LXONTXC); + hwstats->lxontxc += lxon; + lxoff = rd32(hw, TXGBE_RDB_LXOFFTXC); + hwstats->lxofftxc += lxoff; + + hwstats->gptc += rd32(hw, TXGBE_PX_GPTC); + hwstats->mptc += rd32(hw, TXGBE_TX_MC_FRAMES_GOOD_LOW); + hwstats->ruc += rd32(hw, TXGBE_RX_UNDERSIZE_FRAMES_GOOD); + hwstats->tpr += rd32(hw, TXGBE_RX_FRAME_CNT_GOOD_BAD_LOW); + hwstats->bptc += rd32(hw, TXGBE_TX_BC_FRAMES_GOOD_LOW); + /* Fill out the OS statistics structure */ + net_stats->multicast = hwstats->mprc; + + /* Rx Errors */ + net_stats->rx_errors = hwstats->crcerrs + + hwstats->rlec; + net_stats->rx_dropped = 0; + net_stats->rx_length_errors = hwstats->rlec; + net_stats->rx_crc_errors = hwstats->crcerrs; + net_stats->rx_missed_errors = total_mpc; + +} + +#ifdef HAVE_TX_MQ +/** + * txgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table + * @adapter - pointer to the device adapter structure + **/ +static void txgbe_fdir_reinit_subtask(struct txgbe_adapter *adapter) +{ + 
struct txgbe_hw *hw = &adapter->hw; + int i; + + if (!(adapter->flags2 & TXGBE_FLAG2_FDIR_REQUIRES_REINIT)) + return; + + adapter->flags2 &= ~TXGBE_FLAG2_FDIR_REQUIRES_REINIT; + + /* if interface is down do nothing */ + if (test_bit(__TXGBE_DOWN, &adapter->state)) + return; + + /* do nothing if we are not using signature filters */ + if (!(adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE)) + return; + + adapter->fdir_overflow++; + + if (txgbe_reinit_fdir_tables(hw) == 0) { + for (i = 0; i < adapter->num_tx_queues; i++) + set_bit(__TXGBE_TX_FDIR_INIT_DONE, + &(adapter->tx_ring[i]->state)); + /* re-enable flow director interrupts */ + wr32m(hw, TXGBE_PX_MISC_IEN, + TXGBE_PX_MISC_IEN_FLOW_DIR, TXGBE_PX_MISC_IEN_FLOW_DIR); + } else { + e_err(probe, "failed to finish FDIR re-initialization, " + "ignored adding FDIR ATR filters\n"); + } +} +#endif /* HAVE_TX_MQ */ + +void txgbe_irq_rearm_queues(struct txgbe_adapter *adapter, + u64 qmask) +{ + u32 mask; + + mask = (qmask & 0xFFFFFFFF); + wr32(&adapter->hw, TXGBE_PX_IMC(0), mask); + wr32(&adapter->hw, TXGBE_PX_ICS(0), mask); + + mask = (qmask >> 32); + wr32(&adapter->hw, TXGBE_PX_IMC(1), mask); + wr32(&adapter->hw, TXGBE_PX_ICS(1), mask); +} + +/** + * txgbe_check_hang_subtask - check for hung queues and dropped interrupts + * @adapter - pointer to the device adapter structure + * + * This function serves two purposes. First it strobes the interrupt lines + * in order to make certain interrupts are occurring. Secondly it sets the + * bits needed to check for TX hangs. As a result we should immediately + * determine if a hang has occurred. + */ +static void txgbe_check_hang_subtask(struct txgbe_adapter *adapter) +{ + int i; + u64 eics = 0; + + /* If we're down or resetting, just bail */ + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_REMOVING, &adapter->state) || + test_bit(__TXGBE_RESETTING, &adapter->state)) + return; + + /* Force detection of hung controller */ + if (netif_carrier_ok(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + set_check_for_tx_hang(adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + set_check_for_tx_hang(adapter->xdp_ring[i]); + } + + if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) { + /* get one bit for every active tx/rx interrupt vector */ + for (i = 0; i < adapter->num_q_vectors; i++) { + struct txgbe_q_vector *qv = adapter->q_vector[i]; + if (qv->rx.ring || qv->tx.ring) + eics |= BIT_ULL(i); + } + } + /* Cause software interrupt to ensure rings are cleaned */ + txgbe_irq_rearm_queues(adapter, eics); + +} + +/** + * txgbe_watchdog_update_link - update the link status + * @adapter - pointer to the device adapter structure + * @link_speed - pointer to a u32 to store the link_speed + **/ +static void txgbe_watchdog_update_link(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool link_up = adapter->link_up; + bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; + u32 reg; + u32 __maybe_unused i = 1; + +#ifndef POLL_LINK_STATUS + if (!(adapter->flags & TXGBE_FLAG_NEED_LINK_UPDATE)) + return; + + if (test_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + msleep(20); +#endif + + link_speed = TXGBE_LINK_SPEED_10GB_FULL; + link_up = true; + TCALL(hw, mac.ops.check_link, &link_speed, &link_up, false); + +#ifndef POLL_LINK_STATUS + if (link_up || time_after(jiffies, (adapter->link_check_timeout + + TXGBE_TRY_LINK_TIMEOUT))) { + adapter->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE; + } + + for(i = 0;i < 3;i++){ + TCALL(hw, 
mac.ops.check_link, &link_speed, &link_up, false); + msleep(10); + } +#else + if (adapter->link_up == link_up && adapter->link_speed == link_speed) + return; +#endif + + adapter->link_up = link_up; + adapter->link_speed = link_speed; + +#ifdef HAVE_DCBNL_IEEE + if (adapter->txgbe_ieee_pfc) + pfc_en |= !!(adapter->txgbe_ieee_pfc->pfc_en); + +#endif + if (link_up && !((adapter->flags & TXGBE_FLAG_DCB_ENABLED) && pfc_en)) { + TCALL(hw, mac.ops.fc_enable); + txgbe_set_rx_drop_en(adapter); + } + + if (link_up) { + +#ifdef HAVE_PTP_1588_CLOCK + adapter->last_rx_ptp_check = jiffies; + + if (test_bit(__TXGBE_PTP_RUNNING, &adapter->state)) + txgbe_ptp_start_cyclecounter(adapter); + +#endif + if (hw->mac.type == txgbe_mac_aml40) { + if (!(hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core0 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core1 || + txgbe_is_backplane(hw))) + txgbe_reconfig_mac(hw); + + if (link_speed & TXGBE_LINK_SPEED_40GB_FULL) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_AML_SPEED_40G); + } + /* enable mac receiver */ + wr32m(hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_RE, TXGBE_MAC_RX_CFG_RE); + } else if (hw->mac.type == txgbe_mac_aml) { + if (!(hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1 || + txgbe_is_backplane(hw))) + txgbe_reconfig_mac(hw); + + if (link_speed & TXGBE_LINK_SPEED_25GB_FULL) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_AML_SPEED_25G); + } else if (link_speed & TXGBE_LINK_SPEED_10GB_FULL) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_AML_SPEED_10G); + } else { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_AML_SPEED_1G); + } + + /* enable mac receiver */ + wr32m(hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_RE, TXGBE_MAC_RX_CFG_RE); + } else { + if (link_speed & TXGBE_LINK_SPEED_10GB_FULL) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_SPEED_10G); + } else if (link_speed & (TXGBE_LINK_SPEED_1GB_FULL | + TXGBE_LINK_SPEED_100_FULL | TXGBE_LINK_SPEED_10_FULL)) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_SPEED_1G); + } + + /* Re configure MAC RX */ + reg = rd32(hw, TXGBE_MAC_RX_CFG); + wr32(hw, TXGBE_MAC_RX_CFG, reg); + wr32(hw, TXGBE_MAC_PKT_FLT, TXGBE_MAC_PKT_FLT_PR); + reg = rd32(hw, TXGBE_MAC_WDG_TIMEOUT); + wr32(hw, TXGBE_MAC_WDG_TIMEOUT, reg); + } + } + + if (hw->mac.ops.dmac_config && hw->mac.dmac_config.watchdog_timer) { + u8 num_tcs = netdev_get_num_tc(adapter->netdev); +#if IS_ENABLED(CONFIG_FCOE) + u8 fcoe_tc = txgbe_fcoe_get_tc(adapter); + bool fcoe_en = !!(adapter->flags & TXGBE_FLAG_FCOE_ENABLED); +#endif /* CONFIG_FCOE */ + + if (hw->mac.dmac_config.link_speed != link_speed || +#if IS_ENABLED(CONFIG_FCOE) + hw->mac.dmac_config.fcoe_tc != fcoe_tc || + hw->mac.dmac_config.fcoe_en != fcoe_en || +#endif /* CONFIG_FCOE */ + hw->mac.dmac_config.num_tcs != num_tcs) { + hw->mac.dmac_config.link_speed = link_speed; + hw->mac.dmac_config.num_tcs = num_tcs; +#if IS_ENABLED(CONFIG_FCOE) + hw->mac.dmac_config.fcoe_en = fcoe_en; + hw->mac.dmac_config.fcoe_tc = 
fcoe_tc; +#endif /* CONFIG_FCOE */ + TCALL(hw, mac.ops.dmac_config); + } + } +} + +static void txgbe_update_default_up(struct txgbe_adapter *adapter) +{ + u8 up = 0; +#ifdef HAVE_DCBNL_IEEE + struct net_device *netdev = adapter->netdev; + struct dcb_app app = { + .selector = DCB_APP_IDTYPE_ETHTYPE, + .protocol = 0, + }; + up = dcb_getapp(netdev, &app); +#endif + +#if IS_ENABLED(CONFIG_FCOE) + adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0; +#else + adapter->default_up = up; +#endif +} + +/** + * txgbe_watchdog_link_is_up - update netif_carrier status and + * print link up message + * @adapter - pointer to the device adapter structure + **/ +static void txgbe_watchdog_link_is_up(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct txgbe_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool flow_rx, flow_tx; +#ifdef HAVE_VIRTUAL_STATION + struct net_device *upper; + struct list_head *iter; +#endif + + /* only continue if link was previously down */ + if (netif_carrier_ok(netdev)) { + return; + } + + adapter->flags2 &= ~TXGBE_FLAG2_SEARCH_FOR_SFP; + + /* flow_rx, flow_tx report link flow control status */ + flow_rx = (rd32(hw, TXGBE_MAC_RX_FLOW_CTRL) & 0x101) == 0x1; + flow_tx = !!(TXGBE_RDB_RFCC_RFCE_802_3X & + rd32(hw, TXGBE_RDB_RFCC)); + + switch (adapter->link_speed) { + case TXGBE_LINK_SPEED_40GB_FULL: + adapter->speed = SPEED_40000; + break; + case TXGBE_LINK_SPEED_25GB_FULL: + adapter->speed = SPEED_25000; + break; + case TXGBE_LINK_SPEED_10GB_FULL: + adapter->speed = SPEED_10000; + break; + case TXGBE_LINK_SPEED_1GB_FULL: + default: + adapter->speed = SPEED_1000; + break; + } + +#ifndef POLL_LINK_STATUS + if (hw->mac.type == txgbe_mac_aml) + adapter->cur_fec_link = txgbe_get_cur_fec_mode(hw); +#endif + + e_info(drv, "NIC Link is Up %s, Flow Control: %s%s\n", + (link_speed == TXGBE_LINK_SPEED_40GB_FULL ? + "40 Gbps" : + (link_speed == TXGBE_LINK_SPEED_25GB_FULL ? + "25 Gbps" : + (link_speed == TXGBE_LINK_SPEED_10GB_FULL ? + "10 Gbps" : + (link_speed == TXGBE_LINK_SPEED_1GB_FULL ? + "1 Gbps" : + (link_speed == TXGBE_LINK_SPEED_100_FULL ? + "100 Mbps" : + (link_speed == TXGBE_LINK_SPEED_10_FULL ? + "10 Mbps" : + "unknown speed")))))), + ((flow_rx && flow_tx) ? "RX/TX" : + (flow_rx ? "RX" : + (flow_tx ? "TX" : "None"))), + ((hw->mac.type == txgbe_mac_aml && link_speed == TXGBE_LINK_SPEED_25GB_FULL) ? + ((adapter->cur_fec_link == TXGBE_PHY_FEC_BASER) ? ", FEC: BASE-R" :\ + (adapter->cur_fec_link == TXGBE_PHY_FEC_RS) ? ", FEC: RS" :\ + (adapter->cur_fec_link == TXGBE_PHY_FEC_OFF) ? 
", FEC: OFF":"") : "")); + + if (!adapter->backplane_an && + (hw->dac_sfp || + (hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_KR_KX_KX4) + && hw->mac.type == txgbe_mac_sp) + txgbe_enable_rx_adapter(hw); + + if (adapter->tx_unidir_mode) { + wr32m(hw, 0x11004, BIT(10), BIT(10)); + wr32m(hw, 0x11004, BIT(0), BIT(0)); + e_dev_info("Enable loopback and disable rx : %x\n.", + rd32(hw, 0x11004)); + } + txgbe_check_vlan_rate_limit(adapter); + netif_carrier_on(netdev); + txgbe_check_vf_rate_limit(adapter); + + netif_tx_wake_all_queues(netdev); +#ifdef HAVE_VIRTUAL_STATION + /* enable any upper devices */ + rtnl_lock(); + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *vlan = netdev_priv(upper); + + if (vlan->fwd_priv) + netif_tx_wake_all_queues(upper); + } + } + rtnl_unlock(); +#endif + /* update the default user priority for VFs */ + txgbe_update_default_up(adapter); + + /* ping all the active vfs to let them know link has changed */ + //txgbe_ping_all_vfs(adapter); + txgbe_ping_all_vfs_with_link_status(adapter, true); +} + +static void txgbe_link_down_flush_tx(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + if (hw->mac.type != txgbe_mac_aml && + hw->mac.type != txgbe_mac_aml40) + return; + + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, + ~TXGBE_MAC_RX_CFG_RE); + wr32m(hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_LM, TXGBE_MAC_RX_CFG_LM); + + mdelay(20); + + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_LM, 0); +} + +/** + * txgbe_watchdog_link_is_down - update netif_carrier status and + * print link down message + * @adapter - pointer to the adapter structure + **/ +static void txgbe_watchdog_link_is_down(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct txgbe_hw *hw = &adapter->hw; + + adapter->link_up = false; + adapter->link_speed = 0; + + if (hw->mac.type == txgbe_mac_sp) + if (hw->subsystem_device_id == TXGBE_ID_WX1820_KR_KX_KX4 || + hw->subsystem_device_id == TXGBE_ID_SP1000_KR_KX_KX4 || + hw->dac_sfp) + txgbe_bp_down_event(adapter); + + /* only continue if link was up previously */ + if (!netif_carrier_ok(netdev)) + return; + + if (hw->mac.type == txgbe_mac_aml || + hw->mac.type == txgbe_mac_aml40) + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + +#ifdef HAVE_PTP_1588_CLOCK + if (test_bit(__TXGBE_PTP_RUNNING, &adapter->state)) + txgbe_ptp_start_cyclecounter(adapter); + +#endif + if (hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core0 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core1 || + txgbe_is_backplane(hw)) + adapter->an_done = false; + + e_info(drv, "NIC Link is Down\n"); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + txgbe_link_down_flush_tx(adapter); + /* ping all the active vfs to let them know link has changed */ + //txgbe_ping_all_vfs(adapter); + txgbe_ping_all_vfs_with_link_status(adapter, false); +} + +static bool txgbe_ring_tx_pending(struct txgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct txgbe_ring *tx_ring = adapter->tx_ring[i]; + + if (tx_ring->next_to_use != tx_ring->next_to_clean) + return true; + } + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct txgbe_ring *xdp_ring = adapter->xdp_ring[i]; + + if (xdp_ring->next_to_use != xdp_ring->next_to_clean) + return true; + } + return false; +} + +static bool txgbe_vf_tx_pending(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_ring_feature *vmdq = 
&adapter->ring_feature[RING_F_VMDQ]; + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + + u32 i, j; + + if (!adapter->num_vfs) + return false; + + for (i = 0; i < adapter->num_vfs; i++) { + for (j = 0; j < q_per_pool; j++) { + u32 h, t; + + h = rd32(hw, + TXGBE_PX_TR_RPn(q_per_pool, i, j)); + t = rd32(hw, + TXGBE_PX_TR_WPn(q_per_pool, i, j)); + + if (h != t) + return true; + } + } + + return false; +} + +/** + * txgbe_watchdog_flush_tx - flush queues on link down + * @adapter - pointer to the device adapter structure + **/ +static void txgbe_watchdog_flush_tx(struct txgbe_adapter *adapter) +{ + if (!netif_carrier_ok(adapter->netdev)) { + if (txgbe_ring_tx_pending(adapter) || + txgbe_vf_tx_pending(adapter)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + e_warn(drv, "initiating reset due to lost link with " + "pending Tx work\n"); + adapter->flags2 |= TXGBE_FLAG2_PF_RESET_REQUESTED; + } + } +} + +#ifdef CONFIG_PCI_IOV +static inline void txgbe_issue_vf_flr(struct txgbe_adapter *adapter, + struct pci_dev *vfdev) +{ + int pos, i; + u16 status; + + /* wait for pending transactions on the bus */ + for (i = 0; i < 4; i++) { + if (i) + msleep((1 << (i - 1)) * 100); + + pcie_capability_read_word(vfdev, PCI_EXP_DEVSTA, &status); + if (!(status & PCI_EXP_DEVSTA_TRPND)) + goto clear; + } + + e_dev_warn("Issuing VFLR with pending transactions\n"); + +clear: + pos = pci_find_capability(vfdev, PCI_CAP_ID_EXP); + if (!pos) + return; + + e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev)); + pci_write_config_word(vfdev, pos + PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_BCR_FLR); + msleep(100); +} + + +static void txgbe_spoof_check(struct txgbe_adapter *adapter) +{ + u32 ssvpc; + + /* Do not perform spoof check if in non-IOV mode */ + if (adapter->num_vfs == 0) + return; + ssvpc = rd32(&adapter->hw, TXGBE_TDM_SEC_DRP); + + /* + * ssvpc register is cleared on read, if zero then no + * spoofed packets in the last interval. 
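+ * A non-zero count only triggers the warning below; no corrective action
+ * is taken here.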
+ */ + if (!ssvpc) + return; + + e_warn(drv, "%d Spoofed packets detected\n", ssvpc); +} + +#endif /* CONFIG_PCI_IOV */ + +/** + * txgbe_watchdog_subtask - check and bring link up + * @adapter - pointer to the device adapter structure + **/ +static void txgbe_watchdog_subtask(struct txgbe_adapter *adapter) +{ + u32 __maybe_unused value = 0; + struct txgbe_hw *hw = &adapter->hw; + + /* if interface is down do nothing */ + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_REMOVING, &adapter->state) || + test_bit(__TXGBE_RESETTING, &adapter->state)) + return; + + if (hw->mac.type == txgbe_mac_sp) + if (hw->subsystem_device_id == TXGBE_ID_WX1820_KR_KX_KX4 || + hw->subsystem_device_id == TXGBE_ID_SP1000_KR_KX_KX4 || + hw->dac_sfp) + txgbe_bp_watchdog_event(adapter); + + if (hw->mac.type == txgbe_mac_aml || + hw->mac.type == txgbe_mac_aml40) + txgbe_e56_bp_watchdog_event(adapter); + +#ifndef POLL_LINK_STATUS + if(BOND_CHECK_LINK_MODE == 1){ + value = rd32(hw, 0x14404); + value = value & 0x1; + if(value == 1) + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + } + if (!(adapter->flags2 & TXGBE_FLAG2_LINK_DOWN)) + txgbe_watchdog_update_link(adapter); + + if (adapter->link_up) + txgbe_watchdog_link_is_up(adapter); + else + txgbe_watchdog_link_is_down(adapter); +#endif + +#ifdef CONFIG_PCI_IOV + txgbe_spoof_check(adapter); +#endif /* CONFIG_PCI_IOV */ + + + txgbe_update_stats(adapter); + + txgbe_watchdog_flush_tx(adapter); +} + +static void txgbe_phy_event_subtask(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 rdata; + + /* if interface is down do nothing */ + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_REMOVING, &adapter->state) || + test_bit(__TXGBE_RESETTING, &adapter->state)) + return; + + if (!(adapter->flags3 & TXGBE_FLAG3_PHY_EVENT)) + return; + + adapter->flags3 &= ~TXGBE_FLAG3_PHY_EVENT; + + mutex_lock(&adapter->e56_lock); + rdata = rd32_ephy(hw, E56PHY_INTR_0_ADDR); + if (rdata & E56PHY_INTR_0_IDLE_ENTRY1) { + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ENABLE_ADDR, 0x0); + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ADDR, E56PHY_INTR_0_IDLE_ENTRY1); + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ENABLE_ADDR, E56PHY_INTR_0_IDLE_ENTRY1); + } + + rdata = rd32_ephy(hw, E56PHY_INTR_1_ADDR); + if (rdata & E56PHY_INTR_1_IDLE_EXIT1) { + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ENABLE_ADDR, 0x0); + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ADDR, E56PHY_INTR_1_IDLE_EXIT1); + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ENABLE_ADDR, E56PHY_INTR_1_IDLE_EXIT1); + + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + } + mutex_unlock(&adapter->e56_lock); +} + +/** + * txgbe_sfp_detection_subtask - poll for SFP+ cable + * @adapter - the txgbe adapter structure + **/ +static void txgbe_sfp_detection_subtask(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 value = 0; + s32 err; + + /* not searching for SFP so there is nothing to do here */ + if (!(adapter->flags2 & TXGBE_FLAG2_SEARCH_FOR_SFP) && + !(adapter->flags2 & TXGBE_FLAG2_SFP_NEEDS_RESET)) + return; + + if (adapter->sfp_poll_time && + time_after(adapter->sfp_poll_time, jiffies)) + return; /* If not yet time to poll for SFP */ + + /* someone else is in init, wait until next service event */ + if (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + return; + + adapter->sfp_poll_time = jiffies + TXGBE_SFP_POLL_JIFFIES - 1; + + if (hw->mac.type == txgbe_mac_aml40) { + value = rd32(hw, TXGBE_GPIO_EXT); + if (value & TXGBE_SFP1_MOD_PRST_LS) { + err = TXGBE_ERR_SFP_NOT_PRESENT; + adapter->flags2 &= 
~TXGBE_FLAG2_SFP_NEEDS_RESET; + goto sfp_out; + } + } + + if (hw->mac.type == txgbe_mac_aml) { + value = rd32(hw, TXGBE_GPIO_EXT); + if (value & TXGBE_SFP1_MOD_ABS_LS) { + err = TXGBE_ERR_SFP_NOT_PRESENT; + adapter->flags2 &= ~TXGBE_FLAG2_SFP_NEEDS_RESET; + goto sfp_out; + } + } + + /* wait for sfp module ready*/ + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + msleep(200); + + adapter->eeprom_type = 0; + adapter->eeprom_len = 0; + memset(adapter->i2c_eeprom, 0, sizeof(u8)*512); + + err = TCALL(hw, phy.ops.identify_sfp); + if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) + goto sfp_out; + + if (err == TXGBE_ERR_SFP_NOT_PRESENT) { + /* If no cable is present, then we need to reset + * the next time we find a good cable. */ + adapter->flags2 |= TXGBE_FLAG2_SFP_NEEDS_RESET; + } + + /* exit on error */ + if (err) + goto sfp_out; + + /* exit if reset not needed */ + if (!(adapter->flags2 & TXGBE_FLAG2_SFP_NEEDS_RESET)) + goto sfp_out; + + adapter->flags2 &= ~TXGBE_FLAG2_SFP_NEEDS_RESET; + + err = hw->mac.ops.setup_sfp(hw); + + hw->phy.autoneg_advertised = 0; + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type); + +sfp_out: + clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); + + if ((err == TXGBE_ERR_SFP_NOT_SUPPORTED) && + adapter->netdev_registered) { + e_dev_err("failed to initialize because an unsupported " + "SFP+ module type was detected.\n"); + } +} + +/** + * txgbe_sfp_link_config_subtask - set up link SFP after module install + * @adapter - the txgbe adapter structure + **/ +static void txgbe_sfp_link_config_subtask(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 speed; + bool autoneg = false; + u16 value; + u32 gssr = hw->phy.phy_semaphore_mask; + u8 device_type = hw->subsystem_device_id & 0xF0; + + if (!(adapter->flags & TXGBE_FLAG_NEED_LINK_CONFIG)) + return; + + /* someone else is in init, wait until next service event */ + if (test_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + return; + + adapter->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG; + + if (device_type == TXGBE_ID_XAUI) { + /* clear ext phy int status */ + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 0x03, 0x8011, &value); + if (value & 0x400) + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + if (!(value & 0x800)) { + return; + } + } + + if(device_type == TXGBE_ID_MAC_XAUI || + (TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_copper && + device_type == TXGBE_ID_SFI_XAUI)) { + speed = TXGBE_LINK_SPEED_10GB_FULL; + } else if (device_type == TXGBE_ID_MAC_SGMII) { + speed = TXGBE_LINK_SPEED_1GB_FULL; + } else { + speed = hw->phy.autoneg_advertised; + if ((!speed) && (hw->mac.ops.get_link_capabilities)) { + TCALL(hw, mac.ops.get_link_capabilities, &speed, &autoneg); + /* setup the highest link when no autoneg */ + if (!autoneg) { + if (speed & TXGBE_LINK_SPEED_25GB_FULL) + speed = TXGBE_LINK_SPEED_25GB_FULL; + else if (speed & TXGBE_LINK_SPEED_10GB_FULL) + speed = TXGBE_LINK_SPEED_10GB_FULL; + } + } + } + + /* firmware is configuring phy now, delay host driver config action */ + if (hw->mac.type == txgbe_mac_aml || + hw->mac.type == txgbe_mac_aml40) { + if (TCALL(hw, mac.ops.acquire_swfw_sync, gssr) != 0) { + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + e_warn(probe, "delay config ephy\n"); + return; + } + } + + TCALL(hw, mac.ops.setup_link, speed, false); + + if (hw->mac.type == txgbe_mac_aml || + hw->mac.type == txgbe_mac_aml40) + TCALL(hw, mac.ops.release_swfw_sync, gssr); + + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + 
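+	/* Note when link configuration was issued; while
+	 * TXGBE_FLAG_NEED_LINK_UPDATE is set the service timer polls more
+	 * frequently for the link outcome.
+	 */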
adapter->link_check_timeout = jiffies; +} + +static void txgbe_sfp_reset_eth_phy_subtask(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 speed; + bool linkup = true; + u32 i = 0; + + if (!(adapter->flags2 & TXGBE_FLAG_NEED_ETH_PHY_RESET)) + return; + + adapter->flags2 &= ~TXGBE_FLAG_NEED_ETH_PHY_RESET; + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + return; + + TCALL(hw, mac.ops.check_link, &speed, &linkup, false); + if (!linkup) { + txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, + 0xA000); + /* wait phy initialization done */ + for (i = 0; i < TXGBE_PHY_INIT_DONE_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, + TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0) + break; + msleep(100); + } + } +} + +/** + * txgbe_service_timer - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void txgbe_service_timer(struct timer_list *t) +{ + struct txgbe_adapter *adapter = from_timer(adapter, t, service_timer); + unsigned long next_event_offset; + struct txgbe_hw *hw = &adapter->hw; + u32 val = 0; + + /* poll faster when waiting for link */ + if (adapter->flags & TXGBE_FLAG_NEED_LINK_UPDATE) { + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4) + next_event_offset = HZ / 10; + else if (BOND_CHECK_LINK_MODE == 1) + next_event_offset = HZ / 100; + else + next_event_offset = HZ / 10; + } else + next_event_offset = HZ * 2; + + /* record which func to provoke PCIE recovery */ + if (rd32(&adapter->hw, TXGBE_MIS_PF_SM) == 1) { + val = rd32m(&adapter->hw, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN0_UP | + TXGBE_MIS_PRB_CTL_LAN1_UP); + if (val & TXGBE_MIS_PRB_CTL_LAN0_UP) { + if (hw->bus.lan_id == 0) { + adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; + e_info(probe, "txgbe_service_timer: set recover on Lan0\n"); + } + } else if (val & TXGBE_MIS_PRB_CTL_LAN1_UP) { + if (hw->bus.lan_id == 1) { + adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; + e_info(probe, "txgbe_service_timer: set recover on Lan1\n"); + } + } + } + + /* Reset the timer */ + mod_timer(&adapter->service_timer, next_event_offset + jiffies); + + txgbe_service_event_schedule(adapter); + if ((hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0) || + (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1) || + (hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core0) || + (hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core1)) { + queue_work(txgbe_wq, &adapter->sfp_sta_task); + } +} + +static void txgbe_sfp_phy_status_work(struct work_struct *work) +{ + struct txgbe_adapter *adapter = container_of(work, + struct txgbe_adapter, + sfp_sta_task); + struct txgbe_hw *hw = &adapter->hw; + u16 data = 0; + bool status = false; + s32 i2c_status; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + + if (0 != TCALL(hw, mac.ops.acquire_swfw_sync, swfw_mask)) + return; + + if ((hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0) || + (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1)) { + i2c_status = TCALL(hw, phy.ops.read_i2c_sfp_phy, + 0x0a, + &data); + + if (i2c_status != 0) + goto RELEASE_SEM; + + /* Avoid read module info and read f2c module internal phy + * may cause i2c controller read reg data err + */ + if ((data & 0x83ff) != 0 || data == 0) + goto RELEASE_SEM; + + if ((data & TXGBE_I2C_PHY_LOCAL_RX_STATUS) && + (data & TXGBE_I2C_PHY_REMOTE_RX_STATUS)) + status = true; + else + status = false; + }else if ((hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core0) || + (hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core1)) { + i2c_status = TCALL(hw, 
phy.ops.read_i2c_sfp_phy, + 0x8008, + &data); + + if (i2c_status != 0) + goto RELEASE_SEM; + + if (data & TXGBE_I2C_10G_SFP_LINK_STATUS) + status = true; + else + status = false; + } + +RELEASE_SEM: + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); + /* sync sfp status to firmware */ + wr32(hw, TXGBE_TSC_LSEC_PKTNUM0, data | 0x80000000); + + if (hw->f2c_mod_status != status) { + hw->f2c_mod_status = status; + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + } +} + +static void txgbe_amlit_temp_subtask(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 link_speed = 0, val = 0; + s32 status = 0; + int temp; + + if (hw->mac.type != txgbe_mac_aml && + hw->mac.type != txgbe_mac_aml40) + return; + + if (!netif_carrier_ok(adapter->netdev)) + return; + + status = txgbe_e56_get_temp(hw, &temp); + if (status) + return; + + if (!(temp - adapter->amlite_temp > 4 || + adapter->amlite_temp - temp > 4)) + return; + + adapter->amlite_temp = temp; + val = rd32(hw, TXGBE_CFG_PORT_ST); + if (val & TXGBE_AMLITE_CFG_LED_CTL_LINK_40G_SEL) + link_speed = TXGBE_LINK_SPEED_40GB_FULL; + else if (val & TXGBE_AMLITE_CFG_LED_CTL_LINK_25G_SEL) + link_speed = TXGBE_LINK_SPEED_25GB_FULL; + else + link_speed = TXGBE_LINK_SPEED_10GB_FULL; + + mutex_lock(&adapter->e56_lock); + if (hw->mac.type == txgbe_mac_aml) + txgbe_temp_track_seq(hw, link_speed); + else if (hw->mac.type == txgbe_mac_aml40) + txgbe_temp_track_seq_40g(hw, link_speed); + mutex_unlock(&adapter->e56_lock); + +} + +#ifdef POLL_LINK_STATUS +/** + * txgbe_service_timer - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void txgbe_link_check_timer(struct timer_list *t) +{ + struct txgbe_adapter *adapter = from_timer(adapter, t, link_check_timer); + unsigned long next_event_offset = HZ/100; + struct txgbe_hw __maybe_unused *hw = &adapter->hw; + + mod_timer(&adapter->link_check_timer, next_event_offset + jiffies); + if(test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_REMOVING, &adapter->state) || + test_bit(__TXGBE_RESETTING, &adapter->state)) + return; + + txgbe_watchdog_update_link(adapter); + + if (adapter->link_up) + txgbe_watchdog_link_is_up(adapter); + else + txgbe_watchdog_link_is_down(adapter); + +} +#endif + +static void txgbe_reset_subtask(struct txgbe_adapter *adapter) +{ + u32 reset_flag = 0; + u32 value = 0; + union txgbe_tx_desc *tx_desc; + int i, j; + u32 desc_error[4] = {0, 0, 0, 0}; + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_ring *tx_ring; + struct txgbe_tx_buffer *tx_buffer; + u32 size; + + if (!(adapter->flags2 & (TXGBE_FLAG2_PF_RESET_REQUESTED | + TXGBE_FLAG2_DEV_RESET_REQUESTED | + TXGBE_FLAG2_GLOBAL_RESET_REQUESTED | + TXGBE_FLAG2_RESET_INTR_RECEIVED | + TXGBE_FLAG2_DMA_RESET_REQUESTED))) + return; + + /* If we're already down, just bail */ + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_REMOVING, &adapter->state)) + return; + + if (netif_msg_tx_err(adapter)) { + for (i = 0; i < 4; i++) + desc_error[i] = rd32(hw, TXGBE_TDM_DESC_FATAL(i)); + + /* check tdm fatal error */ + for (i = 0; i < adapter->num_tx_queues; i++) { + if (desc_error[i / 32] & (1 << i % 32)) { + e_err(tx_err, "TDM fatal error queue[%d]", i); + tx_ring = adapter->tx_ring[i]; + e_warn(tx_err, "queue[%d] RP = 0x%x\n", i , + rd32(&adapter->hw, TXGBE_PX_TR_RP(adapter->tx_ring[i]->reg_idx))); + for (j = 0; j < tx_ring->count; j++) { + tx_desc = TXGBE_TX_DESC(tx_ring, j); + if (tx_desc->read.olinfo_status != 1) + e_warn(tx_err, "queue[%d][%d]:0x%llx, 0x%x, 0x%x\n", + i, j, 
tx_desc->read.buffer_addr, tx_desc->read.cmd_type_len, tx_desc->read.olinfo_status); + } + for (j = 0; j < tx_ring->count; j++) { + tx_buffer = &tx_ring->tx_buffer_info[j]; + size = dma_unmap_len(tx_buffer, len); + if (size != 0 && tx_buffer->va) { + e_err(pktdata, "tx buffer[%d][%d]: \n", i, j); + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, tx_buffer->va, size, true); + } + } + + for (j = 0; j < tx_ring->count; j++) { + tx_buffer = &tx_ring->tx_buffer_info[j]; + if (tx_buffer->skb) { + e_err(pktdata, "****skb in tx buffer[%d][%d]: *******\n", i, j); + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, tx_buffer->skb, sizeof(struct sk_buff), true); + } + } + netif_stop_subqueue(tx_ring->netdev, i); + if (!ring_is_xdp(tx_ring)) + netdev_tx_reset_queue(txring_txq(tx_ring)); + txgbe_do_lan_reset(adapter); + } + } + } + + netdev_err(adapter->netdev, "Reset adapter\n"); + adapter->tx_timeout_count++; + + rtnl_lock(); + if (adapter->flags2 & TXGBE_FLAG2_GLOBAL_RESET_REQUESTED) { + reset_flag |= TXGBE_FLAG2_GLOBAL_RESET_REQUESTED; + adapter->flags2 &= ~TXGBE_FLAG2_GLOBAL_RESET_REQUESTED; + } + if (adapter->flags2 & TXGBE_FLAG2_DEV_RESET_REQUESTED) { + reset_flag |= TXGBE_FLAG2_DEV_RESET_REQUESTED; + adapter->flags2 &= ~TXGBE_FLAG2_DEV_RESET_REQUESTED; + } + if (adapter->flags2 & TXGBE_FLAG2_PF_RESET_REQUESTED) { + reset_flag |= TXGBE_FLAG2_PF_RESET_REQUESTED; + adapter->flags2 &= ~TXGBE_FLAG2_PF_RESET_REQUESTED; + } + if (adapter->flags2 & TXGBE_FLAG2_DMA_RESET_REQUESTED) { + reset_flag |= TXGBE_FLAG2_DMA_RESET_REQUESTED; + adapter->flags2 &= ~TXGBE_FLAG2_DMA_RESET_REQUESTED; + } + + if (adapter->flags2 & TXGBE_FLAG2_RESET_INTR_RECEIVED) { + /* If there's a recovery already waiting, it takes + * precedence before starting a new reset sequence. + */ + adapter->flags2 &= ~TXGBE_FLAG2_RESET_INTR_RECEIVED; + value = rd32m(&adapter->hw, TXGBE_MIS_RST_ST, + TXGBE_MIS_RST_ST_DEV_RST_TYPE_MASK) >> + TXGBE_MIS_RST_ST_DEV_RST_TYPE_SHIFT; + if (value == TXGBE_MIS_RST_ST_DEV_RST_TYPE_SW_RST) { + adapter->hw.reset_type = TXGBE_SW_RESET; + /* errata 7 */ + if (txgbe_mng_present(&adapter->hw) && + adapter->hw.revision_id == TXGBE_SP_MPW) + adapter->flags2 |= + TXGBE_FLAG2_MNG_REG_ACCESS_DISABLED; + } else if (value == TXGBE_MIS_RST_ST_DEV_RST_TYPE_GLOBAL_RST) + adapter->hw.reset_type = TXGBE_GLOBAL_RESET; + adapter->hw.force_full_reset = TRUE; + txgbe_reinit_locked(adapter); + adapter->hw.force_full_reset = FALSE; + goto unlock; + } + + if (reset_flag & TXGBE_FLAG2_DEV_RESET_REQUESTED) { + /* Request a Device Reset + * + * This will start the chip's countdown to the actual full + * chip reset event, and a warning interrupt to be sent + * to all PFs, including the requestor. Our handler + * for the warning interrupt will deal with the shutdown + * and recovery of the switch setup. 
+ */ + /*debug to up*/ + /*txgbe_dump(adapter);*/ + if (txgbe_mng_present(&adapter->hw)) { + txgbe_reset_hostif(&adapter->hw); + } else + wr32m(&adapter->hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_SW_RST, TXGBE_MIS_RST_SW_RST); + + } else if (reset_flag & TXGBE_FLAG2_PF_RESET_REQUESTED) { + /*debug to up*/ + /*txgbe_dump(adapter);*/ + txgbe_reinit_locked(adapter); + } else if (reset_flag & TXGBE_FLAG2_DMA_RESET_REQUESTED) { + txgbe_reinit_locked_dma_reset(adapter); + } else if (reset_flag & TXGBE_FLAG2_GLOBAL_RESET_REQUESTED) { + /* Request a Global Reset + * + * This will start the chip's countdown to the actual full + * chip reset event, and a warning interrupt to be sent + * to all PFs, including the requestor. Our handler + * for the warning interrupt will deal with the shutdown + * and recovery of the switch setup. + */ + /*debug to up*/ + /*txgbe_dump(adapter);*/ + pci_save_state(adapter->pdev); + if (txgbe_mng_present(&adapter->hw)) { + txgbe_reset_hostif(&adapter->hw); + } else + wr32m(&adapter->hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_GLOBAL_RST, + TXGBE_MIS_RST_GLOBAL_RST); + } + +unlock: + rtnl_unlock(); +} + +static void txgbe_check_pcie_subtask(struct txgbe_adapter *adapter) +{ + bool status; + + if (!(adapter->flags2 & TXGBE_FLAG2_PCIE_NEED_RECOVER)) + return; + + txgbe_print_tx_hang_status(adapter); + txgbe_dump_all_ring_desc(adapter); + + wr32m(&adapter->hw, TXGBE_MIS_PF_SM, TXGBE_MIS_PF_SM_SM, 0); + + if ((TXGBE_PCIE_RECOVER == 1) && !(adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)) { + status = txgbe_check_recovery_capability(adapter->pdev); + if (status) { + e_info(probe, "do recovery\n"); + txgbe_pcie_do_recovery(adapter->pdev); + } else { + e_err(drv, "This platform can't support pcie recovery, skip it\n"); + } + } + + adapter->flags2 &= ~TXGBE_FLAG2_PCIE_NEED_RECOVER; +} +#if 0 +static void txgbe_swfw_mbox_subtask(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 bi; + u32 hdr_size = sizeof(struct txgbe_hic_hdr); + u16 buf_len; + u32 dword_len; + + if (!(adapter->flags & TXGBE_FLAG_SWFW_MBOX_NOTIFY)) + return; + + printk("recv a mbox notify\n"); + + /* Calculate length in DWORDs */ + dword_len = hdr_size >> 2; + + /* first pull in the header so we know the buffer length */ + for (bi = 0; bi < dword_len; bi++) { + adapter->swfw_mbox_buf[bi] = rd32a(hw, TXGBE_AML_MNG_MBOX_FW2SW, bi); + TXGBE_LE32_TO_CPUS(&adapter->swfw_mbox_buf[bi]); + } + + /* If there is any thing in data position pull it in */ + buf_len = ((struct txgbe_hic_hdr *)adapter->swfw_mbox_buf)->buf_len; + if (buf_len == 0) + return; + + /* Calculate length in DWORDs, add 3 for odd lengths */ + dword_len = (buf_len + 3) >> 2; + + /* Pull in the rest of the buffer (bi is where we left off) */ + for (; bi <= dword_len; bi++) { + adapter->swfw_mbox_buf[bi] = rd32a(hw, TXGBE_AML_MNG_MBOX_FW2SW, + bi); + TXGBE_LE32_TO_CPUS(&adapter->swfw_mbox_buf[bi]); + } + + printk("recv mbox data, store in swfw_mbox_buf\n"); + + + /* amlite: check if it is a reply, then inform */ + adapter->flags2 |= TXGBE_FLAG2_SWFW_MBOX_REPLY; + + adapter->flags &= ~TXGBE_FLAG_SWFW_MBOX_NOTIFY; +} +#endif +static void txgbe_tx_queue_clear_error_task(struct txgbe_adapter *adapter) { + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_ring *tx_ring; + u32 desc_error[4] = {0, 0, 0, 0}; + union txgbe_tx_desc *tx_desc; + u32 i, j; + struct txgbe_tx_buffer *tx_buffer; + u32 size; + + for (i = 0; i < 4; i++) + desc_error[i] = rd32(hw, TXGBE_TDM_DESC_NONFATAL(i)); + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (desc_error[i / 32] 
& (1 << i % 32)) { + tx_ring = adapter->tx_ring[i]; + netif_stop_subqueue(tx_ring->netdev, i); + msec_delay(10); + + e_err(tx_err, "queue[%d] RP = 0x%x\n", i , + rd32(&adapter->hw, TXGBE_PX_TR_RP(adapter->tx_ring[i]->reg_idx))); + for (j = 0; j < tx_ring->count; j++) { + tx_desc = TXGBE_TX_DESC(tx_ring, j); + if (tx_desc->read.olinfo_status != 0x1) + e_warn(tx_err, "queue[%d][%d]:0x%llx, 0x%x, 0x%x\n", + i, j, tx_desc->read.buffer_addr, tx_desc->read.cmd_type_len, tx_desc->read.olinfo_status); + } + + for (j = 0; j < tx_ring->count; j++) { + tx_buffer = &tx_ring->tx_buffer_info[j]; + size = dma_unmap_len(tx_buffer, len); + if (size != 0 && tx_buffer->va) { + e_warn(pktdata, "tx buffer[%d][%d]: \n", i, j); + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, tx_buffer->va, size, true); + } + } + + for (j = 0; j < tx_ring->count; j++) { + tx_buffer = &tx_ring->tx_buffer_info[j]; + if (tx_buffer->skb) { + e_err(pktdata, "****skb in tx buffer[%d][%d]: *******\n", i, j); + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, tx_buffer->skb, sizeof(struct sk_buff), true); + } + } + + wr32(hw, TXGBE_TDM_DESC_NONFATAL(i / 32), BIT(i % 32)); + + txgbe_clean_tx_ring(tx_ring); + + txgbe_configure_tx_ring(adapter, tx_ring); + netif_start_subqueue(tx_ring->netdev, i); + } + } +} + +/** + * txgbe_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +static void txgbe_service_task(struct work_struct *work) +{ + struct txgbe_adapter *adapter = container_of(work, + struct txgbe_adapter, + service_task); + struct txgbe_hw *hw = &adapter->hw; + + if (TXGBE_REMOVED(adapter->hw.hw_addr)) { + if (!test_bit(__TXGBE_DOWN, &adapter->state)) { + rtnl_lock(); + txgbe_down(adapter); + rtnl_unlock(); + } + txgbe_service_event_complete(adapter); + return; + } +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) +#ifndef HAVE_UDP_TUNNEL_NIC_INFO + if (adapter->flags2 & TXGBE_FLAG2_VXLAN_REREG_NEEDED) { + rtnl_lock(); + adapter->flags2 &= ~TXGBE_FLAG2_VXLAN_REREG_NEEDED; +#ifdef HAVE_UDP_ENC_RX_OFFLOAD + udp_tunnel_get_rx_info(adapter->netdev); +#else + vxlan_get_rx_port(adapter->netdev); +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ + rtnl_unlock(); + } +#endif /* HAVE_UDP_TUNNEL_NIC_INFO */ +#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ + + txgbe_check_pcie_subtask(adapter); +/* txgbe_swfw_mbox_subtask(adapter); */ + txgbe_reset_subtask(adapter); + txgbe_phy_event_subtask(adapter); + txgbe_sfp_detection_subtask(adapter); + if (!(hw->mac.type == txgbe_mac_sp || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core0 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core1 || + txgbe_is_backplane(hw))) + txgbe_watchdog_subtask(adapter); + txgbe_sfp_link_config_subtask(adapter); + txgbe_sfp_reset_eth_phy_subtask(adapter); + txgbe_check_overtemp_subtask(adapter); + txgbe_watchdog_subtask(adapter); +#ifdef HAVE_TX_MQ + txgbe_fdir_reinit_subtask(adapter); +#endif + txgbe_check_hang_subtask(adapter); +#ifdef HAVE_PTP_1588_CLOCK + if (test_bit(__TXGBE_PTP_RUNNING, &adapter->state)) { + txgbe_ptp_overflow_check(adapter); + if (unlikely(adapter->flags & + TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)) + txgbe_ptp_rx_hang(adapter); + } +#endif /* HAVE_PTP_1588_CLOCK */ + txgbe_tx_queue_clear_error_task(adapter); + txgbe_amlit_temp_subtask(adapter); + + txgbe_service_event_complete(adapter); +} + 
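+/* View of the (possibly inner) L3 header as either IPv4 or IPv6; used when
+ * encoding the Tx descriptor packet type and when sampling flows for ATR.
+ */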
+union network_header { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + void *raw; +}; + +#ifndef ETH_P_TEB +#define ETH_P_TEB 0x6558 +#endif +static txgbe_dptype encode_tx_desc_ptype(const struct txgbe_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; +#ifdef HAVE_ENCAP_TSO_OFFLOAD + u8 tun_prot = 0; +#endif + u8 l4_prot = 0; + u8 ptype = 0; + unsigned char *exthdr; + unsigned char *l4_hdr; + __be16 frag_off; + +#ifdef HAVE_ENCAP_TSO_OFFLOAD + if (skb->encapsulation) { + union network_header hdr; + + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) + goto encap_frag; + ptype = TXGBE_PTYPE_TUN_IPV4; + break; + case __constant_htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb)+ sizeof(struct ipv6hdr); + tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &tun_prot, &frag_off); + if (tun_prot == NEXTHDR_FRAGMENT) + goto encap_frag; + ptype = TXGBE_PTYPE_TUN_IPV6; + break; + default: + goto exit; + } + + if (tun_prot == IPPROTO_IPIP || + tun_prot == IPPROTO_IPV6) { + hdr.raw = (void *)inner_ip_hdr(skb); + ptype |= TXGBE_PTYPE_PKT_IPIP; + } else if (tun_prot == IPPROTO_UDP) { + hdr.raw = (void *)inner_ip_hdr(skb); + /* fixme: VXLAN-GPE neither ETHER nor IP */ +#ifdef ENCAP_TYPE_ETHER + if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB)) { + ptype |= TXGBE_PTYPE_PKT_IG; + } else { + if (((struct ethhdr *) + skb_inner_mac_header(skb))->h_proto + == htons(ETH_P_8021Q)) { + ptype |= TXGBE_PTYPE_PKT_IGMV; + } else { + ptype |= TXGBE_PTYPE_PKT_IGM; + } + } +#endif + } else if (tun_prot == IPPROTO_GRE) { + hdr.raw = (void *)inner_ip_hdr(skb); +#ifdef ENCAP_TYPE_ETHER + if (skb->inner_protocol == htons(ETH_P_IP) || + skb->inner_protocol == htons(ETH_P_IPV6)) { + ptype |= TXGBE_PTYPE_PKT_IG; + } else { + if (((struct ethhdr *) + skb_inner_mac_header(skb))->h_proto + == htons(ETH_P_8021Q)) { + ptype |= TXGBE_PTYPE_PKT_IGMV; + } else { + ptype |= TXGBE_PTYPE_PKT_IGM; + } + } +#endif + } else { + goto exit; + } + + switch (hdr.ipv4->version) { + case IPVERSION: + l4_prot = hdr.ipv4->protocol; + if (hdr.ipv4->frag_off & htons(IP_MF | IP_OFFSET)) { + ptype |= TXGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; + case 6: + l4_hdr = skb_inner_transport_header(skb); + exthdr = skb_inner_network_header(skb) + sizeof(struct ipv6hdr); + l4_prot = inner_ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_prot, &frag_off); + ptype |= TXGBE_PTYPE_PKT_IPV6; + if (l4_prot == NEXTHDR_FRAGMENT) { + ptype |= TXGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; + default: + goto exit; + } + } else { +encap_frag: +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + l4_prot = ip_hdr(skb)->protocol; + ptype = TXGBE_PTYPE_PKT_IP; + if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { + ptype |= TXGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; +#ifdef NETIF_F_IPV6_CSUM + case __constant_htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb)+ sizeof(struct ipv6hdr); + l4_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_prot, &frag_off); + + ptype = TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6; + if (l4_prot == NEXTHDR_FRAGMENT) { + ptype |= TXGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; +#endif /* 
NETIF_F_IPV6_CSUM */ + case __constant_htons(ETH_P_1588): + ptype = TXGBE_PTYPE_L2_TS; + goto exit; + case __constant_htons(ETH_P_FIP): + ptype = TXGBE_PTYPE_L2_FIP; + goto exit; + case __constant_htons(TXGBE_ETH_P_LLDP): + ptype = TXGBE_PTYPE_L2_LLDP; + goto exit; + case __constant_htons(TXGBE_ETH_P_CNM): + ptype = TXGBE_PTYPE_L2_CNM; + goto exit; + case __constant_htons(ETH_P_PAE): + ptype = TXGBE_PTYPE_L2_EAPOL; + goto exit; + case __constant_htons(ETH_P_ARP): + ptype = TXGBE_PTYPE_L2_ARP; + goto exit; + default: + ptype = TXGBE_PTYPE_L2_MAC; + goto exit; + } +#ifdef HAVE_ENCAP_TSO_OFFLOAD + } +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + + switch (l4_prot) { + case IPPROTO_TCP: + ptype |= TXGBE_PTYPE_TYP_TCP; + break; + case IPPROTO_UDP: + ptype |= TXGBE_PTYPE_TYP_UDP; + break; +#ifdef HAVE_SCTP + case IPPROTO_SCTP: + ptype |= TXGBE_PTYPE_TYP_SCTP; + break; +#endif /* HAVE_SCTP */ + default: + ptype |= TXGBE_PTYPE_TYP_IP; + break; + } + +exit: + return txgbe_decode_ptype(ptype); +} + +static int txgbe_tso(struct txgbe_ring *tx_ring, + struct txgbe_tx_buffer *first, + u8 *hdr_len, txgbe_dptype dptype) +{ +#ifndef NETIF_F_TSO + return 0; +#else + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens, type_tucmd; + u32 mss_l4len_idx, l4len; + struct tcphdr *tcph; + struct iphdr *iph; + u32 tunhdr_eiplen_tunlen = 0; +#ifdef HAVE_ENCAP_TSO_OFFLOAD + u8 tun_prot = 0; + unsigned char *exthdr; + unsigned char *l4_hdr; + __be16 frag_off; + bool enc = skb->encapsulation; +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ +#ifdef NETIF_F_TSO6 + struct ipv6hdr *ipv6h; +#endif + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + if (skb_header_cloned(skb)) { + int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) + return err; + } + +#ifdef HAVE_ENCAP_TSO_OFFLOAD + iph = enc ? inner_ip_hdr(skb) : ip_hdr(skb); +#else + iph = ip_hdr(skb); +#endif + if (iph->version == 4) { +#ifdef HAVE_ENCAP_TSO_OFFLOAD + tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb); +#else + tcph = tcp_hdr(skb); +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + iph->tot_len = 0; + iph->check = 0; + tcph->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + first->tx_flags |= TXGBE_TX_FLAGS_TSO | + TXGBE_TX_FLAGS_CSUM | + TXGBE_TX_FLAGS_IPV4 | + TXGBE_TX_FLAGS_CC; + +#ifdef NETIF_F_TSO6 + } else if (iph->version == 6 && skb_is_gso_v6(skb)) { +#ifdef HAVE_ENCAP_TSO_OFFLOAD + ipv6h = enc ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); + tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb); +#else + ipv6h = ipv6_hdr(skb); + tcph = tcp_hdr(skb); +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + ipv6h->payload_len = 0; + tcph->check = + ~csum_ipv6_magic(&ipv6h->saddr, + &ipv6h->daddr, + 0, IPPROTO_TCP, 0); + first->tx_flags |= TXGBE_TX_FLAGS_TSO | + TXGBE_TX_FLAGS_CSUM | + TXGBE_TX_FLAGS_CC; +#endif /* NETIF_F_TSO6 */ + } + + /* compute header lengths */ +#ifdef HAVE_ENCAP_TSO_OFFLOAD + l4len = enc ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); + *hdr_len = enc ? 
(skb_inner_transport_header(skb) - skb->data) + : skb_transport_offset(skb); + *hdr_len += l4len; +#else + l4len = tcp_hdrlen(skb); + *hdr_len = skb_transport_offset(skb) + l4len; +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* mss_l4len_id: use 0 as index for TSO */ + mss_l4len_idx = l4len << TXGBE_TXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << TXGBE_TXD_MSS_SHIFT; +#ifdef MAX_9732_SIZE + if (skb_shinfo(skb)->gso_size > (9428 - *hdr_len)) + mss_l4len_idx |= (9428 - *hdr_len) << TXGBE_TXD_MSS_SHIFT; + else + { + mss_l4len_idx |= skb_shinfo(skb)->gso_size << TXGBE_TXD_MSS_SHIFT; + } +#endif + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ +#ifdef HAVE_ENCAP_TSO_OFFLOAD + if (enc) { + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + first->tx_flags |= TXGBE_TX_FLAGS_OUTER_IPV4; + break; + case __constant_htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb)+ sizeof(struct ipv6hdr); + tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &tun_prot, &frag_off); + break; + default: + break; + } + switch (tun_prot) { + case IPPROTO_UDP: + tunhdr_eiplen_tunlen = TXGBE_TXD_TUNNEL_UDP; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + TXGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_GRE: + tunhdr_eiplen_tunlen = TXGBE_TXD_TUNNEL_GRE; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + TXGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_IPIP: + case IPPROTO_IPV6: + tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb)- + (char *)ip_hdr(skb)) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT; + break; + default: + break; + } + + vlan_macip_lens = skb_inner_network_header_len(skb) >> 1; + } else + vlan_macip_lens = skb_network_header_len(skb) >> 1; +#else + vlan_macip_lens = skb_network_header_len(skb) >> 1; +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + vlan_macip_lens |= skb_network_offset(skb) << TXGBE_TXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & TXGBE_TX_FLAGS_VLAN_MASK; + + type_tucmd = dptype.ptype << 24; +#ifdef NETIF_F_HW_VLAN_STAG_TX + if (skb->vlan_proto == htons(ETH_P_8021AD)) + type_tucmd |= TXGBE_SET_FLAG(first->tx_flags, + TXGBE_TX_FLAGS_HW_VLAN, + 0x1 << TXGBE_TXD_TAG_TPID_SEL_SHIFT); +#endif + + txgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, + type_tucmd, mss_l4len_idx); + + return 1; +#endif /* !NETIF_F_TSO */ +} + +static void txgbe_tx_csum(struct txgbe_ring *tx_ring, + struct txgbe_tx_buffer *first, txgbe_dptype dptype) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 mss_l4len_idx = 0; + u32 tunhdr_eiplen_tunlen = 0; +#ifdef HAVE_ENCAP_TSO_OFFLOAD + u8 tun_prot = 0; +#endif + u32 type_tucmd; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: + if (!(first->tx_flags & TXGBE_TX_FLAGS_HW_VLAN) && + !(first->tx_flags & TXGBE_TX_FLAGS_CC)) + return; + vlan_macip_lens = skb_network_offset(skb) << + TXGBE_TXD_MACLEN_SHIFT; + } else { + u8 l4_prot = 0; + unsigned char *exthdr; + unsigned char *l4_hdr; + __be16 frag_off; +#ifdef HAVE_ENCAP_TSO_OFFLOAD + union { + struct iphdr *ipv4; + struct 
ipv6hdr *ipv6; + u8 *raw; + } network_hdr; + union { + struct tcphdr *tcphdr; + u8 *raw; + } transport_hdr; + + if (skb->encapsulation) { + network_hdr.raw = skb_inner_network_header(skb); + transport_hdr.raw = skb_inner_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << + TXGBE_TXD_MACLEN_SHIFT; + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + break; + case __constant_htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb)+ sizeof(struct ipv6hdr); + tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &tun_prot, &frag_off); + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but version=%d\n", + network_hdr.ipv4->version); + } + return; + } + switch (tun_prot) { + case IPPROTO_UDP: + tunhdr_eiplen_tunlen = TXGBE_TXD_TUNNEL_UDP; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + TXGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_GRE: + tunhdr_eiplen_tunlen = TXGBE_TXD_TUNNEL_GRE; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + TXGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_IPIP: + case IPPROTO_IPV6: + tunhdr_eiplen_tunlen = + (((char *)inner_ip_hdr(skb)- + (char *)ip_hdr(skb)) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT; + break; + default: + break; + } + + } else { + network_hdr.raw = skb_network_header(skb); + transport_hdr.raw = skb_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << + TXGBE_TXD_MACLEN_SHIFT; + } + + switch (network_hdr.ipv4->version) { + case IPVERSION: + vlan_macip_lens |= + (transport_hdr.raw - network_hdr.raw) >> 1; + l4_prot = network_hdr.ipv4->protocol; + break; + case 6: + vlan_macip_lens |= + (transport_hdr.raw - network_hdr.raw) >> 1; + exthdr = network_hdr.raw + sizeof(struct ipv6hdr); + l4_prot = network_hdr.ipv6->nexthdr; + if (transport_hdr.raw != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_prot, &frag_off); + break; + default: + break; + } + +#else /* HAVE_ENCAP_TSO_OFFLOAD */ + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + vlan_macip_lens |= skb_network_header_len(skb) >> 1; + l4_prot = ip_hdr(skb)->protocol; + break; +#ifdef NETIF_F_IPV6_CSUM + case __constant_htons(ETH_P_IPV6): + vlan_macip_lens |= skb_network_header_len(skb) >> 1; + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb)+ sizeof(struct ipv6hdr); + l4_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_prot, &frag_off); + break; +#endif /* NETIF_F_IPV6_CSUM */ + default: + break; + } +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + + switch (l4_prot) { + case IPPROTO_TCP: +#ifdef HAVE_ENCAP_TSO_OFFLOAD + mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) << + TXGBE_TXD_L4LEN_SHIFT; +#else + mss_l4len_idx = tcp_hdrlen(skb) << + TXGBE_TXD_L4LEN_SHIFT; +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + break; +#ifdef HAVE_SCTP + case IPPROTO_SCTP: + mss_l4len_idx = sizeof(struct sctphdr) << + TXGBE_TXD_L4LEN_SHIFT; + break; +#endif /* HAVE_SCTP */ + case IPPROTO_UDP: + mss_l4len_idx = sizeof(struct udphdr) << + TXGBE_TXD_L4LEN_SHIFT; + break; + default: + skb_checksum_help(skb); + goto csum_failed; + } + + /* update TX checksum flag */ + 
first->tx_flags |= TXGBE_TX_FLAGS_CSUM; + } + first->tx_flags |= TXGBE_TX_FLAGS_CC; + /* vlan_macip_lens: MACLEN, VLAN tag */ +#ifndef HAVE_ENCAP_TSO_OFFLOAD + vlan_macip_lens |= skb_network_offset(skb) << TXGBE_TXD_MACLEN_SHIFT; +#endif /* !HAVE_ENCAP_TSO_OFFLOAD */ + vlan_macip_lens |= first->tx_flags & TXGBE_TX_FLAGS_VLAN_MASK; + + type_tucmd = dptype.ptype << 24; +#ifdef NETIF_F_HW_VLAN_STAG_TX + if (skb->vlan_proto == htons(ETH_P_8021AD)) + type_tucmd |= TXGBE_SET_FLAG(first->tx_flags, + TXGBE_TX_FLAGS_HW_VLAN, + 0x1 << TXGBE_TXD_TAG_TPID_SEL_SHIFT); +#endif + txgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, + type_tucmd, mss_l4len_idx); +} + +u32 txgbe_tx_cmd_type(u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = TXGBE_TXD_DTYP_DATA | + TXGBE_TXD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= TXGBE_SET_FLAG(tx_flags, TXGBE_TX_FLAGS_HW_VLAN, + TXGBE_TXD_VLE); + + /* set segmentation enable bits for TSO/FSO */ + cmd_type |= TXGBE_SET_FLAG(tx_flags, TXGBE_TX_FLAGS_TSO, + TXGBE_TXD_TSE); + + /* set timestamp bit if present */ + cmd_type |= TXGBE_SET_FLAG(tx_flags, TXGBE_TX_FLAGS_TSTAMP, + TXGBE_TXD_MAC_TSTAMP); + + cmd_type |= TXGBE_SET_FLAG(tx_flags, TXGBE_TX_FLAGS_LINKSEC, + TXGBE_TXD_LINKSEC); + + return cmd_type; +} + +static void txgbe_tx_olinfo_status(union txgbe_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << TXGBE_TXD_PAYLEN_SHIFT; + + /* enable L4 checksum for TSO and TX checksum offload */ + olinfo_status |= TXGBE_SET_FLAG(tx_flags, + TXGBE_TX_FLAGS_CSUM, + TXGBE_TXD_L4CS); + + /* enble IPv4 checksum for TSO */ + olinfo_status |= TXGBE_SET_FLAG(tx_flags, + TXGBE_TX_FLAGS_IPV4, + TXGBE_TXD_IIPCS); + /* enable outer IPv4 checksum for TSO */ + olinfo_status |= TXGBE_SET_FLAG(tx_flags, + TXGBE_TX_FLAGS_OUTER_IPV4, + TXGBE_TXD_EIPCS); + /* + * Check Context must be set if Tx switch is enabled, which it + * always is for case where virtual functions are running + */ + olinfo_status |= TXGBE_SET_FLAG(tx_flags, + TXGBE_TX_FLAGS_CC, + TXGBE_TXD_CC); + + olinfo_status |= TXGBE_SET_FLAG(tx_flags, + TXGBE_TX_FLAGS_IPSEC, + TXGBE_TXD_IPSEC); + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int __txgbe_maybe_stop_tx(struct txgbe_ring *tx_ring, u16 size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(txgbe_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! 
- use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +static inline int txgbe_maybe_stop_tx(struct txgbe_ring *tx_ring, u16 size) +{ + if (likely(txgbe_desc_unused(tx_ring) >= size)) + return 0; + + return __txgbe_maybe_stop_tx(tx_ring, size); +} + +static int txgbe_tx_map(struct txgbe_ring *tx_ring, + struct txgbe_tx_buffer *first, + const u8 hdr_len) +{ + struct txgbe_adapter *adapter = netdev_priv(tx_ring->netdev); + struct sk_buff *skb = first->skb; + struct txgbe_tx_buffer *tx_buffer; + union txgbe_tx_desc *tx_desc; + skb_frag_t *frag; + dma_addr_t dma; + unsigned int data_len, size; + u32 tx_flags = first->tx_flags; + u32 cmd_type = txgbe_tx_cmd_type(tx_flags); + u16 i = tx_ring->next_to_use; + + tx_desc = TXGBE_TX_DESC(tx_ring, i); + + txgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + +#if IS_ENABLED(CONFIG_FCOE) + if (tx_flags & TXGBE_TX_FLAGS_FCOE) { + if (data_len < sizeof(struct fcoe_crc_eof)) { + size -= sizeof(struct fcoe_crc_eof) - data_len; + data_len = 0; + } else { + data_len -= sizeof(struct fcoe_crc_eof); + } + } +#endif /* CONFIG_FCOE */ + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + tx_buffer->va = skb->data; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) { + tx_buffer->va = NULL; + goto dma_error; + } + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > TXGBE_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ TXGBE_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = TXGBE_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += TXGBE_MAX_DATA_PER_TXD; + size -= TXGBE_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = TXGBE_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + +#if IS_ENABLED(CONFIG_FCOE) + size = min_t(unsigned int, data_len, skb_frag_size(frag)); +#else + size = skb_frag_size(frag); +#endif + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_buffer->va = skb_frag_address_safe(frag); + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | TXGBE_TXD_CMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + skb_tx_timestamp(skb); + +#ifndef HAVE_TRANS_START_IN_QUEUE + netdev_ring(tx_ring)->trans_start = first->time_stamp; +#endif + /* + * Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. 
+ */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + /* set next_eop for amlite tx head wb*/ +#ifdef TXGBE_TXHEAD_WB + first->next_eop = i; +#endif + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + txgbe_maybe_stop_tx(tx_ring, adapter->desc_reserved + DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more() || + (txgbe_desc_unused(tx_ring) <= (tx_ring->count >> 1))) { + writel(i, tx_ring->tail); +#ifndef SPIN_UNLOCK_IMPLIES_MMIOWB + + /* The following mmiowb() is required on certain + * architechtures (IA64/Altix in particular) in order to + * synchronize the I/O calls with respect to a spin lock. This + * is because the wmb() on those architectures does not + * guarantee anything for posted I/O writes. + * + * Note that the associated spin_unlock() is not within the + * driver code, but in the networking core stack. + */ + mmiowb(); +#endif /* SPIN_UNLOCK_IMPLIES_MMIOWB */ + } + + return 0; +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_buffer_info map */ + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[i]; + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + tx_buffer->va = NULL; + if (tx_buffer == first) + break; + if (i == 0) + i += tx_ring->count; + i--; + } + + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + tx_ring->next_to_use = i; + + return -1; +} + +static void txgbe_atr(struct txgbe_ring *ring, + struct txgbe_tx_buffer *first, + txgbe_dptype dptype) +{ + struct txgbe_q_vector *q_vector = ring->q_vector; + union txgbe_atr_hash_dword input = { .dword = 0 }; + union txgbe_atr_hash_dword common = { .dword = 0 }; + union network_header hdr; + struct tcphdr *th; + + /* if ring doesn't have a interrupt vector, cannot perform ATR */ + if (!q_vector) + return; + + /* do nothing if sampling is disabled */ + if (!ring->atr_sample_rate) + return; + + ring->atr_count++; +#ifdef HAVE_ENCAP_TSO_OFFLOAD + if (dptype.etype) { + if (TXGBE_PTYPE_TYP_TCP != TXGBE_PTYPE_TYPL4(dptype.ptype)) + return; + hdr.raw = (void *)skb_inner_network_header(first->skb); + th = inner_tcp_hdr(first->skb); + } else +#endif + { + if (TXGBE_PTYPE_PKT_IP != TXGBE_PTYPE_PKT(dptype.ptype) || + TXGBE_PTYPE_TYP_TCP != TXGBE_PTYPE_TYPL4(dptype.ptype)) + return; + hdr.raw = (void *)skb_network_header(first->skb); + th = tcp_hdr(first->skb); + } + + /* skip this packet since it is invalid or the socket is closing */ + if (!th || th->fin) + return; + + /* sample on all syn packets or once every atr sample count */ + if (!th->syn && (ring->atr_count < ring->atr_sample_rate)) + return; + + /* reset sample count */ + ring->atr_count = 0; + + /* + * src and dst are inverted, think how the receiver sees them + * + * The input is broken into two sections, a non-compressed section + * containing vm_pool, vlan_id, and flow_type. The rest of the data + * is XORed together and stored in the compressed dword. 
+ */ + input.formatted.vlan_id = htons((u16)dptype.ptype); + + /* + * since src port and flex bytes occupy the same word XOR them together + * and write the value to source port portion of compressed dword + */ + if (first->tx_flags & TXGBE_TX_FLAGS_SW_VLAN) + common.port.src ^= th->dest ^ first->skb->protocol; + else if (first->tx_flags & TXGBE_TX_FLAGS_HW_VLAN) +#if defined(NETIF_F_HW_VLAN_CTAG_TX) || defined(NETIF_F_HW_VLAN_STAG_TX) + common.port.src ^= th->dest ^ first->skb->vlan_proto; +#else + common.port.src ^= th->dest ^ htons(ETH_P_8021Q); +#endif + else + common.port.src ^= th->dest ^ first->protocol; + common.port.dst ^= th->source; + + if (TXGBE_PTYPE_PKT_IPV6 & TXGBE_PTYPE_PKT(dptype.ptype)) { + input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV6; + common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ + hdr.ipv6->saddr.s6_addr32[1] ^ + hdr.ipv6->saddr.s6_addr32[2] ^ + hdr.ipv6->saddr.s6_addr32[3] ^ + hdr.ipv6->daddr.s6_addr32[0] ^ + hdr.ipv6->daddr.s6_addr32[1] ^ + hdr.ipv6->daddr.s6_addr32[2] ^ + hdr.ipv6->daddr.s6_addr32[3]; + } else { + input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4; + common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; + } + + /* This assumes the Rx queue and Tx queue are bound to the same CPU */ + txgbe_fdir_add_signature_filter(&q_vector->adapter->hw, + input, common, ring->queue_index); +} + + +#ifdef HAVE_XDP_SUPPORT +static int txgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) +{ + int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + struct txgbe_adapter *adapter = netdev_priv(dev); + struct bpf_prog *old_prog; + bool need_reset; + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) + return -EINVAL; + + if (adapter->flags & TXGBE_FLAG_DCB_ENABLED) + return -EINVAL; + + if (adapter->xdp_prog && prog) { + e_dev_err("XDP can't be active at the same time"); + return -EBUSY; + } + + /* verify txgbe ring attributes are sufficient for XDP */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct txgbe_ring *ring = adapter->rx_ring[i]; + + if (frame_size > txgbe_rx_bufsz(ring)) + return -EINVAL; + } + old_prog = adapter->xdp_prog; + need_reset = (!!prog != !!old_prog); + + if(need_reset) { + if (netif_running(dev)) + txgbe_close(dev); + else + txgbe_reset(adapter); + + if (nr_cpu_ids > MAX_XDP_QUEUES) + static_branch_inc(&txgbe_xdp_locking_key); + } + + old_prog = xchg(&adapter->xdp_prog, prog); + + /* If transitioning XDP modes reconfigure rings */ + if (need_reset) { + if (!adapter->xdp_prog && adapter->old_rss_limit) { + adapter->ring_feature[RING_F_FDIR].limit = adapter->old_rss_limit; + adapter->ring_feature[RING_F_RSS].limit = adapter->old_rss_limit; + } + + if(!prog) { + if ((adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) && adapter->lro_before_xdp) { + adapter->flags2 |= TXGBE_FLAG2_RSC_ENABLED; + dev->features |= NETIF_F_LRO; + } + } else { + adapter->lro_before_xdp = !!(adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED); + if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) { + e_dev_err("XDP not support LRO"); + dev->features &= ~NETIF_F_LRO; + adapter->flags2 &= ~TXGBE_FLAG2_RSC_ENABLED; + } + } + + if (adapter->xdp_prog) { + if (adapter->num_rx_queues > TXGBE_MAX_XDP_RSS_INDICES) { + adapter->old_rss_limit = adapter->ring_feature[RING_F_RSS].limit; + adapter->ring_feature[RING_F_FDIR].limit = TXGBE_MAX_XDP_RSS_INDICES; + adapter->ring_feature[RING_F_RSS].limit = TXGBE_MAX_XDP_RSS_INDICES; + e_dev_info("limit tx rx ring to 32 " + "because hw limit and xdpring take up half of the txring"); + } else { + adapter->old_rss_limit = 0; + } + } + + 
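+		/* Rebuild the interrupt/queue layout for the new XDP ring
+		 * count and reopen the interface if it was running.
+		 */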
txgbe_clear_interrupt_scheme(adapter); + + txgbe_init_interrupt_scheme(adapter); + if (netif_running(dev)) + txgbe_open(dev); + + } else { + for (i = 0; i < adapter->num_rx_queues; i++) + xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog); + } + if (old_prog) + bpf_prog_put(old_prog); + +#ifdef HAVE_AF_XDP_ZC_SUPPORT + /* Kick start the NAPI context if there is an AF_XDP socket open + * on that queue id. This so that receiving will start. + */ + if (need_reset && prog) { + for (i = 0; i < adapter->num_rx_queues; i++) { + if (adapter->xdp_ring[i]->xsk_pool) { +#ifdef HAVE_NDO_XSK_WAKEUP + (void)txgbe_xsk_wakeup(adapter->netdev, i, + XDP_WAKEUP_RX); +#else + (void)txgbe_xsk_async_xmit(adapter->netdev, i); +#endif + } + } + } +#endif + if (adapter->xdp_prog) + e_dev_info("xdp program is setup"); + else + e_dev_info("xdp program not load"); + + return 0; +} +#ifdef HAVE_NDO_BPF +static int txgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) +#else +static int txgbe_xdp(struct net_device *dev, struct netdev_xdp *xdp) +#endif +{ +#if defined(HAVE_XDP_QUERY_PROG) || defined(HAVE_AF_XDP_ZC_SUPPORT) + struct txgbe_adapter *adapter = netdev_priv(dev); +#endif + switch (xdp->command) { + case XDP_SETUP_PROG: + return txgbe_xdp_setup(dev, xdp->prog); +#ifdef HAVE_XDP_QUERY_PROG + case XDP_QUERY_PROG: +#ifndef NO_NETDEV_BPF_PROG_ATTACHED + xdp->prog_attached = !!(adapter->xdp_prog); +#endif /* !NO_NETDEV_BPF_PROG_ATTACHED */ + xdp->prog_id = adapter->xdp_prog ? + adapter->xdp_prog->aux->id : 0; + return 0; +#endif +#ifdef HAVE_AF_XDP_ZC_SUPPORT + case XDP_SETUP_XSK_POOL: +#ifndef HAVE_NETDEV_BPF_XSK_POOL + return txgbe_xsk_umem_setup(adapter, xdp->xsk.umem, + xdp->xsk.queue_id); +#else + return txgbe_xsk_umem_setup(adapter, xdp->xsk.pool, + xdp->xsk.queue_id); +#endif /* HAVE_NETDEV_BPF_XSK_POOL */ +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ + default: + return -EINVAL; + } +} +#ifdef HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS +static int txgbe_xdp_xmit(struct net_device *dev, int n, + struct xdp_frame **frames, u32 flags) +#else +static int txgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) +#endif +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + struct txgbe_ring *ring; +#ifdef HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS +#ifndef HAVE_XDP_NO_RETURN_RX + int drops = 0; +#endif + int nxmit = 0; + int i; +#else + int err; +#endif + if (unlikely(test_bit(__TXGBE_DOWN, &adapter->state))) + return -ENETDOWN; + +#ifdef HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; +#endif + /* During program transitions its possible adapter->xdp_prog is assigned + * but ring has not been configured yet. In this case simply abort xmit. + */ + ring = adapter->xdp_prog ? 
adapter->xdp_ring[smp_processor_id() % adapter->num_xdp_queues] : NULL; + if (unlikely(!ring)) + return -ENXIO; + +#ifdef HAVE_AF_XDP_ZC_SUPPORT + if (unlikely(test_bit(__TXGBE_TX_DISABLED, &ring->state))) + return -ENXIO; +#endif + +#ifdef HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS + if (static_branch_unlikely(&txgbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + int err; + + err = txgbe_xmit_xdp_ring(ring, xdpf); + if (err != TXGBE_XDP_TX) { +#ifndef HAVE_XDP_NO_RETURN_RX + xdp_return_frame_rx_napi(xdpf); + drops++; +#else + break; +#endif + } + nxmit++; + } + + if (unlikely(flags & XDP_XMIT_FLUSH)){ + wmb(); + writel(ring->next_to_use, ring->tail); + } + if (static_branch_unlikely(&txgbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); +#ifndef HAVE_XDP_NO_RETURN_RX + return n - drops; +#else + return nxmit; +#endif +#else /* HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS */ + + if (static_branch_unlikely(&txgbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + err = txgbe_xmit_xdp_ring(ring, xdp); + if (static_branch_unlikely(&txgbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); + if (err != TXGBE_XDP_TX) + return -ENOSPC; + + return 0; +#endif +} + +#ifndef NO_NDO_XDP_FLUSH +static void txgbe_xdp_flush(struct net_device *dev) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + struct txgbe_ring *ring; + + /* Its possible the device went down between xdp xmit and flush so + * we need to ensure device is still up. + */ + if (unlikely(test_bit(__TXGBE_DOWN, &adapter->state))) + return; + + ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id() % adapter->num_xdp_queues] : NULL; + if (unlikely(!ring)) + return; + + wmb(); + writel(ring->next_to_use, ring->tail); + + return; +} +#endif /* !NO_NDO_XDP_FLUSH */ +#endif /*HAVE_XDP_SUPPORT*/ + +#ifdef HAVE_NETDEV_SELECT_QUEUE + +#if defined(HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED) +static u16 txgbe_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +#elif defined(HAVE_NDO_SELECT_QUEUE_SB_DEV) +static u16 txgbe_select_queue(struct net_device *dev, struct sk_buff *skb, + __always_unused struct net_device *sb_dev, + select_queue_fallback_t fallback) +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) +static u16 txgbe_select_queue(struct net_device *dev, struct sk_buff *skb, + __always_unused void *accel, + select_queue_fallback_t fallback) +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL) +static u16 txgbe_select_queue(struct net_device *dev, struct sk_buff *skb, + __always_unused void *accel) +#else +static u16 txgbe_select_queue(struct net_device *dev, struct sk_buff *skb) +#endif /* HAVE_NDO_SELECT_QUEUE_ACCEL */ +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + int queue; +#if IS_ENABLED(CONFIG_FCOE) + struct txgbe_ring_feature *f; + int txq; +#endif + + if (adapter->vlan_rate_link_speed) { + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED || + adapter->flags & TXGBE_FLAG_FCOE_ENABLED) +#if IS_ENABLED(CONFIG_FCOE) + goto fcoe; +#else + goto skip_select; +#endif + + if (skb_vlan_tag_present(skb)) { + u16 vlan_id = skb_vlan_tag_get_id(skb); + if (test_bit(vlan_id, adapter->limited_vlans)) { + int r_idx = adapter->num_tx_queues - 1 - + txgbe_find_nth_limited_vlan(adapter, vlan_id); + return r_idx; + } + } + } +#if IS_ENABLED(CONFIG_FCOE) +fcoe: + /* + * only execute the code below if protocol is FCoE + * or FIP and we have FCoE enabled on the adapter + */ + switch (vlan_get_protocol(skb)) { + case __constant_htons(ETH_P_FCOE): + case 
__constant_htons(ETH_P_FIP): + adapter = netdev_priv(dev); + + if (adapter->flags & TXGBE_FLAG_FCOE_ENABLED) + break; + fallthrough; + default: + goto skip_select; + } + + f = &adapter->ring_feature[RING_F_FCOE]; + + txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : + smp_processor_id(); + + while (txq >= f->indices) + txq -= f->indices; + + return txq + f->offset; +#endif/*FCOE*/ +skip_select: +#if defined(HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED) + queue = netdev_pick_tx(dev, skb, sb_dev); +#elif defined(HAVE_NDO_SELECT_QUEUE_SB_DEV) + queue = fallback(dev, skb, sb_dev); +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) + queue = fallback(dev, skb); +#else + queue = __netdev_pick_tx(dev, skb); +#endif + if (adapter->vlan_rate_link_speed) + queue = queue % (adapter->num_tx_queues - + adapter->active_vlan_limited); + + return queue; +} +#endif /* HAVE_NETDEV_SELECT_QUEUE */ + +/** + * skb_pad - zero pad the tail of an skb + * @skb: buffer to pad + * @pad: space to pad + * + * Ensure that a buffer is followed by a padding area that is zero + * filled. Used by network drivers which may DMA or transfer data + * beyond the buffer end onto the wire. + * + * May return error in out of memory cases. The skb is freed on error. + */ + +static int txgbe_skb_pad_nonzero(struct sk_buff *skb, int pad) +{ + int err; + int ntail; + + /* If the skbuff is non linear tailroom is always zero.. */ + if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { + memset(skb->data+skb->len, 0x1, pad); + return 0; + } + + ntail = skb->data_len + pad - (skb->end - skb->tail); + if (likely(skb_cloned(skb) || ntail > 0)) { + err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); + if (unlikely(err)) + goto free_skb; + } + + /* FIXME: The use of this function with non-linear skb's really needs + * to be audited. 
+ */ + err = skb_linearize(skb); + if (unlikely(err)) + goto free_skb; + + memset(skb->data + skb->len, 0x1, pad); + return 0; + +free_skb: + kfree_skb(skb); + return err; +} + + +netdev_tx_t txgbe_xmit_frame_ring(struct sk_buff *skb, + struct txgbe_adapter __maybe_unused *adapter, + struct txgbe_ring *tx_ring) +{ + struct txgbe_tx_buffer *first; + int tso; + u32 tx_flags = 0; + unsigned short f; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = skb->protocol; + u8 hdr_len = 0; + txgbe_dptype dptype; + u8 vlan_addlen = 0; + + /* work around hw errata 3 */ + u16 _llcLen, *llcLen; + llcLen = skb_header_pointer(skb, ETH_HLEN - 2, sizeof(u16), &_llcLen); + if (*llcLen == 0x3 || *llcLen == 0x4 || *llcLen == 0x5) { + if (txgbe_skb_pad_nonzero(skb, ETH_ZLEN - skb->len)) + return -ENOMEM; + __skb_put(skb, ETH_ZLEN - skb->len); + } + + /* + * need: 1 descriptor per page * PAGE_SIZE/TXGBE_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/TXGBE_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)-> + frags[f])); + + if (txgbe_maybe_stop_tx(tx_ring, count + adapter->desc_reserved + 3)) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + /* if we have a HW VLAN tag being added default to the HW one */ + if (skb_vlan_tag_present(skb)) { + tx_flags |= skb_vlan_tag_get(skb) << TXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= TXGBE_TX_FLAGS_HW_VLAN; + /* else if it is a SW VLAN check the next protocol and store the tag */ + } else if (protocol == htons(ETH_P_8021Q)) { + struct vlan_hdr *vhdr, _vhdr; + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) + goto out_drop; + + protocol = vhdr->h_vlan_encapsulated_proto; + tx_flags |= ntohs(vhdr->h_vlan_TCI) << + TXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= TXGBE_TX_FLAGS_SW_VLAN; + vlan_addlen += VLAN_HLEN; + } + + if (protocol == htons(ETH_P_8021Q) || + protocol == htons(ETH_P_8021AD)) { + tx_flags |= TXGBE_TX_FLAGS_SW_VLAN; + vlan_addlen += VLAN_HLEN; + } + + protocol = vlan_get_protocol(skb); + +#ifdef HAVE_PTP_1588_CLOCK +#ifdef SKB_SHARED_TX_IS_UNION + if (unlikely(skb_tx(skb)->hardware) && + adapter->ptp_clock) { + if(!test_and_set_bit_lock(__TXGBE_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_tx(skb)->in_progress = 1; +#else + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + adapter->ptp_clock) { + if (!test_and_set_bit_lock(__TXGBE_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; +#endif + tx_flags |= TXGBE_TX_FLAGS_TSTAMP; + + /* schedule check for Tx timestamp */ + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + schedule_work(&adapter->ptp_tx_work); + } else { + adapter->tx_hwtstamp_skipped++; + } + } + +#endif +#ifdef CONFIG_PCI_IOV + /* + * Use the l2switch_enable flag - would be false if the DMA + * Tx switch had been disabled. 
+ */ + if (adapter->flags & TXGBE_FLAG_SRIOV_L2SWITCH_ENABLE) + tx_flags |= TXGBE_TX_FLAGS_CC; + +#endif +#ifdef HAVE_TX_MQ + if ((adapter->flags & TXGBE_FLAG_DCB_ENABLED) && + ((tx_flags & (TXGBE_TX_FLAGS_HW_VLAN | TXGBE_TX_FLAGS_SW_VLAN)) || + (skb->priority != TC_PRIO_CONTROL))) { + tx_flags &= ~TXGBE_TX_FLAGS_VLAN_PRIO_MASK; +#if IS_ENABLED(CONFIG_FCOE) + /* for FCoE with DCB, we force the priority to what + * was specified by the switch */ + if ((adapter->flags & TXGBE_FLAG_FCOE_ENABLED) && + ((protocol == htons(ETH_P_FCOE)) || + (protocol == htons(ETH_P_FIP)))) + tx_flags |= adapter->fcoe.up << + TXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; + else +#endif /* CONFIG_FCOE */ + tx_flags |= skb->priority << + TXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; + if (tx_flags & TXGBE_TX_FLAGS_SW_VLAN) { + struct vlan_ethhdr *vhdr; + if (skb_header_cloned(skb) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) + goto out_drop; + vhdr = (struct vlan_ethhdr *)skb->data; + vhdr->h_vlan_TCI = htons(tx_flags >> + TXGBE_TX_FLAGS_VLAN_SHIFT); + } else { + tx_flags |= TXGBE_TX_FLAGS_HW_VLAN; + } + } + +#endif /* HAVE_TX_MQ */ + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + + dptype = encode_tx_desc_ptype(first); + +#if IS_ENABLED(CONFIG_FCOE) + /* setup tx offload for FCoE */ + if ((protocol == htons(ETH_P_FCOE)) && + (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) { + tso = txgbe_fso(tx_ring, first, &hdr_len); + if (tso < 0) + goto out_drop; + + goto xmit_fcoe; + } else if (protocol == htons(ETH_P_FIP)) { + /* FCoE stack has a bug where it does not set the network + * header offset for FIP frames sent resulting into MACLEN + * being set to ZERO in the Tx context descriptor. + * This will cause MDD events when trying to Tx such frames. + */ + if (!skb_network_offset(skb)) { + if (tx_flags & (TXGBE_TX_FLAGS_HW_VLAN | + TXGBE_TX_FLAGS_SW_VLAN)) + skb_set_network_header(skb, + sizeof(struct ethhdr) + + sizeof(struct vlan_hdr) + + vlan_addlen); + else + skb_set_network_header(skb, + sizeof(struct ethhdr)); + } + } +#endif /* CONFIG_FCOE */ + + tso = txgbe_tso(tx_ring, first, &hdr_len, dptype); + if (tso < 0) + goto out_drop; + else if (!tso) + txgbe_tx_csum(tx_ring, first, dptype); + + /* add the ATR filter if ATR is on */ + if (test_bit(__TXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) + txgbe_atr(tx_ring, first, dptype); + +#if IS_ENABLED(CONFIG_FCOE) +xmit_fcoe: +#endif /* CONFIG_FCOE */ +#ifdef HAVE_PTP_1588_CLOCK + if (txgbe_tx_map(tx_ring, first, hdr_len)) + goto cleanup_tx_tstamp; +#else + txgbe_tx_map(tx_ring, first, hdr_len); +#endif + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; +#ifdef HAVE_PTP_1588_CLOCK +cleanup_tx_tstamp: + if (unlikely(tx_flags & TXGBE_TX_FLAGS_TSTAMP)) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + cancel_work_sync(&adapter->ptp_tx_work); + clear_bit_unlock(__TXGBE_PTP_TX_IN_PROGRESS, &adapter->state); + } +#endif + + return NETDEV_TX_OK; +} + +static netdev_tx_t txgbe_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_ring *tx_ring; +#ifdef HAVE_TX_MQ + unsigned int r_idx = skb->queue_mapping; +#endif + + if (!netif_carrier_ok(netdev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* + * The minimum packet size for olinfo paylen is 17 so pad the skb + * in order to meet this minimum size requirement. 
+ */ + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; + +#ifdef HAVE_TX_MQ + if (!adapter->num_tx_queues) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + tx_ring = adapter->tx_ring[r_idx]; +#else + tx_ring = adapter->tx_ring[0]; +#endif + +#ifdef HAVE_AF_XDP_ZC_SUPPORT + if (unlikely(test_bit(__TXGBE_TX_DISABLED, &tx_ring->state))) + return NETDEV_TX_BUSY; +#endif + + if (tx_ring->tx_buffer_info == NULL) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + return txgbe_xmit_frame_ring(skb, adapter, tx_ring); +} + +/** + * txgbe_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int txgbe_set_mac(struct net_device *netdev, void *p) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr((u8 *)addr->sa_data)) + return -EADDRNOTAVAIL; + + txgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); + //memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, (u8 *)addr->sa_data); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + txgbe_mac_set_default_filter(adapter, hw->mac.addr); + e_info(drv, "The mac has been set to %02X:%02X:%02X:%02X:%02X:%02X\n", + hw->mac.addr[0], hw->mac.addr[1], hw->mac.addr[2], + hw->mac.addr[3], hw->mac.addr[4], hw->mac.addr[5]); + + return 0; +} + +#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) +/** + * txgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding + * netdev->dev_addr_list + * @netdev: network interface device structure + * + * Returns non-zero on failure + **/ +static int txgbe_add_sanmac_netdev(struct net_device *dev) +{ + int err = 0; + struct txgbe_adapter *adapter = netdev_priv(dev); + struct txgbe_hw *hw = &adapter->hw; + + if (is_valid_ether_addr(hw->mac.san_addr)) { + rtnl_lock(); + err = dev_addr_add(dev, hw->mac.san_addr, + NETDEV_HW_ADDR_T_SAN); + rtnl_unlock(); + + /* update SAN MAC vmdq pool selection */ + TCALL(hw, mac.ops.set_vmdq_san_mac, VMDQ_P(0)); + } + return err; +} + +/** + * txgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding + * netdev->dev_addr_list + * @netdev: network interface device structure + * + * Returns non-zero on failure + **/ +static int txgbe_del_sanmac_netdev(struct net_device *dev) +{ + int err = 0; + struct txgbe_adapter *adapter = netdev_priv(dev); + struct txgbe_mac_info *mac = &adapter->hw.mac; + + if (is_valid_ether_addr(mac->san_addr)) { + rtnl_lock(); + err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); + rtnl_unlock(); + } + return err; +} + +#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) */ + + +static int txgbe_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct mii_ioctl_data *mii = (struct mii_ioctl_data *) &ifr->ifr_data; + int prtad, devad, ret; + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u16 value = 0; + + prtad = (mii->phy_id & MDIO_PHY_ID_PRTAD) >> 5; + devad = (mii->phy_id & MDIO_PHY_ID_DEVAD); + + if (cmd == SIOCGMIIREG) { + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) { + ret = txgbe_read_mdio(&hw->phy_dev, prtad, devad, mii->reg_num, + &value); + if (ret < 0) + return ret; + mii->val_out = value; + return 0; + } + return 
-EOPNOTSUPP; + } else { + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) { + return txgbe_write_mdio(&hw->phy_dev, prtad, devad, + mii->reg_num, mii->val_in); + } + return -EOPNOTSUPP; + } +} + +int txgbe_find_nth_limited_vlan(struct txgbe_adapter *adapter, int vlan) +{ + return bitmap_weight(adapter->limited_vlans, vlan+1) - 1; +} + +void txgbe_del_vlan_limit(struct txgbe_adapter *adapter, int vlan) +{ + int new_queue_rate_limit[64]; + int idx = 0; + int i = 0, j = 0; + + if (!test_bit(vlan, adapter->limited_vlans)) + return; + + idx = txgbe_find_nth_limited_vlan(adapter, vlan); + for (; i < bitmap_weight(adapter->limited_vlans, 4096); i++) { + if (i != idx) + new_queue_rate_limit[j++] = adapter->queue_rate_limit[i]; + } + + memcpy(adapter->queue_rate_limit, new_queue_rate_limit, sizeof(int) * 64); + clear_bit(vlan, adapter->limited_vlans); + +} + +void txgbe_set_vlan_limit(struct txgbe_adapter *adapter, int vlan, int rate_limit) +{ + int new_queue_rate_limit[64]; + int idx = 0; + int i = 0, j = 0; + + if (test_and_set_bit(vlan, adapter->limited_vlans)) { + idx = txgbe_find_nth_limited_vlan(adapter, vlan); + adapter->queue_rate_limit[idx] = rate_limit; + return; + } + + idx = txgbe_find_nth_limited_vlan(adapter, vlan); + for (; j < bitmap_weight(adapter->limited_vlans, 4096); j++) { + if (j == idx) + new_queue_rate_limit[j] = rate_limit; + else + new_queue_rate_limit[j] = adapter->queue_rate_limit[i++]; + } + + memcpy(adapter->queue_rate_limit, new_queue_rate_limit, sizeof(int) * 64); + +} + +void txgbe_check_vlan_rate_limit(struct txgbe_adapter *adapter) +{ + int i; + + if (!adapter->vlan_rate_link_speed) + return; + + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED || + adapter->flags & TXGBE_FLAG_FCOE_ENABLED) { + e_dev_info("Can't limit vlan rate when enable SRIOV or FCOE"); + goto resume_rate; + } + + if (txgbe_link_mbps(adapter) != adapter->vlan_rate_link_speed) { + dev_info(pci_dev_to_dev(adapter->pdev), + "Link speed has been changed. 
vlan Transmit rate is disabled\n"); + goto resume_rate; + } + + if (adapter->active_vlan_limited > adapter->num_tx_queues) { + e_dev_err("limited vlan bigger than num of tx ring, " + "disabled vlan limit\n"); + goto resume_rate; + } + + for (i = 0; i < adapter->active_vlan_limited; i++) { + txgbe_set_queue_rate_limit(&adapter->hw, + (adapter->num_tx_queues - i - 1), adapter->queue_rate_limit[i]); + } + return; +resume_rate: + e_dev_info("clear all vlan limit"); + bitmap_zero(adapter->limited_vlans, 4096); + adapter->active_vlan_limited = bitmap_weight(adapter->limited_vlans, 4096); + adapter->vlan_rate_link_speed = 0; + memset(adapter->queue_rate_limit, 0, sizeof(int) * 64); + adapter->vlan_rate_link_speed = 0; + for (i = 0; i < adapter->num_tx_queues; i++) + txgbe_set_queue_rate_limit(&adapter->hw, i , 0); + +} + +struct vlan_rate_param { + int count; + unsigned short vlans[64]; + unsigned int rates[64]; +}; + +#define SIOCSVLANRATE (SIOCDEVPRIVATE+0xe) +#define SIOCGVLANRATE (SIOCDEVPRIVATE+0xf) + +static int txgbe_vlan_rate_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct vlan_rate_param param; + int i; + int link_speed; + int set_num = 0; + + if (cmd != SIOCSVLANRATE) + return -EOPNOTSUPP; + + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED || + adapter->flags & TXGBE_FLAG_FCOE_ENABLED){ + e_dev_err("Not support vlan limit when enable SRIOV of FCOE"); + return -EINVAL; + } + + if (!netif_carrier_ok(netdev) || + adapter->link_speed < TXGBE_LINK_SPEED_1GB_FULL) { + e_dev_err("please set vlan rate limit when link up, speed 1G not support"); + return -EINVAL; + } + + link_speed = txgbe_link_mbps(adapter); + + if (copy_from_user(¶m, ifr->ifr_data, sizeof(param))) + return -EFAULT; + + if (param.count == 0) { + e_dev_info("clear all vlan limit"); + bitmap_zero(adapter->limited_vlans, 4096); + adapter->vlan_rate_link_speed = 0; + memset(adapter->queue_rate_limit, 0, sizeof(int) * 64); + goto after_set; + } + + for (i = 0; i < param.count; i++) { + if ((param.vlans[i] > 4095) || + (param.rates[i] != 0 && param.rates[i] <= 10) || + (param.rates[i] > link_speed)) { + e_dev_err("Invalid param: VLAN_ID(0~4095): %d, rate(0,11~linkspeed):%d\n", + param.vlans[i], param.rates[i]); + return -EINVAL; + } + } + + for (i = 0; i < param.count; i++) + if (param.rates[i]) + set_num++; + else + if (test_bit(param.vlans[i], adapter->limited_vlans) && + param.rates[i] == 0) + set_num--; + + if (param.count <= 0 || param.count > 64 || + (set_num + adapter->active_vlan_limited > adapter->num_tx_queues - 1)) { + e_dev_err("Invalid VLAN set count: %d, now active limited vlan count:%d " + "total num of limited vlan should not bigger than (num_of_txring - 1):%d", + set_num, adapter->active_vlan_limited, adapter->num_tx_queues - 1); + return -EINVAL; + } + + adapter->vlan_rate_link_speed = link_speed; + for (i = 0; i < param.count; i++) + if (param.rates[i]) + txgbe_set_vlan_limit(adapter, param.vlans[i], param.rates[i]); + else + txgbe_del_vlan_limit(adapter, param.vlans[i]); +after_set: + /*clear all rate limit*/ + for (i = 0; i < adapter->num_tx_queues; i++) + txgbe_set_queue_rate_limit(&adapter->hw, i, 0); + + adapter->active_vlan_limited = bitmap_weight(adapter->limited_vlans, 4096); + + for (i = 0; i < adapter->active_vlan_limited; i++) { + txgbe_set_queue_rate_limit(&adapter->hw, + adapter->num_tx_queues - i - 1, adapter->queue_rate_limit[i]); + } + return 0; +} + +static int txgbe_get_vlan_rate_ioctl(struct net_device *netdev, struct 
ifreq *ifr, int cmd) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct vlan_rate_param param; + int i = 0, n = 0; + + if (cmd != SIOCGVLANRATE) + return -EOPNOTSUPP; + + for_each_set_bit(i, adapter->limited_vlans, 4096) { + param.vlans[n] = i; + param.rates[n] = adapter->queue_rate_limit[n]; + n++; + } + param.count = n; + + if (copy_to_user(ifr->ifr_data, ¶m, sizeof(param))) + return -EFAULT; + + return 0; +} + +static int txgbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ +#ifdef HAVE_PTP_1588_CLOCK + struct txgbe_adapter *adapter = netdev_priv(netdev); +#endif + switch (cmd) { +#ifdef HAVE_PTP_1588_CLOCK +#ifdef SIOCGHWTSTAMP + case SIOCGHWTSTAMP: + return txgbe_ptp_get_ts_config(adapter, ifr); +#endif + case SIOCSHWTSTAMP: + return txgbe_ptp_set_ts_config(adapter, ifr); +#endif +#ifdef ETHTOOL_OPS_COMPAT + case SIOCETHTOOL: + return ethtool_ioctl(ifr); +#endif + case SIOCGMIIREG: + case SIOCSMIIREG: + return txgbe_mii_ioctl(netdev, ifr, cmd); + case SIOCSVLANRATE: + return txgbe_vlan_rate_ioctl(netdev, ifr, cmd); + case SIOCGVLANRATE: + return txgbe_get_vlan_rate_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + + +#ifdef HAVE_NDO_IOCTLPRIVATE +static int txgbe_siocdevprivate(struct net_device *netdev, struct ifreq *ifr, + void __user *data, int cmd) +{ + return txgbe_ioctl(netdev, ifr, cmd); +} +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void txgbe_netpoll(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + /* if interface is down do nothing */ + if (test_bit(__TXGBE_DOWN, &adapter->state)) + return; + + if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) { + int i; + for (i = 0; i < adapter->num_q_vectors; i++) { + adapter->q_vector[i]->netpoll_rx = true; + txgbe_msix_clean_rings(0, adapter->q_vector[i]); + adapter->q_vector[i]->netpoll_rx = false; + } + } else { + txgbe_intr(0, adapter); + } +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +/* txgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. + * @adapter: pointer to txgbe_adapter + * @tc: number of traffic classes currently enabled + * + * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm + * 802.1Q priority maps to a packet buffer that exists. + */ +static void txgbe_validate_rtr(struct txgbe_adapter *adapter, u8 tc) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 reg, rsave; + int i; + + reg = rd32(hw, TXGBE_RDB_UP2TC); + rsave = reg; + + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + u8 up2tc = 0xF & (reg >> (i * TXGBE_RDB_UP2TC_UP_SHIFT)); + + /* If up2tc is out of bounds default to zero */ + if (up2tc > tc) + reg &= ~(0xF << (i * TXGBE_RDB_UP2TC_UP_SHIFT)); + } + + if (reg != rsave) + wr32(hw, TXGBE_RDB_UP2TC, reg); + + return; +} + +/** + * txgbe_set_prio_tc_map - Configure netdev prio tc map + * @adapter: Pointer to adapter struct * - * The open entry point is called when a network interface is made - * active by the system (IFF_UP). 
+ * Populate the netdev user priority to tc map + */ +static void txgbe_set_prio_tc_map(struct txgbe_adapter __maybe_unused *adapter) +{ +#ifdef HAVE_DCBNL_IEEE + struct net_device *dev = adapter->netdev; + struct txgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; + struct ieee_ets *ets = adapter->txgbe_ieee_ets; + u8 prio; + + for (prio = 0; prio < TXGBE_DCB_MAX_USER_PRIORITY; prio++) { + u8 tc = 0; + + if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) + tc = txgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio); + else if (ets) + tc = ets->prio_tc[prio]; + + netdev_set_prio_tc_map(dev, prio, tc); + } +#else + UNREFERENCED_PARAMETER(adapter); +#endif /*HAVE_DCBNL_IEEE*/ +} + +/** + * txgbe_setup_tc - routine to configure net_device for multiple traffic + * classes. + * + * @netdev: net device to configure + * @tc: number of traffic classes to enable + */ +int txgbe_setup_tc(struct net_device *dev, u8 tc) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + + /* Hardware supports up to 8 traffic classes */ + if (tc > adapter->dcb_cfg.num_tcs.pg_tcs) + return -EINVAL; + + if (tc && adapter->num_vmdqs > TXGBE_MAX_DCBMACVLANS) + return -EBUSY; + + if (adapter->xdp_prog) { + if (adapter->num_rx_queues > TXGBE_MAX_XDP_RSS_INDICES) { + adapter->old_rss_limit = adapter->ring_feature[RING_F_RSS].limit; + adapter->ring_feature[RING_F_FDIR].limit = TXGBE_MAX_XDP_RSS_INDICES; + adapter->ring_feature[RING_F_RSS].limit = TXGBE_MAX_XDP_RSS_INDICES; + e_dev_info("limit tx rx ring to 32 " + "because hw limit and xdpring take up half of the txring"); + } + else { + adapter->old_rss_limit = 0; + } + } + /* Hardware has to reinitialize queues and interrupts to + * match packet buffer alignment. Unfortunately, the + * hardware is not flexible enough to do this dynamically. + */ + if (netif_running(dev)) + txgbe_close(dev); + else + txgbe_reset(adapter); + + txgbe_clear_interrupt_scheme(adapter); + + if (tc) { + netdev_set_num_tc(dev, tc); + txgbe_set_prio_tc_map(adapter); + + adapter->flags |= TXGBE_FLAG_DCB_ENABLED; + + } else { + netdev_reset_tc(dev); + + adapter->flags &= ~TXGBE_FLAG_DCB_ENABLED; + + adapter->temp_dcb_cfg.pfc_mode_enable = false; + adapter->dcb_cfg.pfc_mode_enable = false; + } + + txgbe_validate_rtr(adapter, tc); + + txgbe_init_interrupt_scheme(adapter); + if (netif_running(dev)) + txgbe_open(dev); + return 0; +} + +#ifdef NETIF_F_HW_TC +#ifdef TC_MQPRIO_HW_OFFLOAD_MAX +static int txgbe_setup_tc_mqprio(struct net_device *dev, + struct tc_mqprio_qopt *mqprio) +{ + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + return txgbe_setup_tc(dev, mqprio->num_tc); +} +#endif +#if defined(HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV) +static int +__txgbe_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +#elif defined(HAVE_NDO_SETUP_TC_CHAIN_INDEX) +static int +__txgbe_setup_tc(struct net_device *dev, __always_unused u32 handle, + u32 chain_index, __always_unused __be16 proto, + struct tc_to_netdev *tc) +#else +static int +__txgbe_setup_tc(struct net_device *dev, __always_unused u32 handle, + __always_unused __be16 proto, struct tc_to_netdev *tc) +#endif +{ +#ifndef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + unsigned int type = tc->type; + +#ifdef HAVE_NDO_SETUP_TC_CHAIN_INDEX + if (chain_index) + return -EOPNOTSUPP; + +#endif +#endif + switch (type) { + case TC_SETUP_QDISC_MQPRIO: +#if defined(HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV) + return txgbe_setup_tc_mqprio(dev, type_data); +#elif defined(TC_MQPRIO_HW_OFFLOAD_MAX) && \ + (!(defined(TXGBE_SUPPORT_KYLIN_FT))) && \ + (!(defined(TXGBE_SUPPORT_KYLIN_LX))) 
+ return txgbe_setup_tc_mqprio(dev, tc->mqprio); +#else + return txgbe_setup_tc(dev, tc->tc); +#endif + default: + return -EOPNOTSUPP; + } +} +#endif /* NETIF_F_HW_TC */ + +#ifdef CONFIG_PCI_IOV +void txgbe_sriov_reinit(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + rtnl_lock(); + txgbe_setup_tc(netdev, netdev_get_num_tc(netdev)); + rtnl_unlock(); +} +#endif + +void txgbe_do_reset(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + txgbe_reinit_locked(adapter); + else + txgbe_reset(adapter); +} + +#ifdef HAVE_NDO_SET_FEATURES +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +static u32 txgbe_fix_features(struct net_device *netdev, u32 features) +#else +static netdev_features_t txgbe_fix_features(struct net_device *netdev, + netdev_features_t features) +#endif +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); +#if IS_ENABLED(CONFIG_DCB) + if (adapter->flags & TXGBE_FLAG_DCB_ENABLED) +#if (defined NETIF_F_HW_VLAN_CTAG_RX) && (defined NETIF_F_HW_VLAN_STAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_RX; +#elif (defined NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_RX; +#elif (defined NETIF_F_HW_VLAN_STAG_RX) + features |= NETIF_F_HW_VLAN_STAG_RX; +#else + features |= NETIF_F_HW_VLAN_RX; +#endif +#endif /* CONFIG_DCB */ + /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + +#ifdef TXGBE_NO_LRO + /* Turn off LRO if not RSC capable */ + if (!(adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE)) + features &= ~NETIF_F_LRO; +#endif + if (adapter->xdp_prog) { + adapter->lro_before_xdp = !!(features & NETIF_F_LRO); + if (features & NETIF_F_LRO) { + e_dev_err("LRO is not supported with XDP\n"); + features &= ~NETIF_F_LRO; + } + } + +#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { +#if defined(NETIF_F_HW_VLAN_STAG_FILTER) + features |= NETIF_F_HW_VLAN_STAG_FILTER; + } else { + features &= ~NETIF_F_HW_VLAN_STAG_FILTER; +#endif + } +#endif + +#if (defined NETIF_F_HW_VLAN_CTAG_RX) && (defined NETIF_F_HW_VLAN_STAG_RX) + if (!(features & NETIF_F_HW_VLAN_CTAG_RX)) + features &= ~NETIF_F_HW_VLAN_STAG_RX; + else + features |= NETIF_F_HW_VLAN_STAG_RX; + if (!(features & NETIF_F_HW_VLAN_CTAG_TX)) + features &= ~NETIF_F_HW_VLAN_STAG_TX; + else + features |= NETIF_F_HW_VLAN_STAG_TX; +#endif + return features; +} + +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +static int txgbe_set_features(struct net_device *netdev, u32 features) +#else +static int txgbe_set_features(struct net_device *netdev, + netdev_features_t features) +#endif +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + bool need_reset = false; + netdev_features_t changed = netdev->features ^ features; + + /* Make sure RSC matches LRO, reset if change */ + if (!(features & NETIF_F_LRO)) { + if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) + need_reset = true; + adapter->flags2 &= ~TXGBE_FLAG2_RSC_ENABLED; + } else if ((adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) && + !(adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED)) { + if (adapter->rx_itr_setting == 1 || + adapter->rx_itr_setting > TXGBE_MIN_RSC_ITR) { + adapter->flags2 |= TXGBE_FLAG2_RSC_ENABLED; + need_reset = true; + } else if ((netdev->features ^ features) & NETIF_F_LRO) { +#ifdef TXGBE_NO_LRO + e_info(probe, "rx-usecs set too low, " + "disabling RSC\n"); +#else + e_info(probe, "rx-usecs set too low, " + "falling back to software LRO\n"); +#endif + } + } 
+ + /* + * Check if Flow Director n-tuple support was enabled or disabled. If + * the state changed, we need to reset. + */ + switch (features & NETIF_F_NTUPLE) { + case NETIF_F_NTUPLE: + /* turn off ATR, enable perfect filters and reset */ + if (!(adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + need_reset = true; + + adapter->flags &= ~TXGBE_FLAG_FDIR_HASH_CAPABLE; + adapter->flags |= TXGBE_FLAG_FDIR_PERFECT_CAPABLE; + break; + default: + /* turn off perfect filters, enable ATR and reset */ + if (adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE) + need_reset = true; + + adapter->flags &= ~TXGBE_FLAG_FDIR_PERFECT_CAPABLE; + + /* We cannot enable ATR if VMDq is enabled */ + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) + break; + + /* We cannot enable ATR if we have 2 or more traffic classes */ + if (netdev_get_num_tc(netdev) > 1) + break; + + /* We cannot enable ATR if RSS is disabled */ + if (adapter->ring_feature[RING_F_RSS].limit <= 1) + break; + + /* A sample rate of 0 indicates ATR disabled */ + if (!adapter->atr_sample_rate) + break; + + adapter->flags |= TXGBE_FLAG_FDIR_HASH_CAPABLE; + break; + } + +#ifdef HAVE_VXLAN_CHECKS + if (adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE && + features & NETIF_F_RXCSUM) { + //if (!need_reset) + // adapter->flags2 |= TXGBE_FLAG2_VXLAN_REREG_NEEDED; + } else { + txgbe_clear_vxlan_port(adapter); + } +#endif /* HAVE_VXLAN_CHECKS */ + + if (features & NETIF_F_RXHASH) { + if (!(adapter->flags2 & TXGBE_FLAG2_RSS_ENABLED)) { + wr32m(&adapter->hw, TXGBE_RDB_RA_CTL, + TXGBE_RDB_RA_CTL_RSS_EN, TXGBE_RDB_RA_CTL_RSS_EN); + adapter->flags2 |= TXGBE_FLAG2_RSS_ENABLED; + } + } else { + if (adapter->flags2 & TXGBE_FLAG2_RSS_ENABLED) { + wr32m(&adapter->hw, TXGBE_RDB_RA_CTL, + TXGBE_RDB_RA_CTL_RSS_EN, ~TXGBE_RDB_RA_CTL_RSS_EN); + adapter->flags2 &= ~TXGBE_FLAG2_RSS_ENABLED; + } + } + + netdev->features = features; + +#ifdef NETIF_F_HW_VLAN_CTAG_FILTER + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + need_reset = true; +#endif +#ifdef NETIF_F_HW_VLAN_FILTER + if (changed & NETIF_F_HW_VLAN_RX) + need_reset = true; +#endif + + if (need_reset) + txgbe_do_reset(netdev); +#ifdef NETIF_F_HW_VLAN_CTAG_FILTER + else if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) + txgbe_set_rx_mode(netdev); +#endif +#ifdef NETIF_F_HW_VLAN_FILTER + else if (changed & NETIF_F_HW_VLAN_FILTER) + txgbe_set_rx_mode(netdev); +#endif + return 0; + +} +#endif /* HAVE_NDO_SET_FEATURES */ + +#ifdef HAVE_UDP_ENC_RX_OFFLOAD +/** + * txgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports + * @dev: The port's netdev + * @ti: Tunnel endpoint information **/ -static int txgbe_open(struct net_device *netdev) +static void txgbe_add_udp_tunnel_port(struct net_device *dev, + struct udp_tunnel_info *ti) { - struct wx *wx = netdev_priv(netdev); - int err; + struct txgbe_adapter *adapter = netdev_priv(dev); + struct txgbe_hw *hw = &adapter->hw; + u16 port = ntohs(ti->port); + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + + if (adapter->vxlan_port == port) + return; + + if (adapter->vxlan_port) { + netdev_info(dev, + "VXLAN port %d set, not adding port %d\n", + ntohs(adapter->vxlan_port), + ntohs(port)); + return; + } + + adapter->vxlan_port = port; + wr32(hw, TXGBE_CFG_VXLAN, port); + break; + case UDP_TUNNEL_TYPE_GENEVE: + // if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + // return; + + if (adapter->geneve_port == port) + return; + + if (adapter->geneve_port) { + netdev_info(dev, + "GENEVE port %d set, 
not adding port %d\n", + ntohs(adapter->geneve_port), + ntohs(port)); + return; + } + + adapter->geneve_port = port; + wr32(hw, TXGBE_CFG_GENEVE, port); + break; + default: + return; + } + + +} + +/** + * txgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports + * @dev: The port's netdev + * @ti: Tunnel endpoint information + **/ +static void txgbe_del_udp_tunnel_port(struct net_device *dev, + struct udp_tunnel_info *ti) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + + if (ti->type != UDP_TUNNEL_TYPE_VXLAN && + ti->type != UDP_TUNNEL_TYPE_GENEVE) + return; + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + + if (adapter->vxlan_port != ntohs(ti->port)) { + netdev_info(dev, "VXLAN port %d not found\n", + ntohs(ti->port)); + return; + } + + txgbe_clear_vxlan_port(adapter); + adapter->flags2 |= TXGBE_FLAG2_VXLAN_REREG_NEEDED; + break; + case UDP_TUNNEL_TYPE_GENEVE: +// if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) +// return; + + if (adapter->geneve_port != ntohs(ti->port)) { + netdev_info(dev, "GENEVE port %d not found\n", + ntohs(ti->port)); + return; + } + + adapter->geneve_port = 0; + break; + default: + return; + } + +} +#ifdef HAVE_UDP_TUNNEL_NIC_INFO +static int txgbe_udp_tunnel_set(struct net_device *dev, + unsigned int table, unsigned int entry, + struct udp_tunnel_info *ti) +{ + txgbe_add_udp_tunnel_port(dev, ti); + return 0; +} + +static int txgbe_udp_tunnel_unset(struct net_device *dev, + unsigned int table, unsigned int entry, + struct udp_tunnel_info *ti) +{ + txgbe_del_udp_tunnel_port(dev, ti); + + return 0; +} + +static const struct udp_tunnel_nic_info txgbe_udp_tunnels = { + .set_port = txgbe_udp_tunnel_set, + .unset_port = txgbe_udp_tunnel_unset, + .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, + }, +}; + +#endif +#elif defined(HAVE_VXLAN_RX_OFFLOAD) +/** + * txgbe_add_vxlan_port - Get notifications about VXLAN ports that come up + * @dev: The port's netdev + * @sa_family: Socket Family that VXLAN is notifiying us about + * @port: New UDP port number that VXLAN started listening to + */ +static void txgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + struct txgbe_hw *hw = &adapter->hw; + u16 new_port = ntohs(port); + + if (sa_family == AF_INET6) + return; + + if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_ENABLE)) + return; + + if (adapter->vxlan_port == new_port) { + netdev_info(dev, "Port %d already offloaded\n", new_port); + return; + } + if (adapter->vxlan_port) { + netdev_info(dev, + "Maximum VXLAN offload ports reached, not " + "offloading port %d\n", + new_port); + return; + } + adapter->vxlan_port = new_port; + wr32(hw, TXGBE_CFG_VXLAN, new_port); +} + +/** + * txgbe_del_vxlan_port - Get notifications about VXLAN ports that go away + * @dev: The port's netdev + * @sa_family: Socket Family that VXLAN is notifying us about + * @port: UDP port number that VXLAN stopped listening to + */ +static void txgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + u16 new_port = ntohs(port); + + if (sa_family == AF_INET6) + return; + + if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_ENABLE)) + return; + + if (adapter->vxlan_port != new_port) { + netdev_info(dev, 
"Port %d was not found, not deleting\n", + new_port); + return; + } + + txgbe_clear_vxlan_port(adapter); + adapter->flags2 |= TXGBE_FLAG2_VXLAN_REREG_NEEDED; +} +#endif /* HAVE_VXLAN_RX_OFFLOAD */ + +#ifdef HAVE_NDO_GSO_CHECK +static bool +txgbe_gso_check(struct sk_buff *skb, __always_unused struct net_device *dev) +{ + return vxlan_gso_check(skb); +} +#endif /* HAVE_NDO_GSO_CHECK */ + +#ifdef HAVE_FDB_OPS +#ifdef USE_CONST_DEV_UC_CHAR +static int txgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, +#ifdef HAVE_NDO_FDB_ADD_VID + u16 vid, +#endif +#ifdef HAVE_NDO_FDB_ADD_EXTACK + u16 flags, + struct netlink_ext_ack __always_unused *extack) +#else + u16 flags) +#endif +#else +static int txgbe_ndo_fdb_add(struct ndmsg *ndm, + struct net_device *dev, + unsigned char *addr, + u16 flags) +#endif /* USE_CONST_DEV_UC_CHAR */ +{ + /* guarantee we can provide a unique filter for the unicast address */ + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { + if (TXGBE_MAX_PF_MACVLANS <= netdev_uc_count(dev)) + return -ENOMEM; + } + +#ifdef USE_CONST_DEV_UC_CHAR +#ifdef HAVE_NDO_FDB_ADD_VID + return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); +#else + return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags); +#endif /* HAVE_NDO_FDB_ADD_VID */ +#else + return ndo_dflt_fdb_add(ndm, dev, addr, flags); +#endif /* USE_CONST_DEV_UC_CHAR */ +} + +#ifdef HAVE_BRIDGE_ATTRIBS +#ifdef HAVE_NDO_BRIDGE_SETLINK_EXTACK +static int txgbe_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh, + __always_unused u16 flags, + struct netlink_ext_ack __always_unused *ext) +#elif defined(HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS) +static int txgbe_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh, + __always_unused u16 flags) +#else +static int txgbe_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh) +#endif /* HAVE_NDO_BRIDGE_SETLINK_EXTACK */ +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + struct nlattr *attr, *br_spec; + int rem; + + if (!(adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)) + return -EOPNOTSUPP; + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + mode = nla_get_u16(attr); + if (mode == BRIDGE_MODE_VEPA) { + adapter->flags |= TXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } else if (mode == BRIDGE_MODE_VEB) { + adapter->flags &= ~TXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } else { + return -EINVAL; + } - err = wx_setup_resources(wx); - if (err) - goto err_reset; + adapter->bridge_mode = mode; - wx_configure(wx); + /* re-configure settings related to bridge mode */ + txgbe_configure_bridge_mode(adapter); - err = txgbe_request_irq(wx); - if (err) - goto err_free_isb; + e_info(drv, "enabling bridge mode: %s\n", + mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); + } - /* Notify the stack of the actual queue counts. 
*/ - err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues); - if (err) - goto err_free_irq; + return 0; +} - err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues); - if (err) - goto err_free_irq; +#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +static int txgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __maybe_unused filter_mask, + int nlflags) +#elif defined(HAVE_BRIDGE_FILTER) +static int txgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __always_unused filter_mask) +#else +static int txgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev) +#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */ +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + u16 mode; + + if (!(adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)) + return 0; + + mode = adapter->bridge_mode; +#ifdef HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags, + filter_mask, NULL); +#elif defined(HAVE_NDO_BRIDGE_GETLINK_NLFLAGS) + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags); +#elif defined(HAVE_NDO_FDB_ADD_VID) || \ + defined (NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS) + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0); +#else + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode); +#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */ +} +#endif /* HAVE_BRIDGE_ATTRIBS */ +#endif /* HAVE_FDB_OPS */ + +#ifdef HAVE_NDO_FEATURES_CHECK +#define TXGBE_MAX_TUNNEL_HDR_LEN 80 +static netdev_features_t +txgbe_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ +#ifndef HAVE_VLAN_NUM_ERROR + u32 vlan_num = 0; + u16 vlan_depth = skb->mac_len; + __be16 type = skb->protocol; + struct vlan_hdr *vh; + + if (skb_vlan_tag_present(skb)) { + vlan_num++; + } - txgbe_up_complete(wx); + if (vlan_depth) { + vlan_depth -= VLAN_HLEN; + } else { + vlan_depth = ETH_HLEN; + } - return 0; + while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { + vlan_num++; + vh = (struct vlan_hdr *)(skb->data + vlan_depth); + type = vh->h_vlan_encapsulated_proto; + vlan_depth += VLAN_HLEN; + } -err_free_irq: - wx_free_irq(wx); -err_free_isb: - wx_free_isb_resources(wx); -err_reset: - txgbe_reset(wx); + if (vlan_num > 2) + features &= ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX); +#endif - return err; + if (skb->encapsulation) { + if (unlikely(skb_inner_mac_header(skb) - + skb_transport_header(skb) > + TXGBE_MAX_TUNNEL_HDR_LEN)) + return features & ~NETIF_F_CSUM_MASK; + } + + if (skb->encapsulation) { + if (skb->inner_protocol_type == ENCAP_TYPE_ETHER && + skb->inner_protocol != htons(ETH_P_IP) && + skb->inner_protocol != htons(ETH_P_IPV6) && + skb->inner_protocol != htons(ETH_P_TEB)) + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + } + + return features; } +#endif /* HAVE_NDO_FEATURES_CHECK */ -/** - * txgbe_close_suspend - actions necessary to both suspend and close flows - * @wx: the private wx struct - * - * This function should contain the necessary work common to both suspending - * and closing of the device. 
- */ -static void txgbe_close_suspend(struct wx *wx) +#ifdef HAVE_VIRTUAL_STATION +static inline int txgbe_inc_vmdqs(struct txgbe_fwd_adapter *accel) { - txgbe_disable_device(wx); - wx_free_resources(wx); + struct txgbe_adapter *adapter = accel->adapter; + + if (++adapter->num_vmdqs > 1 || adapter->num_vfs > 0) + adapter->flags |= TXGBE_FLAG_VMDQ_ENABLED | + TXGBE_FLAG_SRIOV_ENABLED; + accel->index = find_first_zero_bit(&adapter->fwd_bitmask, + TXGBE_MAX_MACVLANS); + set_bit(accel->index, &adapter->fwd_bitmask); + + return 1 + find_last_bit(&adapter->fwd_bitmask, TXGBE_MAX_MACVLANS); } -/** - * txgbe_close - Disables a network interface - * @netdev: network interface device structure - * - * Returns 0, this is not allowed to fail - * - * The close entry point is called when an interface is de-activated - * by the OS. The hardware is still under the drivers control, but - * needs to be disabled. A global MAC reset is issued to stop the - * hardware, and all transmit and receive resources are freed. - **/ -static int txgbe_close(struct net_device *netdev) +static inline int txgbe_dec_vmdqs(struct txgbe_fwd_adapter *accel) { - struct wx *wx = netdev_priv(netdev); + struct txgbe_adapter *adapter = accel->adapter; - txgbe_down(wx); - wx_free_irq(wx); - wx_free_resources(wx); - wx_control_hw(wx, false); + if (--adapter->num_vmdqs == 1 && adapter->num_vfs == 0) + adapter->flags &= ~(TXGBE_FLAG_VMDQ_ENABLED | + TXGBE_FLAG_SRIOV_ENABLED); + clear_bit(accel->index, &adapter->fwd_bitmask); - return 0; + return 1 + find_last_bit(&adapter->fwd_bitmask, TXGBE_MAX_MACVLANS); } -static void txgbe_dev_shutdown(struct pci_dev *pdev) +static void *txgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) { - struct wx *wx = pci_get_drvdata(pdev); - struct net_device *netdev; + struct txgbe_fwd_adapter *accel = NULL; + struct txgbe_adapter *adapter = netdev_priv(pdev); + int used_pools = adapter->num_vfs + adapter->num_vmdqs; + int err; - netdev = wx->netdev; - netif_device_detach(netdev); + if (test_bit(__TXGBE_DOWN, &adapter->state)) + return ERR_PTR(-EPERM); - rtnl_lock(); - if (netif_running(netdev)) - txgbe_close_suspend(wx); - rtnl_unlock(); + /* Hardware has a limited number of available pools. Each VF, and the + * PF require a pool. Check to ensure we don't attempt to use more + * than the available number of pools. 
+ */ + if (used_pools >= TXGBE_MAX_VF_FUNCTIONS) + return ERR_PTR(-EINVAL); + +#ifdef CONFIG_RPS + if (vdev->num_rx_queues != vdev->num_tx_queues) { + netdev_info(pdev, "%s: Only supports a single queue count for " + "TX and RX\n", + vdev->name); + return ERR_PTR(-EINVAL); + } +#endif + /* Check for hardware restriction on number of rx/tx queues */ + if (vdev->num_tx_queues != 2 && vdev->num_tx_queues != 4) { + netdev_info(pdev, + "%s: Supports RX/TX Queue counts 2, and 4\n", + pdev->name); + return ERR_PTR(-EINVAL); + } + + if ((adapter->flags & TXGBE_FLAG_DCB_ENABLED && + adapter->num_vmdqs > TXGBE_MAX_DCBMACVLANS - 1) || + (adapter->num_vmdqs > TXGBE_MAX_MACVLANS - 1)) + return ERR_PTR(-EBUSY); + + accel = kzalloc(sizeof(*accel), GFP_KERNEL); + if (!accel) + return ERR_PTR(-ENOMEM); + accel->adapter = adapter; + + /* Enable VMDq flag so device will be set in VM mode */ + adapter->ring_feature[RING_F_VMDQ].limit = txgbe_inc_vmdqs(accel); + adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues; - wx_control_hw(wx, false); + /* Force reinit of ring allocation with VMDQ enabled */ + err = txgbe_setup_tc(pdev, netdev_get_num_tc(pdev)); + if (err) + goto fwd_add_err; - pci_disable_device(pdev); + err = txgbe_fwd_ring_up(vdev, accel); + if (err) + goto fwd_add_err; + + netif_tx_start_all_queues(vdev); + return accel; +fwd_add_err: + /* unwind counter and free adapter struct */ + netdev_info(pdev, + "%s: dfwd hardware acceleration failed\n", vdev->name); + txgbe_dec_vmdqs(accel); + kfree(accel); + return ERR_PTR(err); } -static void txgbe_shutdown(struct pci_dev *pdev) +static void txgbe_fwd_del(struct net_device *pdev, void *fwd_priv) { - txgbe_dev_shutdown(pdev); - - if (system_state == SYSTEM_POWER_OFF) { - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - } + struct txgbe_fwd_adapter *accel = fwd_priv; + struct txgbe_adapter *adapter = accel->adapter; + + if (!accel || adapter->num_vmdqs <= 1) + return; + + adapter->ring_feature[RING_F_VMDQ].limit = txgbe_dec_vmdqs(accel); + txgbe_fwd_ring_down(accel->vdev, accel); + txgbe_setup_tc(pdev, netdev_get_num_tc(pdev)); + netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", + accel->index, adapter->num_vmdqs, + accel->rx_base_queue, + accel->rx_base_queue + adapter->queues_per_pool, + adapter->fwd_bitmask); + kfree(accel); } +#endif /*HAVE_VIRTUAL_STATION*/ + +#ifdef HAVE_NET_DEVICE_OPS static const struct net_device_ops txgbe_netdev_ops = { .ndo_open = txgbe_open, .ndo_stop = txgbe_close, - .ndo_change_mtu = wx_change_mtu, - .ndo_start_xmit = wx_xmit_frame, - .ndo_set_rx_mode = wx_set_rx_mode, - .ndo_set_features = wx_set_features, + .ndo_start_xmit = txgbe_xmit_frame, +#ifdef HAVE_NETDEV_SELECT_QUEUE + .ndo_select_queue = txgbe_select_queue, +#else + .ndo_select_queue = __netdev_pick_tx, +#endif /* HAVE_NETDEV_SELECT_QUEUE */ + .ndo_set_rx_mode = txgbe_set_rx_mode, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = wx_set_mac, - .ndo_get_stats64 = wx_get_stats64, - .ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid, + .ndo_set_mac_address = txgbe_set_mac, +#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU + .extended.ndo_change_mtu = txgbe_change_mtu, +#else + .ndo_change_mtu = txgbe_change_mtu, +#endif + .ndo_tx_timeout = txgbe_tx_timeout, +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) + .ndo_vlan_rx_add_vid = txgbe_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = txgbe_vlan_rx_kill_vid, +#endif +#ifdef 
HAVE_NDO_ETH_IOCTL + .ndo_eth_ioctl = txgbe_ioctl, +#else + .ndo_do_ioctl = txgbe_ioctl, +#endif /* HAVE_NDO_ETH_IOCTL */ +#ifdef HAVE_NDO_IOCTLPRIVATE + .ndo_siocdevprivate = txgbe_siocdevprivate, +#endif +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + /* RHEL7 requires this to be defined to enable extended ops. RHEL7 uses the + * function get_ndo_ext to retrieve offsets for extended fields from with the + * net_device_ops struct and ndo_size is checked to determine whether or not + * the offset is valid. + */ + .ndo_size = sizeof(const struct net_device_ops), +#endif +#ifdef IFLA_VF_MAX + .ndo_set_vf_mac = txgbe_ndo_set_vf_mac, +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN + .extended.ndo_set_vf_vlan = txgbe_ndo_set_vf_vlan, +#else + .ndo_set_vf_vlan = txgbe_ndo_set_vf_vlan, +#endif + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + .ndo_set_vf_rate = txgbe_ndo_set_vf_bw, +#else + .ndo_set_vf_tx_rate = txgbe_ndo_set_vf_bw, +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + .ndo_set_vf_spoofchk = txgbe_ndo_set_vf_spoofchk, +#endif +#ifdef HAVE_NDO_SET_VF_LINK_STATE + .ndo_set_vf_link_state = txgbe_ndo_set_vf_link_state, +#endif +#ifdef HAVE_NDO_SET_VF_TRUST +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + .extended.ndo_set_vf_trust = txgbe_ndo_set_vf_trust, +#else + .ndo_set_vf_trust = txgbe_ndo_set_vf_trust, +#endif /* HAVE_RHEL7_NET_DEVICE_OPS_EXT */ +#endif /* HAVE_NDO_SET_VF_TRUST */ + + .ndo_get_vf_config = txgbe_ndo_get_vf_config, +#endif +#ifdef HAVE_NDO_GET_STATS64 + .ndo_get_stats64 = txgbe_get_stats64, +#else + .ndo_get_stats = txgbe_get_stats, +#endif /* HAVE_NDO_GET_STATS64 */ +#ifdef HAVE_SETUP_TC +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC + .extended.ndo_setup_tc_rh = __txgbe_setup_tc, +#else +#ifdef NETIF_F_HW_TC + .ndo_setup_tc = __txgbe_setup_tc, +#else + .ndo_setup_tc = txgbe_setup_tc, +#endif /* NETIF_F_HW_TC */ +#endif /* HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC */ +#endif /* HAVE_SETUP_TC */ +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = txgbe_netpoll, +#endif +#ifndef HAVE_RHEL6_NET_DEVICE_EXTENDED +#ifdef HAVE_NDO_BUSY_POLL + .ndo_busy_poll = txgbe_busy_poll_recv, +#endif /* HAVE_NDO_BUSY_POLL */ +#endif /* !HAVE_RHEL6_NET_DEVICE_EXTENDED */ +#if IS_ENABLED(CONFIG_FCOE) + .ndo_fcoe_ddp_setup = txgbe_fcoe_ddp_get, +#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET + .ndo_fcoe_ddp_target = txgbe_fcoe_ddp_target, +#endif + .ndo_fcoe_ddp_done = txgbe_fcoe_ddp_put, +#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE + .ndo_fcoe_enable = txgbe_fcoe_enable, + .ndo_fcoe_disable = txgbe_fcoe_disable, +#endif +#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN + .ndo_fcoe_get_wwn = txgbe_fcoe_get_wwn, +#endif +#endif /* CONFIG_FCOE */ +#ifdef HAVE_VLAN_RX_REGISTER + .ndo_vlan_rx_register = &txgbe_vlan_mode, +#endif +#ifdef HAVE_FDB_OPS + .ndo_fdb_add = txgbe_ndo_fdb_add, +#ifndef USE_DEFAULT_FDB_DEL_DUMP + .ndo_fdb_del = ndo_dflt_fdb_del, + .ndo_fdb_dump = ndo_dflt_fdb_dump, +#endif +#ifdef HAVE_BRIDGE_ATTRIBS + .ndo_bridge_setlink = txgbe_ndo_bridge_setlink, + .ndo_bridge_getlink = txgbe_ndo_bridge_getlink, +#endif /* HAVE_BRIDGE_ATTRIBS */ +#endif +#ifdef HAVE_VIRTUAL_STATION + .ndo_dfwd_add_station = txgbe_fwd_add, + .ndo_dfwd_del_station = txgbe_fwd_del, +#endif +#ifdef HAVE_UDP_ENC_RX_OFFLOAD +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL + .extended.ndo_udp_tunnel_add = txgbe_add_udp_tunnel_port, + .extended.ndo_udp_tunnel_del = txgbe_del_udp_tunnel_port, +#else +#ifndef HAVE_UDP_TUNNEL_NIC_INFO + .ndo_udp_tunnel_add = txgbe_add_udp_tunnel_port, + .ndo_udp_tunnel_del = 
txgbe_del_udp_tunnel_port, +#endif +#endif +#elif defined(HAVE_VXLAN_RX_OFFLOAD) + .ndo_add_vxlan_port = txgbe_add_vxlan_port, + .ndo_del_vxlan_port = txgbe_del_vxlan_port, +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ +#ifdef HAVE_NDO_GSO_CHECK + .ndo_gso_check = txgbe_gso_check, +#endif /* HAVE_NDO_GSO_CHECK */ +#ifdef HAVE_NDO_FEATURES_CHECK + .ndo_features_check = txgbe_features_check, +#endif /* HAVE_NDO_FEATURES_CHECK */ + +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_NDO_BPF + .ndo_bpf = txgbe_xdp, +#else + .ndo_xdp = txgbe_xdp, +#endif/*HAVE_NDO_BPF*/ + .ndo_xdp_xmit = txgbe_xdp_xmit, +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#ifdef HAVE_NDO_XSK_WAKEUP + .ndo_xsk_wakeup = txgbe_xsk_wakeup, +#else + .ndo_xsk_async_xmit = txgbe_xsk_async_xmit, +#endif +#endif/*HAVE_AF_XDP_ZC_SUPPORT*/ +#ifndef NO_NDO_XDP_FLUSH + .ndo_xdp_flush = txgbe_xdp_flush, +#endif /* !NO_NDO_XDP_FLUSH */ +#endif/*HAVE_XDP_SUPPORT*/ +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +}; + +/* RHEL6 keeps these operations in a separate structure */ +static const struct net_device_ops_ext txgbe_netdev_ops_ext = { + .size = sizeof(struct net_device_ops_ext), +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ +#ifdef HAVE_NDO_SET_FEATURES + .ndo_set_features = txgbe_set_features, + .ndo_fix_features = txgbe_fix_features, +#endif /* HAVE_NDO_SET_FEATURES */ }; +#endif /* HAVE_NET_DEVICE_OPS */ + +void txgbe_assign_netdev_ops(struct net_device *dev) +{ +#ifdef HAVE_NET_DEVICE_OPS + dev->netdev_ops = &txgbe_netdev_ops; +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + set_netdev_ops_ext(dev, &txgbe_netdev_ops_ext); +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ +#else /* HAVE_NET_DEVICE_OPS */ + dev->open = &txgbe_open; + dev->stop = &txgbe_close; + dev->hard_start_xmit = &txgbe_xmit_frame; + dev->get_stats = &txgbe_get_stats; +#ifdef HAVE_SET_RX_MODE + dev->set_rx_mode = &txgbe_set_rx_mode; +#endif + dev->set_multicast_list = &txgbe_set_rx_mode; + dev->set_mac_address = &txgbe_set_mac; + dev->change_mtu = &txgbe_change_mtu; + dev->do_ioctl = &txgbe_ioctl; +#ifdef HAVE_TX_TIMEOUT + dev->tx_timeout = &txgbe_tx_timeout; +#endif +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) || \ + defined(NETIF_F_HW_VLAN_STAG_TX) + dev->vlan_rx_register = &txgbe_vlan_mode; + dev->vlan_rx_add_vid = &txgbe_vlan_rx_add_vid; + dev->vlan_rx_kill_vid = &txgbe_vlan_rx_kill_vid; +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = &txgbe_netpoll; +#endif +#ifdef HAVE_NETDEV_SELECT_QUEUE +#if HAVE_NETDEV_SELECT_QUEUE + dev->select_queue = &txgbe_select_queue; +#else + dev->select_queue = &__netdev_pick_tx; +#endif +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#endif /* HAVE_NET_DEVICE_OPS */ + +#ifdef HAVE_RHEL6_NET_DEVICE_EXTENDED +#ifdef HAVE_NDO_BUSY_POLL + netdev_extended(dev)->ndo_busy_poll = txgbe_busy_poll_recv; +#endif /* HAVE_NDO_BUSY_POLL */ +#endif /* HAVE_RHEL6_NET_DEVICE_EXTENDED */ + + txgbe_set_ethtool_ops(dev); + dev->watchdog_timeo = 5 * HZ; +} /** * txgbe_probe - Device Initialization Routine @@ -522,99 +13336,210 @@ static const struct net_device_ops txgbe_netdev_ops = { * Returns 0 on success, negative on failure * * txgbe_probe initializes an adapter identified by a pci_dev structure. - * The OS initialization, configuring of the wx private structure, + * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. 
**/ -static int txgbe_probe(struct pci_dev *pdev, - const struct pci_device_id __always_unused *ent) +static int __devinit txgbe_probe(struct pci_dev *pdev, + const struct pci_device_id __always_unused *ent) { struct net_device *netdev; - int err, expected_gts; - struct wx *wx = NULL; - struct txgbe *txgbe; - - u16 eeprom_verh = 0, eeprom_verl = 0, offset = 0; + struct txgbe_adapter *adapter = NULL; + struct txgbe_hw *hw = NULL; + static int cards_found; + int err, pci_using_dac, expected_gts; + u16 offset = 0; + u16 eeprom_verh = 0, eeprom_verl = 0; u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0; + u32 etrack_id = 0; u16 build = 0, major = 0, patch = 0; + char *info_string, *i_s_var; u8 part_str[TXGBE_PBANUM_LENGTH]; - u32 etrack_id = 0; +#ifdef HAVE_TX_MQ + unsigned int indices = MAX_TX_QUEUES; +#endif /* HAVE_TX_MQ */ + bool disable_dev = false; +#if IS_ENABLED(CONFIG_FCOE) +#endif /* IS_ENABLED(CONFIG_FCOE) */ +#ifndef NETIF_F_GSO_PARTIAL +#ifdef HAVE_NDO_SET_FEATURES +#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT + netdev_features_t hw_features; +#else /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ + u32 hw_features; +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ +#endif /* HAVE_NDO_SET_FEATURES */ +#endif /* NETIF_F_GSO_PARTIAL */ +#ifdef TXGBE_DIS_COMP_TIMEOUT + u16 pvalue = 0; +#endif err = pci_enable_device_mem(pdev); if (err) return err; - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pdev->dev, - "No usable DMA configuration, aborting\n"); - goto err_pci_disable_dev; + if (!dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)) && + !dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(pci_dev_to_dev(pdev), + DMA_BIT_MASK(32)); + if (err) { + dev_err(pci_dev_to_dev(pdev), "No usable DMA " + "configuration, aborting\n"); + goto err_dma; + } + } + pci_using_dac = 0; } - err = pci_request_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM), - txgbe_driver_name); + err = pci_request_selected_regions(pdev, pci_select_bars(pdev, + IORESOURCE_MEM), txgbe_driver_name); if (err) { - dev_err(&pdev->dev, + dev_err(pci_dev_to_dev(pdev), "pci_request_selected_regions failed 0x%x\n", err); - goto err_pci_disable_dev; + goto err_pci_reg; + } + + hw = vmalloc(sizeof(struct txgbe_hw)); + if (!hw) { + pr_info("Unable to allocate memory for early mac " + "check\n"); + } else { + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + vfree(hw); } +#ifdef HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING + pci_enable_pcie_error_reporting(pdev); +#endif pci_set_master(pdev); + /* errata 16 */ + if (MAX_REQUEST_SIZE == 512) { + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_READRQ, + 0x2000); + } else { + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_READRQ, + 0x1000); + } - netdev = devm_alloc_etherdev_mqs(&pdev->dev, - sizeof(struct wx), - TXGBE_MAX_TX_QUEUES, - TXGBE_MAX_RX_QUEUES); +#ifdef HAVE_TX_MQ + netdev = alloc_etherdev_mq(sizeof(struct txgbe_adapter), indices); +#else /* HAVE_TX_MQ */ + netdev = alloc_etherdev(sizeof(struct txgbe_adapter)); +#endif /* HAVE_TX_MQ */ if (!netdev) { err = -ENOMEM; - goto err_pci_release_regions; + goto err_alloc_etherdev; } - SET_NETDEV_DEV(netdev, &pdev->dev); - - wx = netdev_priv(netdev); - wx->netdev = netdev; - wx->pdev = pdev; - - wx->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; - - wx->hw_addr = devm_ioremap(&pdev->dev, - 
pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - if (!wx->hw_addr) { + SET_MODULE_OWNER(netdev); + SET_NETDEV_DEV(netdev, pci_dev_to_dev(pdev)); + + adapter = netdev_priv(netdev); +#ifdef HAVE_TX_MQ +#ifndef HAVE_NETDEV_SELECT_QUEUE + adapter->indices = indices; +#endif +#endif /* HAVE_TX_MQ */ + + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + adapter->io_addr = hw->hw_addr; + if (!hw->hw_addr) { err = -EIO; - goto err_pci_release_regions; + goto err_ioremap; } - wx->driver_name = txgbe_driver_name; - txgbe_set_ethtool_ops(netdev); - netdev->netdev_ops = &txgbe_netdev_ops; + txgbe_assign_netdev_ops(netdev); + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found; /* setup the private structure */ - err = txgbe_sw_init(wx); + err = txgbe_sw_init(adapter); if (err) - goto err_free_mac_table; + goto err_sw_init; +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) && defined(HAVE_UDP_TUNNEL_NIC_INFO) + netdev->udp_tunnel_nic_info = &txgbe_udp_tunnels; +#endif + + /* + * check_options must be called before setup_link to set up + * hw->fc completely + */ + txgbe_check_options(adapter); + txgbe_bp_mode_setting(adapter); + TCALL(hw, mac.ops.set_lan_id); /* check if flash load is done after hw power up */ - err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PERST); + err = txgbe_check_flash_load(hw, TXGBE_SPI_ILDR_STATUS_PERST); if (err) - goto err_free_mac_table; - err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PWRRST); + goto err_sw_init; + err = txgbe_check_flash_load(hw, TXGBE_SPI_ILDR_STATUS_PWRRST); if (err) - goto err_free_mac_table; + goto err_sw_init; + /* reset_hw fills in the perm_addr as well */ + hw->phy.reset_if_overtemp = true; + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + txgbe_get_hw_control(adapter); + err = TCALL(hw, mac.ops.reset_hw); + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + txgbe_release_hw_control(adapter); + + /* Store the permanent mac address */ + TCALL(hw, mac.ops.get_mac_addr, hw->mac.perm_addr); + hw->phy.reset_if_overtemp = false; + if (err == TXGBE_ERR_SFP_NOT_PRESENT) { + err = 0; + } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) { + e_dev_err("failed to load because an unsupported SFP+ " + "module type was detected.\n"); + e_dev_err("Reload the driver after installing a supported " + "module.\n"); + goto err_sw_init; + } else if (err) { + e_dev_err("HW Init failed: %d\n", err); + goto err_sw_init; + } - err = wx_mng_present(wx); - if (err) { - dev_err(&pdev->dev, "Management capability is not present\n"); - goto err_free_mac_table; + if (txgbe_is_lldp(hw)) + e_dev_err("Can not get lldp flags from flash\n"); +#ifdef CONFIG_PCI_IOV +#ifdef HAVE_SRIOV_CONFIGURE + if (adapter->max_vfs > 0) { + e_dev_warn("Enabling SR-IOV VFs using the max_vfs module " + "parameter is deprecated.\n"); + e_dev_warn("Please use the pci sysfs interface instead. 
Ex:\n"); + e_dev_warn("echo '%d' > /sys/bus/pci/devices/%04x:%02x:%02x.%1x" + "/sriov_numvfs\n", + adapter->max_vfs, + pci_domain_nr(pdev->bus), + pdev->bus->number, + PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn) + ); } - err = txgbe_reset_hw(wx); - if (err) { - dev_err(&pdev->dev, "HW Init failed: %d\n", err); - goto err_free_mac_table; +#endif + if (adapter->flags & TXGBE_FLAG_SRIOV_CAPABLE) { + pci_sriov_set_totalvfs(pdev, TXGBE_MAX_VFS_DRV_LIMIT); + txgbe_enable_sriov(adapter); } +#endif /* CONFIG_PCI_IOV */ +#ifdef NETIF_F_GSO_PARTIAL netdev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | @@ -622,61 +13547,285 @@ static int txgbe_probe(struct pci_dev *pdev, NETIF_F_RXCSUM | NETIF_F_HW_CSUM; - netdev->gso_partial_features = NETIF_F_GSO_ENCAP_ALL; - netdev->features |= netdev->gso_partial_features; + netdev->gso_partial_features = TXGBE_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | + TXGBE_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_SCTP_CRC; + + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= netdev->features | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_RXALL; + + netdev->hw_features |= NETIF_F_NTUPLE | + NETIF_F_HW_TC; + + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; netdev->hw_enc_features |= netdev->vlan_features; - netdev->features |= NETIF_F_VLAN_FEATURES; - /* copy netdev features into list of user selectable features */ - netdev->hw_features |= netdev->features | NETIF_F_RXALL; - netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; - netdev->features |= NETIF_F_HIGHDMA; - netdev->hw_features |= NETIF_F_GRO; - netdev->features |= NETIF_F_GRO; + netdev->mpls_features |= NETIF_F_HW_CSUM; + + /* set this bit last since it cannot be part of vlan_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_STAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; + /* give us the option of enabling RSC/LRO later */ + if (adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) { + netdev->hw_features |= NETIF_F_LRO; + netdev->features |= NETIF_F_LRO; + } +#else /* NETIF_F_GSO_PARTIAL */ + netdev->features |= NETIF_F_SG | + NETIF_F_IP_CSUM; + +#ifdef NETIF_F_IPV6_CSUM + netdev->features |= NETIF_F_IPV6_CSUM; +#endif + +#ifdef NETIF_F_HW_VLAN_CTAG_TX + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; +#endif + +#ifdef NETIF_F_HW_VLAN_CTAG_TX + /* set this bit last since it cannot be part of hw_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; +#endif + +#ifdef NETIF_F_HW_VLAN_TX + netdev->features |= NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX; +#endif + netdev->features |= txgbe_tso_features(); +#ifdef NETIF_F_RXHASH + if (adapter->flags2 & TXGBE_FLAG2_RSS_ENABLED) + netdev->features |= NETIF_F_RXHASH; +#endif + netdev->features |= NETIF_F_RXCSUM; +#ifdef HAVE_VIRTUAL_STATION + netdev->features |= NETIF_F_HW_L2FW_DOFFLOAD; +#endif +#ifdef HAVE_NDO_SET_FEATURES + /* copy netdev features into list of user selectable features */ +#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT + hw_features = netdev->hw_features; +#else + hw_features = get_netdev_hw_features(netdev); +#endif + hw_features |= netdev->features; + + /* give us the option of enabling RSC/LRO later */ +#ifdef TXGBE_NO_LRO + if (adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) +#endif + hw_features |= 
NETIF_F_LRO; + +#else /* !HAVE_NDO_SET_FEATURES */ +#ifdef NETIF_F_GRO + /* this is only needed on kernels prior to 2.6.39 */ + netdev->features |= NETIF_F_GRO; +#endif +#endif /* HAVE_NDO_SET_FEATURES */ + +#ifdef NETIF_F_HW_VLAN_STAG_TX + netdev->features |= NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_HW_VLAN_STAG_RX; +#endif + +#ifdef NETIF_F_HW_VLAN_STAG_TX + netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER; +#endif +#ifdef NETIF_F_HW_VLAN_TX + /* set this bit last since it cannot be part of hw_features */ + netdev->features |= NETIF_F_HW_VLAN_FILTER; +#endif + netdev->features |= NETIF_F_SCTP_CSUM; + netdev->features |= NETIF_F_NTUPLE; + adapter->flags |= TXGBE_FLAG_FDIR_HASH_CAPABLE; +#ifdef HAVE_NDO_SET_FEATURES + hw_features |= NETIF_F_SCTP_CSUM | + NETIF_F_NTUPLE; +#endif + +#ifdef HAVE_NDO_SET_FEATURES +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + set_netdev_hw_features(netdev, hw_features); +#else + netdev->hw_features = hw_features; +#endif +#endif + +#ifdef HAVE_NETDEV_VLAN_FEATURES + netdev->vlan_features |= NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6; + +#endif /* HAVE_NETDEV_VLAN_FEATURES */ +#ifdef HAVE_ENCAP_CSUM_OFFLOAD + netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_IP_CSUM | TXGBE_GSO_PARTIAL_FEATURES | NETIF_F_TSO; +#endif /* HAVE_ENCAP_CSUM_OFFLOAD */ +#ifdef HAVE_VXLAN_RX_OFFLOAD + netdev->hw_enc_features |= NETIF_F_RXCSUM; + +#endif /* NETIF_F_GSO_PARTIAL */ + +#endif /* HAVE_VXLAN_RX_OFFLOAD */ + if (netdev->features & NETIF_F_LRO) { + if ((adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) && + ((adapter->rx_itr_setting == 1) || + (adapter->rx_itr_setting > TXGBE_MIN_RSC_ITR))) { + adapter->flags2 |= TXGBE_FLAG2_RSC_ENABLED; + } else if (adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) { +#ifdef TXGBE_NO_LRO + e_dev_info("InterruptThrottleRate set too high, " + "disabling RSC\n"); +#else + e_dev_info("InterruptThrottleRate set too high, " + "falling back to software LRO\n"); +#endif + } + } +#ifdef IFF_UNICAST_FLT + netdev->priv_flags |= IFF_UNICAST_FLT; +#endif +#ifdef IFF_SUPP_NOFCS + netdev->priv_flags |= IFF_SUPP_NOFCS; +#endif + +#ifdef HAVE_NETDEVICE_MIN_MAX_MTU + /* MTU range: 68 - 9710 */ +#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU + netdev->extended->min_mtu = ETH_MIN_MTU; + netdev->extended->max_mtu = TXGBE_MAX_JUMBO_FRAME_SIZE - + (ETH_HLEN + ETH_FCS_LEN); +#else netdev->min_mtu = ETH_MIN_MTU; - netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE - - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + netdev->max_mtu = TXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); +#endif +#endif /* HAVE_NETDEVICE_MIN_MAX_MTU */ + +#if IS_ENABLED(CONFIG_DCB) + netdev->dcbnl_ops = &dcbnl_ops; +#endif /* CONFIG_DCB */ + +#if IS_ENABLED(CONFIG_FCOE) +#ifdef NETIF_F_FSO + if (adapter->flags & TXGBE_FLAG_FCOE_CAPABLE) { + unsigned int fcoe_l; + + { + netdev->features |= NETIF_F_FSO | + NETIF_F_FCOE_CRC; +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE + txgbe_fcoe_ddp_enable(adapter); + adapter->flags |= TXGBE_FLAG_FCOE_ENABLED; + netdev->features |= NETIF_F_FCOE_MTU; +#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */ + } + + fcoe_l = min_t(int, TXGBE_RDB_FCRE_TBL_SIZE, num_online_cpus()); + adapter->ring_feature[RING_F_FCOE].limit = fcoe_l; + +#ifdef HAVE_NETDEV_VLAN_FEATURES + netdev->vlan_features |= NETIF_F_FSO | + NETIF_F_FCOE_CRC | + NETIF_F_FCOE_MTU; +#endif /* HAVE_NETDEV_VLAN_FEATURES */ + } +#endif /* NETIF_F_FSO */ +#endif /* CONFIG_FCOE */ + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; +#ifdef HAVE_NETDEV_VLAN_FEATURES + netdev->vlan_features |= NETIF_F_HIGHDMA; +#endif 
/* HAVE_NETDEV_VLAN_FEATURES */ + } + TCALL(hw, eeprom.ops.init_params); /* make sure the EEPROM is good */ - err = txgbe_validate_eeprom_checksum(wx, NULL); - if (err != 0) { - dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); - wr32(wx, WX_MIS_RST, WX_MIS_RST_SW_RST); + + if (TCALL(hw, eeprom.ops.validate_checksum, NULL)) { + e_dev_err("The EEPROM Checksum Is Not Valid\n"); + wr32(hw, TXGBE_MIS_RST, TXGBE_MIS_RST_SW_RST); + err = -EIO; + goto err_sw_init; + } + + eth_hw_addr_set(netdev, hw->mac.perm_addr); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + e_dev_err("invalid MAC address\n"); err = -EIO; - goto err_free_mac_table; + goto err_sw_init; } - eth_hw_addr_set(netdev, wx->mac.perm_addr); - wx_mac_set_default_filter(wx, wx->mac.perm_addr); + txgbe_mac_set_default_filter(adapter, hw->mac.perm_addr); + memset(&adapter->etype_filter_info, 0, + sizeof(struct txgbe_etype_filter_info)); + memset(&adapter->ft_filter_info, 0, + sizeof(struct txgbe_5tuple_filter_info)); + + timer_setup(&adapter->service_timer, txgbe_service_timer, 0); + +#ifdef POLL_LINK_STATUS + timer_setup(&adapter->link_check_timer, txgbe_link_check_timer, 0); +#endif - err = wx_init_interrupt_scheme(wx); + if (TXGBE_REMOVED(hw->hw_addr)) { + err = -EIO; + goto err_sw_init; + } + INIT_WORK(&adapter->service_task, txgbe_service_task); + INIT_WORK(&adapter->sfp_sta_task, txgbe_sfp_phy_status_work); + set_bit(__TXGBE_SERVICE_INITED, &adapter->state); + clear_bit(__TXGBE_SERVICE_SCHED, &adapter->state); + + err = txgbe_init_interrupt_scheme(adapter); if (err) - goto err_free_mac_table; + goto err_sw_init; + + /* WOL not supported for all devices */ + adapter->wol = 0; - /* Save off EEPROM version number and Option Rom version which + if ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP) { + adapter->wol = TXGBE_PSR_WKUP_CTL_MAG; + wr32(hw, TXGBE_PSR_WKUP_CTL, adapter->wol); + } + hw->wol_enabled = !!(adapter->wol); + + device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol); + + /* + * Save off EEPROM version number and Option Rom version which * together make a unique identify for the eeprom */ - wx_read_ee_hostif(wx, - wx->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_H, - &eeprom_verh); - wx_read_ee_hostif(wx, - wx->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_L, - &eeprom_verl); + TCALL(hw, eeprom.ops.read, + hw->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_H, + &eeprom_verh); + TCALL(hw, eeprom.ops.read, + hw->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_L, + &eeprom_verl); etrack_id = (eeprom_verh << 16) | eeprom_verl; - wx_read_ee_hostif(wx, - wx->eeprom.sw_region_offset + TXGBE_ISCSI_BOOT_CONFIG, - &offset); + TCALL(hw, eeprom.ops.read, + hw->eeprom.sw_region_offset + TXGBE_ISCSI_BOOT_CONFIG, &offset); /* Make sure offset to SCSI block is valid */ if (!(offset == 0x0) && !(offset == 0xffff)) { - wx_read_ee_hostif(wx, offset + 0x84, &eeprom_cfg_blkh); - wx_read_ee_hostif(wx, offset + 0x83, &eeprom_cfg_blkl); + TCALL(hw, eeprom.ops.read, offset + 0x84, &eeprom_cfg_blkh); + TCALL(hw, eeprom.ops.read, offset + 0x83, &eeprom_cfg_blkl); /* Only display Option Rom if exist */ if (eeprom_cfg_blkl && eeprom_cfg_blkh) { @@ -684,77 +13833,248 @@ static int txgbe_probe(struct pci_dev *pdev, build = (eeprom_cfg_blkl << 8) | (eeprom_cfg_blkh >> 8); patch = eeprom_cfg_blkh & 0x00ff; - snprintf(wx->eeprom_id, sizeof(wx->eeprom_id), + snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), "0x%08x, %d.%d.%d", etrack_id, major, build, patch); } else { - snprintf(wx->eeprom_id, 
sizeof(wx->eeprom_id), + snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), "0x%08x", etrack_id); } } else { - snprintf(wx->eeprom_id, sizeof(wx->eeprom_id), + snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), "0x%08x", etrack_id); } - if (etrack_id < 0x20010) - dev_warn(&pdev->dev, "Please upgrade the firmware to 0x20010 or above.\n"); + adapter->etrack_id = etrack_id; - txgbe = devm_kzalloc(&pdev->dev, sizeof(*txgbe), GFP_KERNEL); - if (!txgbe) { - err = -ENOMEM; - goto err_release_hw; + if (strcmp(adapter->eeprom_id, adapter->fl_version) == 0) { + memcpy(adapter->fw_version, adapter->eeprom_id, sizeof(adapter->eeprom_id)); + + if (hw->bus.lan_id == 0) + e_dev_info("Running Firmware Version: %s\n", adapter->eeprom_id); + } else { + snprintf(adapter->fw_version, sizeof(adapter->fw_version), "%s,ACT.%s", + adapter->fl_version, adapter->eeprom_id); + + if (hw->bus.lan_id == 0) + e_dev_info("Running Firmware Version: %s, Flash Firmware Version: %s\n", + adapter->eeprom_id, adapter->fl_version); } - txgbe->wx = wx; - wx->priv = txgbe; + /* reset the hardware with the new settings */ + err = TCALL(hw, mac.ops.start_hw); + if (err == TXGBE_ERR_EEPROM_VERSION) { + /* We are running on a pre-production device, log a warning */ + e_dev_warn("This device is a pre-production adapter/LOM. " + "Please be aware there may be issues associated " + "with your hardware. If you are experiencing " + "problems please contact your hardware " + "representative who provided you with this " + "hardware.\n"); + } else if (err) { + e_dev_err("HW init failed\n"); + goto err_register; + } - err = txgbe_init_phy(txgbe); - if (err) - goto err_release_hw; + /* pick up the PCI bus settings for reporting later */ + TCALL(hw, mac.ops.get_bus_info); + strcpy(netdev->name, "eth%d"); err = register_netdev(netdev); if (err) - goto err_remove_phy; + goto err_register; + + pci_set_drvdata(pdev, adapter); + adapter->netdev_registered = true; +#ifdef HAVE_PCI_ERS + /* + * call save state here in standalone driver because it relies on + * adapter struct to exist, and needs to call netdev_priv + */ + pci_save_state(pdev); +#endif + + /* power down the optics for SFP+ fiber or mv phy */ + if(!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || + adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LLDP)) { + if (hw->phy.media_type == txgbe_media_type_fiber || + hw->phy.media_type == txgbe_media_type_fiber_qsfp) + TCALL(hw, mac.ops.disable_tx_laser); + else if (hw->phy.media_type == txgbe_media_type_copper && + (hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI) + txgbe_external_phy_suspend(hw); + } - pci_set_drvdata(pdev, wx); + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + /* keep stopping all the transmit queues for older kernels */ netif_tx_stop_all_queues(netdev); +#if IS_ENABLED(CONFIG_TPH) + if (adapter->flags & TXGBE_FLAG_TPH_CAPABLE) { + err = dca_add_requester(pci_dev_to_dev(pdev)); + switch (err) { + case 0: + adapter->flags |= TXGBE_FLAG_TPH_ENABLED; + txgbe_setup_tph(adapter); + break; + /* -19 is returned from the kernel when no provider is found */ + case -19: + e_info(rx_err, "No TPH provider found. Please " + "start ioatdma for DCA functionality.\n"); + break; + default: + e_info(probe, "DCA registration failed: %d\n", err); + break; + } + } +#endif + + /* print all messages at the end so that we use our eth%d name */ + /* calculate the expected PCIe bandwidth required for optimal * performance. 
Note that some older parts will never have enough * bandwidth due to being older generation PCIe parts. We clamp these * parts to ensure that no warning is displayed, as this could confuse - * users otherwise. - */ - expected_gts = txgbe_enumerate_functions(wx) * 10; + * users otherwise. */ + + expected_gts = txgbe_enumerate_functions(adapter) * 10; + /* don't check link if we failed to enumerate functions */ if (expected_gts > 0) - txgbe_check_minimum_link(wx); + txgbe_check_minimum_link(adapter, expected_gts); + + if ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) + e_info(probe, "NCSI : support"); else - dev_warn(&pdev->dev, "Failed to enumerate PF devices.\n"); + e_info(probe, "NCSI : unsupported"); /* First try to read PBA as a string */ - err = txgbe_read_pba_string(wx, part_str, TXGBE_PBANUM_LENGTH); + err = txgbe_read_pba_string(hw, part_str, TXGBE_PBANUM_LENGTH); if (err) - strncpy(part_str, "Unknown", TXGBE_PBANUM_LENGTH); - - netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr); + strscpy((char *)part_str, "Unknown", TXGBE_PBANUM_LENGTH); + if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present) + e_info(probe, "PHY: %d, SFP+: %d, PBA No: %s\n", + hw->phy.type, hw->phy.sfp_type, part_str); + else + e_info(probe, "PHY: %d, PBA No: %s\n", + hw->phy.type, part_str); + + e_dev_info("%02x:%02x:%02x:%02x:%02x:%02x\n", + netdev->dev_addr[0], netdev->dev_addr[1], + netdev->dev_addr[2], netdev->dev_addr[3], + netdev->dev_addr[4], netdev->dev_addr[5]); + +#define INFO_STRING_LEN 255 + info_string = kzalloc(INFO_STRING_LEN, GFP_KERNEL); + if (!info_string) { + e_err(probe, "allocation for info string failed\n"); + goto no_info_string; + } + i_s_var = info_string; + i_s_var += sprintf(info_string, "Enabled Features: "); + i_s_var += sprintf(i_s_var, "RxQ: %d TxQ: %d ", + adapter->num_rx_queues, adapter->num_tx_queues); +#if IS_ENABLED(CONFIG_FCOE) + if (adapter->flags & TXGBE_FLAG_FCOE_ENABLED) + i_s_var += sprintf(i_s_var, "FCoE "); +#endif + if (adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE) + i_s_var += sprintf(i_s_var, "FdirHash "); + if (adapter->flags & TXGBE_FLAG_DCB_ENABLED) + i_s_var += sprintf(i_s_var, "DCB "); + if (adapter->flags & TXGBE_FLAG_TPH_ENABLED) + i_s_var += sprintf(i_s_var, "TPH "); + if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) + i_s_var += sprintf(i_s_var, "RSC "); +#ifndef TXGBE_NO_LRO + else if (netdev->features & NETIF_F_LRO) + i_s_var += sprintf(i_s_var, "LRO "); +#endif + if (adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_ENABLE) + i_s_var += sprintf(i_s_var, "vxlan_rx "); + + BUG_ON(i_s_var > (info_string + INFO_STRING_LEN)); + /* end features printing */ + e_info(probe, "%s\n", info_string); + kfree(info_string); +no_info_string: +#ifdef CONFIG_PCI_IOV + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) { + int i; + for (i = 0; i < adapter->num_vfs; i++) + txgbe_vf_configuration(pdev, (i | 0x10000000)); + } +#endif + +#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) + /* add san mac addr to netdev */ + txgbe_add_sanmac_netdev(netdev); + +#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && (NETDEV_HW_ADDR_T_SAN) */ + e_info(probe, "WangXun(R) RP1000/RP2000/FF50XX Network Connection\n"); + cards_found++; +#ifdef TXGBE_SYSFS + if (txgbe_sysfs_init(adapter)) + e_err(probe, "failed to allocate sysfs resources\n"); +#ifdef TXGBE_PROCFS + if (txgbe_procfs_init(adapter)) + e_err(probe, "failed to allocate procfs resources\n"); +#endif /* TXGBE_PROCFS */ +#endif /* TXGBE_SYSFS */ + +#ifdef HAVE_TXGBE_DEBUG_FS + 
txgbe_dbg_adapter_init(adapter); +#endif /* HAVE_TXGBE_DEBUG_FS */ + + /* setup link for SFP devices with MNG FW, else wait for TXGBE_UP */ + if (txgbe_mng_present(hw) && txgbe_is_sfp(hw) && + ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP)) + TCALL(hw, mac.ops.setup_link, + TXGBE_LINK_SPEED_25GB_FULL | TXGBE_LINK_SPEED_10GB_FULL | + TXGBE_LINK_SPEED_1GB_FULL, true); + + TCALL(hw, mac.ops.setup_eee, + (adapter->flags2 & TXGBE_FLAG2_EEE_CAPABLE) && + (adapter->flags2 & TXGBE_FLAG2_EEE_ENABLED)); + + if (hw->mac.type == txgbe_mac_sp) + if (TXGBE_DIS_COMP_TIMEOUT == 1) { + pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &pvalue); + pvalue = pvalue | 0x10; + pcie_capability_write_word(pdev, PCI_EXP_DEVCTL2, pvalue); + adapter->cmplt_to_dis = true; + e_info(probe, "disable completion timeout\n"); + } return 0; -err_remove_phy: - txgbe_remove_phy(txgbe); -err_release_hw: - wx_clear_interrupt_scheme(wx); - wx_control_hw(wx, false); -err_free_mac_table: - kfree(wx->mac_table); -err_pci_release_regions: +err_register: + txgbe_clear_interrupt_scheme(adapter); + txgbe_release_hw_control(adapter); +err_sw_init: +#ifdef CONFIG_PCI_IOV + txgbe_disable_sriov(adapter); +#endif /* CONFIG_PCI_IOV */ + adapter->flags2 &= ~TXGBE_FLAG2_SEARCH_FOR_SFP; + kfree(adapter->mac_table); + iounmap(adapter->io_addr); + +#ifndef HAVE_NO_BITMAP + bitmap_free(adapter->af_xdp_zc_qps); +#endif +err_ioremap: + disable_dev = !test_and_set_bit(__TXGBE_DISABLED, &adapter->state); + free_netdev(netdev); +err_alloc_etherdev: pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); -err_pci_disable_dev: - pci_disable_device(pdev); +err_pci_reg: +err_dma: + if (!adapter || disable_dev) + pci_disable_device(pdev); return err; } @@ -767,37 +14087,473 @@ static int txgbe_probe(struct pci_dev *pdev, * Hot-Plug event, or because the driver is going to be removed from * memory. 
**/ -static void txgbe_remove(struct pci_dev *pdev) +static void __devexit txgbe_remove(struct pci_dev *pdev) { - struct wx *wx = pci_get_drvdata(pdev); - struct txgbe *txgbe = wx->priv; + struct txgbe_adapter *adapter = pci_get_drvdata(pdev); + struct txgbe_hw *hw = &adapter->hw; struct net_device *netdev; + bool disable_dev; - netdev = wx->netdev; - unregister_netdev(netdev); + /* if !adapter then we already cleaned up in probe */ + if (!adapter) + return; - txgbe_remove_phy(txgbe); + mutex_destroy(&adapter->e56_lock); + hw = &adapter->hw; + txgbe_mac_set_default_filter(adapter, hw->mac.perm_addr); + + netdev = adapter->netdev; +#ifdef HAVE_TXGBE_DEBUG_FS + txgbe_dbg_adapter_exit(adapter); +#endif + + set_bit(__TXGBE_REMOVING, &adapter->state); + cancel_work_sync(&adapter->service_task); + +#if IS_ENABLED(CONFIG_TPH) + if (adapter->flags & TXGBE_FLAG_TPH_ENABLED) { + adapter->flags &= ~TXGBE_FLAG_TPH_ENABLED; + } +#endif /* CONFIG_TPH */ + +#ifdef TXGBE_SYSFS + txgbe_sysfs_exit(adapter); +#else +#ifdef TXGBE_PROCFS + txgbe_procfs_exit(adapter); +#endif +#endif /* TXGBE-SYSFS */ + +#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) + /* remove the added san mac */ + txgbe_del_sanmac_netdev(netdev); + +#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && (NETDEV_HW_ADDR_T_SAN) */ + if (adapter->netdev_registered) { + unregister_netdev(netdev); + adapter->netdev_registered = false; + } + +#ifdef CONFIG_PCI_IOV + txgbe_disable_sriov(adapter); +#endif + +#if IS_ENABLED(CONFIG_FCOE) +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE + txgbe_fcoe_ddp_disable(adapter); +#endif +#endif /* CONFIG_FCOE */ + + txgbe_clear_interrupt_scheme(adapter); + txgbe_release_hw_control(adapter); + +#ifdef HAVE_DCBNL_IEEE + kfree(adapter->txgbe_ieee_pfc); + kfree(adapter->txgbe_ieee_ets); + +#endif + iounmap(adapter->io_addr); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); - kfree(wx->mac_table); - wx_clear_interrupt_scheme(wx); + kfree(adapter->mac_table); + +#ifndef HAVE_NO_BITMAP + bitmap_free(adapter->af_xdp_zc_qps); +#endif + disable_dev = !test_and_set_bit(__TXGBE_DISABLED, &adapter->state); + free_netdev(netdev); +#ifdef HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING + pci_disable_pcie_error_reporting(pdev); +#endif + if (disable_dev) + pci_disable_device(pdev); +} + +static bool txgbe_check_cfg_remove(struct txgbe_hw *hw, struct pci_dev *pdev) +{ + u16 value; + + pci_read_config_word(pdev, PCI_VENDOR_ID, &value); + if (value == TXGBE_FAILED_READ_CFG_WORD) { + txgbe_remove_adapter(hw); + return true; + } + return false; +} + +u16 txgbe_read_pci_cfg_word(struct txgbe_hw *hw, u32 reg) +{ + struct txgbe_adapter *adapter = hw->back; + u16 value; + + if (TXGBE_REMOVED(hw->hw_addr)) + return TXGBE_FAILED_READ_CFG_WORD; + pci_read_config_word(adapter->pdev, reg, &value); + if (value == TXGBE_FAILED_READ_CFG_WORD && + txgbe_check_cfg_remove(hw, adapter->pdev)) + return TXGBE_FAILED_READ_CFG_WORD; + return value; +} + +#ifdef HAVE_PCI_ERS +#ifdef CONFIG_PCI_IOV +static u32 txgbe_read_pci_cfg_dword(struct txgbe_hw *hw, u32 reg) +{ + struct txgbe_adapter *adapter = hw->back; + u32 value; + + if (TXGBE_REMOVED(hw->hw_addr)) + return TXGBE_FAILED_READ_CFG_DWORD; + pci_read_config_dword(adapter->pdev, reg, &value); + if (value == TXGBE_FAILED_READ_CFG_DWORD && + txgbe_check_cfg_remove(hw, adapter->pdev)) + return TXGBE_FAILED_READ_CFG_DWORD; + return value; +} +#endif /* CONFIG_PCI_IOV */ +#endif /* HAVE_PCI_ERS */ + +void txgbe_write_pci_cfg_word(struct txgbe_hw *hw, u32 reg, u16 value) +{ + struct 
txgbe_adapter *adapter = hw->back; + + if (TXGBE_REMOVED(hw->hw_addr)) + return; + pci_write_config_word(adapter->pdev, reg, value); +} + +#ifdef HAVE_PCI_ERS +/** + * txgbe_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t txgbe_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct txgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + +#ifdef CONFIG_PCI_IOV + struct txgbe_hw *hw = &adapter->hw; + struct pci_dev *bdev, *vfdev; + u32 dw0, dw1, dw2, dw3; + int vf, pos; + u16 req_id, pf_func; + e_info(hw, "in txgbe_io_error_detected.\n"); + + if (adapter->num_vfs == 0) + goto skip_bad_vf_detection; + + bdev = pdev->bus->self; + while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT)) + bdev = bdev->bus->self; + + if (!bdev) + goto skip_bad_vf_detection; + + pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR); + if (!pos) + goto skip_bad_vf_detection; + + dw0 = txgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG); + dw1 = txgbe_read_pci_cfg_dword(hw, + pos + PCI_ERR_HEADER_LOG + 4); + dw2 = txgbe_read_pci_cfg_dword(hw, + pos + PCI_ERR_HEADER_LOG + 8); + dw3 = txgbe_read_pci_cfg_dword(hw, + pos + PCI_ERR_HEADER_LOG + 12); + if (TXGBE_REMOVED(hw->hw_addr)) + goto skip_bad_vf_detection; + + req_id = dw1 >> 16; + /* if bit 7 of the requestor ID is set then it's a VF */ + if (!(req_id & 0x0080)) + goto skip_bad_vf_detection; + + pf_func = req_id & 0x01; + if ((pf_func & 1) == (pdev->devfn & 1)) { + vf = (req_id & 0x7F) >> 1; + e_dev_err("VF %d has caused a PCIe error\n", vf); + e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: " + "%8.8x\tdw3: %8.8x\n", + dw0, dw1, dw2, dw3); + + /* Find the pci device of the offending VF */ + vfdev = pci_get_device(PCI_VENDOR_ID_TRUSTNETIC, + TXGBE_VF_DEVICE_ID, NULL); + while (vfdev) { + if (vfdev->devfn == (req_id & 0xFF)) + break; + vfdev = pci_get_device(PCI_VENDOR_ID_TRUSTNETIC, + TXGBE_VF_DEVICE_ID, vfdev); + } + /* + * There's a slim chance the VF could have been hot + * plugged, so if it is no longer present we don't need + * to issue the VFLR.Just clean up the AER in that case. + */ + if (vfdev) { + txgbe_issue_vf_flr(adapter, vfdev); + /* Free device reference count */ + pci_dev_put(vfdev); + } + + pci_aer_clear_nonfatal_status(pdev); + } + + /* + * Even though the error may have occurred on the other port + * we still need to increment the vf error reference count for + * both ports because the I/O resume function will be called + * for both of them. + */ + adapter->vferr_refcount++; + + return PCI_ERS_RESULT_RECOVERED; + + skip_bad_vf_detection: +#endif /* CONFIG_PCI_IOV */ + + if (!test_bit(__TXGBE_SERVICE_INITED, &adapter->state)) + return PCI_ERS_RESULT_DISCONNECT; + + rtnl_lock(); + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) { + rtnl_unlock(); + return PCI_ERS_RESULT_DISCONNECT; + } + + if (netif_running(netdev)) + txgbe_close(netdev); + + if (!test_and_set_bit(__TXGBE_DISABLED, &adapter->state)) + pci_disable_device(pdev); + rtnl_unlock(); + e_info(hw, "leave txgbe_io_error_detected.\n"); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * txgbe_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. 
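+ * This is the slot_reset step of the AER recovery sequence
+ * (error_detected -> slot_reset -> resume): the slot has already been
+ * reset, so the device is re-enabled, its saved config space restored
+ * and a full txgbe_reset() issued before traffic resumes in io_resume.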
+ */ +static pci_ers_result_t txgbe_io_slot_reset(struct pci_dev *pdev) +{ + struct txgbe_adapter *adapter = pci_get_drvdata(pdev); + pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED; + u16 value; + + e_info(hw, "in txgbe_io_slot_reset\n"); + + if (adapter->hw.mac.type == txgbe_mac_sp) + if (adapter->cmplt_to_dis) { + pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &value); + value |= 0x10; + pcie_capability_write_word(adapter->pdev, PCI_EXP_DEVCTL2, value); + adapter->cmplt_to_dis = false; + } + + if (pci_enable_device_mem(pdev)) { + e_err(probe, "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + smp_mb__before_atomic(); + clear_bit(__TXGBE_DISABLED, &adapter->state); + adapter->hw.hw_addr = adapter->io_addr; + pci_set_master(pdev); + pci_restore_state(pdev); + /* + * After second error pci->state_saved is false, this + * resets it so EEH doesn't break. + */ + pci_save_state(pdev); + + pci_wake_from_d3(pdev, false); + + txgbe_reset(adapter); + + result = PCI_ERS_RESULT_RECOVERED; + } + + pci_aer_clear_nonfatal_status(pdev); + e_info(hw, "exit txgbe_io_slot_reset\n"); + + return result; +} + +/** + * txgbe_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. + */ +static void txgbe_io_resume(struct pci_dev *pdev) +{ + struct txgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + e_info(hw, "in io_resume.\n"); + +#ifdef CONFIG_PCI_IOV + if (adapter->vferr_refcount) { + e_info(drv, "Resuming after VF err\n"); + adapter->vferr_refcount--; + return; + } + +#endif + rtnl_lock(); + if (netif_running(netdev)) + txgbe_open(netdev); + + netif_device_attach(netdev); + rtnl_unlock(); + e_info(hw, "exit io_resume.\n"); +} + +#ifdef HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS +static const struct pci_error_handlers txgbe_err_handler = { +#else +static struct pci_error_handlers txgbe_err_handler = { +#endif + .error_detected = txgbe_io_error_detected, + .slot_reset = txgbe_io_slot_reset, + .resume = txgbe_io_resume, +}; +#endif /* HAVE_PCI_ERS */ - pci_disable_device(pdev); +struct net_device *txgbe_hw_to_netdev(const struct txgbe_hw *hw) +{ + return ((struct txgbe_adapter *)hw->back)->netdev; } +struct txgbe_msg *txgbe_hw_to_msg(const struct txgbe_hw *hw) +{ + struct txgbe_adapter *adapter = + container_of(hw, struct txgbe_adapter, hw); + return (struct txgbe_msg *)&adapter->msg_enable; +} + +#ifdef CONFIG_PM +#ifndef USE_LEGACY_PM_SUPPORT +static const struct dev_pm_ops txgbe_pm_ops = { + .suspend = txgbe_suspend, + .resume = txgbe_resume, + .freeze = txgbe_freeze, + .thaw = txgbe_thaw, + .poweroff = txgbe_suspend, + .restore = txgbe_resume, +}; +#endif /* USE_LEGACY_PM_SUPPORT */ +#endif + +#ifdef HAVE_RHEL6_SRIOV_CONFIGURE +static struct pci_driver_rh txgbe_driver_rh = { + .sriov_configure = txgbe_pci_sriov_configure, +}; +#endif static struct pci_driver txgbe_driver = { .name = txgbe_driver_name, .id_table = txgbe_pci_tbl, .probe = txgbe_probe, - .remove = txgbe_remove, + .remove = __devexit_p(txgbe_remove), +#ifdef CONFIG_PM +#ifndef USE_LEGACY_PM_SUPPORT + .driver = { + .pm = &txgbe_pm_ops, + }, +#else + .suspend = txgbe_suspend, + .resume = txgbe_resume, +#endif /* USE_LEGACY_PM_SUPPORT */ +#endif +#ifndef USE_REBOOT_NOTIFIER .shutdown = txgbe_shutdown, +#endif + +#if defined(HAVE_SRIOV_CONFIGURE) + .sriov_configure = txgbe_pci_sriov_configure, +#elif 
defined(HAVE_RHEL6_SRIOV_CONFIGURE) + .rh_reserved = &txgbe_driver_rh, +#endif + +#ifdef HAVE_PCI_ERS + .err_handler = &txgbe_err_handler +#endif }; -module_pci_driver(txgbe_driver); +bool txgbe_is_txgbe(struct pci_dev *pcidev) +{ + if (pci_dev_driver(pcidev) != &txgbe_driver) + return false; + else + return true; +} + +/** + * txgbe_init_module - Driver Registration Routine + * + * txgbe_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init txgbe_init_module(void) +{ + int ret; + pr_info("%s - version %s\n", txgbe_driver_string, txgbe_driver_version); + pr_info("%s\n", txgbe_copyright); + + txgbe_wq = create_singlethread_workqueue(txgbe_driver_name); + if (!txgbe_wq) { + pr_err("%s: Failed to create workqueue\n", txgbe_driver_name); + return -ENOMEM; + } + +#ifdef TXGBE_PROCFS + if (txgbe_procfs_topdir_init()) + pr_info("Procfs failed to initialize topdir\n"); +#endif + +#ifdef HAVE_TXGBE_DEBUG_FS + txgbe_dbg_init(); +#endif + + ret = pci_register_driver(&txgbe_driver); + return ret; +} + +module_init(txgbe_init_module); + +/** + * txgbe_exit_module - Driver Exit Cleanup Routine + * + * txgbe_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit txgbe_exit_module(void) +{ + pci_unregister_driver(&txgbe_driver); +#ifdef TXGBE_PROCFS + txgbe_procfs_topdir_exit(); +#endif + destroy_workqueue(txgbe_wq); +#ifdef HAVE_TXGBE_DEBUG_FS + txgbe_dbg_exit(); +#endif /* HAVE_TXGBE_DEBUG_FS */ +} + +module_exit(txgbe_exit_module); + +/* txgbe_main.c */ -MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl); -MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, "); -MODULE_DESCRIPTION("WangXun(R) 10 Gigabit PCI Express Network Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.c new file mode 100644 index 0000000000000000000000000000000000000000..588c0331f97dca6140861ff7fd1516dbdce244b7 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.c @@ -0,0 +1,716 @@ +/* + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on txgbe_mbx.c, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "txgbe_type.h" +#include "txgbe.h" +#include "txgbe_mbx.h" + + +/** + * txgbe_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfuly read message from buffer + **/ +int txgbe_read_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + int err = TXGBE_ERR_MBX; + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + err = TCALL(hw, mbx.ops.read, msg, size, mbx_id); + + return err; +} + +/** + * txgbe_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +int txgbe_write_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + int err = 0; + + if (size > mbx->size) { + err = TXGBE_ERR_MBX; + ERROR_REPORT2(TXGBE_ERROR_ARGUMENT, + "Invalid mailbox message size %d", size); + } else + err = TCALL(hw, mbx.ops.write, msg, size, mbx_id); + + return err; +} + +/** + * txgbe_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +int txgbe_check_for_msg(struct txgbe_hw *hw, u16 mbx_id) +{ + int err = TXGBE_ERR_MBX; + + err = TCALL(hw, mbx.ops.check_for_msg, mbx_id); + + return err; +} + +/** + * txgbe_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +int txgbe_check_for_ack(struct txgbe_hw *hw, u16 mbx_id) +{ + int err = TXGBE_ERR_MBX; + + err = TCALL(hw, mbx.ops.check_for_ack, mbx_id); + + return err; +} + +/** + * txgbe_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +int txgbe_check_for_rst(struct txgbe_hw *hw, u16 mbx_id) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + int err = TXGBE_ERR_MBX; + + if (mbx->ops.check_for_rst) + err = mbx->ops.check_for_rst(hw, mbx_id); + + return err; +} + +/** + * txgbe_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +static int txgbe_poll_for_msg(struct txgbe_hw *hw, u16 mbx_id) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && TCALL(hw, mbx.ops.check_for_msg, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->udelay); + } + + if (countdown == 0) + ERROR_REPORT2(TXGBE_ERROR_POLLING, + "Polling for VF%d mailbox message timedout", mbx_id); + +out: + return countdown ? 
0 : TXGBE_ERR_MBX; +} + +/** + * txgbe_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +static int txgbe_poll_for_ack(struct txgbe_hw *hw, u16 mbx_id) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && TCALL(hw, mbx.ops.check_for_ack, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->udelay); + } + + if (countdown == 0) + ERROR_REPORT2(TXGBE_ERROR_POLLING, + "Polling for VF%d mailbox ack timedout", mbx_id); + +out: + return countdown ? 0 : TXGBE_ERR_MBX; +} + +/** + * txgbe_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. + **/ +int txgbe_read_posted_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + int err = TXGBE_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + err = txgbe_poll_for_msg(hw, mbx_id); + + /* if ack received read message, otherwise we timed out */ + if (!err) + err = TCALL(hw, mbx.ops.read, msg, size, mbx_id); +out: + return err; +} + +/** + * txgbe_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +int txgbe_write_posted_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + int err; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->timeout) + return TXGBE_ERR_MBX; + + /* send msg */ + err = TCALL(hw, mbx.ops.write, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!err) + err = txgbe_poll_for_ack(hw, mbx_id); + + return err; +} + +/** + * txgbe_init_mbx_ops - Initialize MB function pointers + * @hw: pointer to the HW structure + * + * Setups up the mailbox read and write message function pointers + **/ +void txgbe_init_mbx_ops(struct txgbe_hw *hw) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + + mbx->ops.read_posted = txgbe_read_posted_mbx; + mbx->ops.write_posted = txgbe_write_posted_mbx; +} + +/** + * txgbe_read_v2p_mailbox - read v2p mailbox + * @hw: pointer to the HW structure + * + * This function is used to read the v2p mailbox without losing the read to + * clear status bits. + **/ +static u32 txgbe_read_v2p_mailbox(struct txgbe_hw *hw) +{ + u32 v2p_mailbox = rd32(hw, TXGBE_VXMAILBOX); + + v2p_mailbox |= hw->mbx.v2p_mailbox; + /* read and clear mirrored mailbox flags */ + v2p_mailbox |= rd32a(hw, TXGBE_VXMBMEM, TXGBE_VXMAILBOX_SIZE); + wr32a(hw, TXGBE_VXMBMEM, TXGBE_VXMAILBOX_SIZE, 0); + hw->mbx.v2p_mailbox |= v2p_mailbox & TXGBE_VXMAILBOX_R2C_BITS; + + return v2p_mailbox; +} + +/** + * txgbe_check_for_bit_vf - Determine if a status bit was set + * @hw: pointer to the HW structure + * @mask: bitmask for bits to be tested and cleared + * + * This function is used to check for the read to clear bits within + * the V2P mailbox. 
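+ *
+ * For example, txgbe_check_for_msg_vf() passes TXGBE_VXMAILBOX_PFSTS here:
+ * the bit is consumed from the mirrored copy in hw->mbx.v2p_mailbox, so a
+ * second call returns TXGBE_ERR_MBX until the PF posts another message.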
+ **/ +static int txgbe_check_for_bit_vf(struct txgbe_hw *hw, u32 mask) +{ + u32 mailbox = txgbe_read_v2p_mailbox(hw); + + hw->mbx.v2p_mailbox &= ~mask; + + return (mailbox & mask ? 0 : TXGBE_ERR_MBX); +} + +/** + * txgbe_check_for_msg_vf - checks to see if the PF has sent mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the Status bit or else ERR_MBX + **/ +static int txgbe_check_for_msg_vf(struct txgbe_hw *hw, u16 mbx_id) +{ + int err = TXGBE_ERR_MBX; + + UNREFERENCED_PARAMETER(mbx_id); + + /* read clear the pf sts bit */ + if (!txgbe_check_for_bit_vf(hw, TXGBE_VXMAILBOX_PFSTS)) { + err = 0; + hw->mbx.stats.reqs++; + } + + return err; +} + +/** + * txgbe_check_for_ack_vf - checks to see if the PF has ACK'd + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX + **/ +static int txgbe_check_for_ack_vf(struct txgbe_hw *hw, u16 mbx_id) +{ + int err = TXGBE_ERR_MBX; + + UNREFERENCED_PARAMETER(mbx_id); + + /* read clear the pf ack bit */ + if (!txgbe_check_for_bit_vf(hw, TXGBE_VXMAILBOX_PFACK)) { + err = 0; + hw->mbx.stats.acks++; + } + + return err; +} + +/** + * txgbe_check_for_rst_vf - checks to see if the PF has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns true if the PF has set the reset done bit or else false + **/ +static int txgbe_check_for_rst_vf(struct txgbe_hw *hw, u16 mbx_id) +{ + int err = TXGBE_ERR_MBX; + + UNREFERENCED_PARAMETER(mbx_id); + if (!txgbe_check_for_bit_vf(hw, (TXGBE_VXMAILBOX_RSTD | + TXGBE_VXMAILBOX_RSTI))) { + err = 0; + hw->mbx.stats.rsts++; + } + + return err; +} + +/** + * txgbe_obtain_mbx_lock_vf - obtain mailbox lock + * @hw: pointer to the HW structure + * + * return SUCCESS if we obtained the mailbox lock + **/ +static int txgbe_obtain_mbx_lock_vf(struct txgbe_hw *hw) +{ + int err = TXGBE_ERR_MBX; + u32 mailbox; + + /* Take ownership of the buffer */ + wr32(hw, TXGBE_VXMAILBOX, TXGBE_VXMAILBOX_VFU); + + /* reserve mailbox for vf use */ + mailbox = txgbe_read_v2p_mailbox(hw); + if (mailbox & TXGBE_VXMAILBOX_VFU) + err = 0; + else + ERROR_REPORT2(TXGBE_ERROR_POLLING, + "Failed to obtain mailbox lock for VF"); + + return err; +} + +/** + * txgbe_write_mbx_vf - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static int txgbe_write_mbx_vf(struct txgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + int err; + u16 i; + + UNREFERENCED_PARAMETER(mbx_id); + + /* lock the mailbox to prevent pf/vf race condition */ + err = txgbe_obtain_mbx_lock_vf(hw); + if (err) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + txgbe_check_for_msg_vf(hw, 0); + txgbe_check_for_ack_vf(hw, 0); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + wr32a(hw, TXGBE_VXMBMEM, i, msg[i]); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* Drop VFU and interrupt the PF to tell it a message has been sent */ + wr32(hw, TXGBE_VXMAILBOX, TXGBE_VXMAILBOX_REQ); + +out_no_write: + return err; +} + +/** + * txgbe_read_mbx_vf - Reads a message from the inbox intended for vf + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * 
returns SUCCESS if it successfuly read message from buffer + **/ +static int txgbe_read_mbx_vf(struct txgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + int err = 0; + u16 i; + UNREFERENCED_PARAMETER(mbx_id); + + /* lock the mailbox to prevent pf/vf race condition */ + err = txgbe_obtain_mbx_lock_vf(hw); + if (err) + goto out_no_read; + + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = rd32a(hw, TXGBE_VXMBMEM, i); + + /* Acknowledge receipt and release mailbox, then we're done */ + wr32(hw, TXGBE_VXMAILBOX, TXGBE_VXMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return err; +} + +/** + * txgbe_init_mbx_params_vf - set initial values for vf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for vf mailbox + */ +void txgbe_init_mbx_params_vf(struct txgbe_hw *hw) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + + /* start mailbox as timed out and let the reset_hw call set the timeout + * value to begin communications */ + mbx->timeout = 0; + mbx->udelay = TXGBE_VF_MBX_INIT_DELAY; + + mbx->size = TXGBE_VXMAILBOX_SIZE; + + mbx->ops.read = txgbe_read_mbx_vf; + mbx->ops.write = txgbe_write_mbx_vf; + mbx->ops.read_posted = txgbe_read_posted_mbx; + mbx->ops.write_posted = txgbe_write_posted_mbx; + mbx->ops.check_for_msg = txgbe_check_for_msg_vf; + mbx->ops.check_for_ack = txgbe_check_for_ack_vf; + mbx->ops.check_for_rst = txgbe_check_for_rst_vf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} + +static int txgbe_check_for_bit_pf(struct txgbe_hw *hw, u32 mask, int index) +{ + u32 mbvficr = rd32(hw, TXGBE_MBVFICR(index)); + int err = TXGBE_ERR_MBX; + + if (mbvficr & mask) { + err = 0; + wr32(hw, TXGBE_MBVFICR(index), mask); + } + + return err; +} + +/** + * txgbe_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static int txgbe_check_for_msg_pf(struct txgbe_hw *hw, u16 vf) +{ + int err = TXGBE_ERR_MBX; + int index = TXGBE_MBVFICR_INDEX(vf); + u32 vf_bit = vf % 16; + + if (!txgbe_check_for_bit_pf(hw, TXGBE_MBVFICR_VFREQ_VF1 << vf_bit, + index)) { + err = 0; + hw->mbx.stats.reqs++; + } + + return err; +} + +/** + * txgbe_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static int txgbe_check_for_ack_pf(struct txgbe_hw *hw, u16 vf) +{ + int err = TXGBE_ERR_MBX; + int index = TXGBE_MBVFICR_INDEX(vf); + u32 vf_bit = vf % 16; + + if (!txgbe_check_for_bit_pf(hw, TXGBE_MBVFICR_VFACK_VF1 << vf_bit, + index)) { + err = 0; + hw->mbx.stats.acks++; + } + + return err; +} + +/** + * txgbe_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static int txgbe_check_for_rst_pf(struct txgbe_hw *hw, u16 vf) +{ + u32 reg_offset = (vf < 32) ? 
0 : 1; + u32 vf_shift = vf % 32; + u32 vflre = 0; + int err = TXGBE_ERR_MBX; + + vflre = rd32(hw, TXGBE_VFLRE(reg_offset)); + + if (vflre & (1 << vf_shift)) { + err = 0; + wr32(hw, TXGBE_VFLREC(reg_offset), (1 << vf_shift)); + hw->mbx.stats.rsts++; + } + + return err; +} + +/** + * txgbe_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +static int txgbe_obtain_mbx_lock_pf(struct txgbe_hw *hw, u16 vf) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + int err = TXGBE_ERR_MBX; + u32 mailbox; + + while (countdown--) { + /* Take ownership of the buffer */ + wr32(hw, TXGBE_PXMAILBOX(vf), TXGBE_PXMAILBOX_PFU); + + /* reserve mailbox for vf use */ + mailbox = rd32(hw, TXGBE_PXMAILBOX(vf)); + if (mailbox & TXGBE_PXMAILBOX_PFU) { + err = 0; + break; + } + + /* Wait a bit before trying again */ + usec_delay(mbx->udelay); + } + + if (err) + ERROR_REPORT2(TXGBE_ERROR_POLLING, + "Failed to obtain mailbox lock for PF%d", vf); + + return err; +} + +/** + * txgbe_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static int txgbe_write_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size, + u16 vf) +{ + int err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = txgbe_obtain_mbx_lock_pf(hw, vf); + if (err) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + txgbe_check_for_msg_pf(hw, vf); + txgbe_check_for_ack_pf(hw, vf); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + wr32a(hw, TXGBE_PXMBMEM(vf), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + /* set mirrored mailbox flags */ + wr32a(hw, TXGBE_PXMBMEM(vf), TXGBE_VXMAILBOX_SIZE, TXGBE_PXMAILBOX_STS); + wr32(hw, TXGBE_PXMAILBOX(vf), TXGBE_PXMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out_no_write: + return err; + +} + +/** + * txgbe_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. 
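+ *
+ * A typical caller first learns of a pending request via
+ * txgbe_check_for_msg_pf() (or the mailbox interrupt), then uses this
+ * routine to copy the message out of TXGBE_PXMBMEM(vf) and acknowledge it.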
+ **/ +static int txgbe_read_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size, + u16 vf) +{ + int err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = txgbe_obtain_mbx_lock_pf(hw, vf); + if (err) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = rd32a(hw, TXGBE_PXMBMEM(vf), i); + + /* Acknowledge the message and release buffer */ + /* set mirrored mailbox flags */ + wr32a(hw, TXGBE_PXMBMEM(vf), TXGBE_VXMAILBOX_SIZE, TXGBE_PXMAILBOX_ACK); + wr32(hw, TXGBE_PXMAILBOX(vf), TXGBE_PXMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return err; +} + +/** + * txgbe_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +void txgbe_init_mbx_params_pf(struct txgbe_hw *hw) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + + mbx->timeout = TXGBE_VF_MBX_INIT_TIMEOUT; + mbx->udelay = TXGBE_VF_MBX_INIT_DELAY; + + mbx->size = TXGBE_VXMAILBOX_SIZE; + + mbx->ops.read = txgbe_read_mbx_pf; + mbx->ops.write = txgbe_write_mbx_pf; + mbx->ops.read_posted = txgbe_read_posted_mbx; + mbx->ops.write_posted = txgbe_write_posted_mbx; + mbx->ops.check_for_msg = txgbe_check_for_msg_pf; + mbx->ops.check_for_ack = txgbe_check_for_ack_pf; + mbx->ops.check_for_rst = txgbe_check_for_rst_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.h new file mode 100644 index 0000000000000000000000000000000000000000..4fd40404a28b6e77143fae872d4fe4d49366e39f --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.h @@ -0,0 +1,206 @@ +/* + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on txgbe_mbx.h, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _TXGBE_MBX_H_ +#define _TXGBE_MBX_H_ + +#define TXGBE_VXMAILBOX_SIZE (16 - 1) + +/** + * VF Registers + **/ +#define TXGBE_VXMAILBOX 0x00600 +#define TXGBE_VXMAILBOX_REQ ((0x1) << 0) /* Request for PF Ready bit */ +#define TXGBE_VXMAILBOX_ACK ((0x1) << 1) /* Ack PF message received */ +#define TXGBE_VXMAILBOX_VFU ((0x1) << 2) /* VF owns the mailbox buffer */ +#define TXGBE_VXMAILBOX_PFU ((0x1) << 3) /* PF owns the mailbox buffer */ +#define TXGBE_VXMAILBOX_PFSTS ((0x1) << 4) /* PF wrote a message in the MB */ +#define TXGBE_VXMAILBOX_PFACK ((0x1) << 5) /* PF ack the previous VF msg */ +#define TXGBE_VXMAILBOX_RSTI ((0x1) << 6) /* PF has reset indication */ +#define TXGBE_VXMAILBOX_RSTD ((0x1) << 7) /* PF has indicated reset done */ +#define TXGBE_VXMAILBOX_R2C_BITS (TXGBE_VXMAILBOX_RSTD | \ + TXGBE_VXMAILBOX_PFSTS | TXGBE_VXMAILBOX_PFACK) + +#define TXGBE_VXMBMEM 0x00C00 /* 16*4B */ + +/** + * PF Registers + **/ +#define TXGBE_PXMAILBOX(i) (0x00600 + (4 * (i))) /* i=[0,63] */ +#define TXGBE_PXMAILBOX_STS ((0x1) << 0) /* Initiate message send to VF */ +#define TXGBE_PXMAILBOX_ACK ((0x1) << 1) /* Ack message recv'd from VF */ +#define TXGBE_PXMAILBOX_VFU ((0x1) << 2) /* VF owns the mailbox buffer */ +#define TXGBE_PXMAILBOX_PFU ((0x1) << 3) /* PF owns the mailbox buffer */ +#define TXGBE_PXMAILBOX_RVFU ((0x1) << 4) /* Reset VFU - used when VF stuck*/ + +#define TXGBE_PXMBMEM(i) (0x5000 + (64 * (i))) /* i=[0,63] */ + +#define TXGBE_VFLRP(i) (0x00490 + (4 * (i))) /* i=[0,1] */ +#define TXGBE_VFLRE(i) (0x004A0 + (4 * (i))) /* i=[0,1] */ +#define TXGBE_VFLREC(i) (0x004A8 + (4 * (i))) /* i=[0,1] */ + +/* SR-IOV specific macros */ +#define TXGBE_MBVFICR(i) (0x00480 + (4 * (i))) /* i=[0,3] */ +#define TXGBE_MBVFICR_INDEX(vf) ((vf) >> 4) +#define TXGBE_MBVFICR_VFREQ_MASK (0x0000FFFF) /* bits for VF messages */ +#define TXGBE_MBVFICR_VFREQ_VF1 (0x00000001) /* bit for VF 1 message */ +#define TXGBE_MBVFICR_VFACK_MASK (0xFFFF0000) /* bits for VF acks */ +#define TXGBE_MBVFICR_VFACK_VF1 (0x00010000) /* bit for VF 1 ack */ + +/** + * Messages + **/ +/* If it's a TXGBE_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is TXGBE_PF_*. 
+ * Message ACK's are the value or'd with 0xF0000000 + */ +#define TXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with + * this are the ACK */ +#define TXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with + * this are the NACK */ +#define TXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still + * clear to send requests */ +#define TXGBE_VT_MSGINFO_SHIFT 16 +#define TXGBE_VT_MSGINFO_VLAN_OFFLOAD_SHIFT 17 +/* bits 23:16 are used for extra info for certain messages */ +#define TXGBE_VT_MSGINFO_MASK (0xFF << TXGBE_VT_MSGINFO_SHIFT) + +/* definitions to support mailbox API version negotiation */ + +/* + * each element denotes a version of the API; existing numbers may not + * change; any additions must go at the end + */ +enum txgbe_pfvf_api_rev { + txgbe_mbox_api_null, + txgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ + txgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ + txgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ + txgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ + txgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ + txgbe_mbox_api_21, /* API version 2.1 */ + txgbe_mbox_api_22, /* API version 2.2 */ + txgbe_mbox_api_unknown, /* indicates that API version is not known */ +}; + +/* mailbox API, legacy requests */ +#define TXGBE_VF_RESET 0x01 /* VF requests reset */ +#define TXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define TXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define TXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ + +/* mailbox API, version 1.0 VF requests */ +#define TXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define TXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ +#define TXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ + +/* mailbox API, version 1.1 VF requests */ +#define TXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ + +/* mailbox API, version 1.2 VF requests */ +#define TXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ +#define TXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ +#define TXGBE_VF_UPDATE_XCAST_MODE 0x0c +#define TXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */ +#define TXGBE_VF_GET_FW_VERSION 0x11 /* get fw version */ + +/* mailbox API, version 2.1 VF requests */ +#define TXGBE_VF_SET_5TUPLE 0x20 /* VF request PF for 5-tuple filter */ +#define TXGBE_VF_QUEUE_RATE_LIMIT 0x21 /* VF request PF to set vf-queue rate limit */ + +/* mailbox API, version 2.2 VF requests */ +#define TXGBE_VF_QUEUE_RATE_LIMIT 0x21 /* VF request PF to set vf-queue rate limit */ + +#define TXGBE_VF_BACKUP 0x8001 /* VF requests backup */ + +/* mode choices for TXGBE_VF_UPDATE_XCAST_MODE */ +enum txgbevf_xcast_modes { + TXGBEVF_XCAST_MODE_NONE = 0, + TXGBEVF_XCAST_MODE_MULTI, + TXGBEVF_XCAST_MODE_ALLMULTI, + TXGBEVF_XCAST_MODE_PROMISC, +}; + +enum txgbevf_5tuple_msg { + TXGBEVF_5T_REQ = 0, + TXGBEVF_5T_CMD, + TXGBEVF_5T_CTRL0, + TXGBEVF_5T_CTRL1, + TXGBEVF_5T_PORT, + TXGBEVF_5T_DA, + TXGBEVF_5T_SA, + TXGBEVF_5T_MAX, /* must be last */ +}; + +#define TXGBEVF_5T_ADD_SHIFT 31 + +enum txgbevf_queue_rate_limit_msg { + TXGBEVF_Q_RATE_REQ = 0, + TXGBEVF_Q_RATE_INDEX, + TXGBEVF_Q_RATE_LIMIT, +}; + +/* GET_QUEUES return data indices within the mailbox */ +#define TXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define TXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define TXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ +#define TXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ 
+ +/* length of permanent address message returned from PF */ +#define TXGBE_VF_PERMADDR_MSG_LEN 4 +/* word in permanent address message with the current multicast type */ +#define TXGBE_VF_MC_TYPE_WORD 3 + +#define TXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ +#define TXGBE_PF_NOFITY_VF_LINK_STATUS 0x1 +#define TXGBE_PF_NOFITY_VF_NET_NOT_RUNNING BIT(31) + + +/* mailbox API, version 2.0 VF requests */ +#define TXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ +#define TXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ +#define TXGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */ +#define TXGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */ +#define TXGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */ +#define TXGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */ +#define TXGBE_VF_GET_MTU 0x0E /* get bounds on MTU */ +#define TXGBE_VF_SET_MTU 0x0F /* set a specific MTU */ + +/* mailbox API, version 2.0 PF requests */ +#define TXGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */ + +#define TXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define TXGBE_VF_MBX_INIT_DELAY 50 /* microseconds between retries */ + +int txgbe_read_mbx(struct txgbe_hw *, u32 *, u16, u16); +int txgbe_write_mbx(struct txgbe_hw *, u32 *, u16, u16); +int txgbe_read_posted_mbx(struct txgbe_hw *, u32 *, u16, u16); +int txgbe_write_posted_mbx(struct txgbe_hw *, u32 *, u16, u16); +int txgbe_check_for_msg(struct txgbe_hw *, u16); +int txgbe_check_for_ack(struct txgbe_hw *, u16); +int txgbe_check_for_rst(struct txgbe_hw *, u16); +void txgbe_init_mbx_ops(struct txgbe_hw *hw); +void txgbe_init_mbx_params_vf(struct txgbe_hw *); +void txgbe_init_mbx_params_pf(struct txgbe_hw *); + +#endif /* _TXGBE_MBX_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_mtd.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_mtd.c new file mode 100644 index 0000000000000000000000000000000000000000..2a4cb1c13a5857e5a23048dcfbc20e087639eaea --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_mtd.c @@ -0,0 +1,1547 @@ +/* + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ */ + + +#include "txgbe_mtd.h" +#include "txgbe_kcompat.h" + +MTD_STATUS mtdHwXmdioWrite( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 reg, + IN MTD_U16 value) +{ + MTD_STATUS result = MTD_OK; + + + if (devPtr->fmtdWriteMdio != NULL) { + if (devPtr->fmtdWriteMdio(devPtr, port, dev, reg, value) == MTD_FAIL) { + result = MTD_FAIL; + MTD_DBG_INFO("fmtdWriteMdio 0x%04X failed to port=%d, dev=%d, reg=0x%04X\n", + (unsigned)(value), (unsigned)port, (unsigned)dev, (unsigned)reg); + } + } else + result = MTD_FAIL; + + return result; +} + +MTD_STATUS mtdHwXmdioRead( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 reg, + OUT MTD_U16 * data) +{ + MTD_STATUS result = MTD_OK; + + + if (devPtr->fmtdReadMdio != NULL) { + if (devPtr->fmtdReadMdio(devPtr, port, dev, reg, data) == MTD_FAIL) { + result = MTD_FAIL; + MTD_DBG_INFO("fmtdReadMdio failed from port=%d, dev=%d, reg=0x%04X\n", + (unsigned)port, (unsigned)dev, (unsigned)reg); + } + } else + result = MTD_FAIL; + + return result; +} + +/* + This macro calculates the mask for partial read/write of register's data. +*/ +#define MTD_CALC_MASK(fieldOffset, fieldLen, mask) do {\ + if ((fieldLen + fieldOffset) >= 16) \ + mask = (0 - (1 << fieldOffset)); \ + else \ + mask = (((1 << (fieldLen + fieldOffset))) - (1 << fieldOffset));\ + } while (0) + +MTD_STATUS mtdHwGetPhyRegField( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 regAddr, + IN MTD_U8 fieldOffset, + IN MTD_U8 fieldLength, + OUT MTD_U16 * data) +{ + MTD_U16 tmpData; + MTD_STATUS retVal; + + retVal = mtdHwXmdioRead(devPtr, port, dev, regAddr, &tmpData); + + if (retVal != MTD_OK) { + MTD_DBG_ERROR("Failed to read register \n"); + return MTD_FAIL; + } + + mtdHwGetRegFieldFromWord(tmpData, fieldOffset, fieldLength, data); + + MTD_DBG_INFO("fOff %d, fLen %d, data 0x%04X.\n", (int)fieldOffset, + (int)fieldLength, (int)*data); + + return MTD_OK; +} + + +MTD_STATUS mtdHwSetPhyRegField( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 regAddr, + IN MTD_U8 fieldOffset, + IN MTD_U8 fieldLength, + IN MTD_U16 data) +{ + MTD_U16 tmpData, newData; + MTD_STATUS retVal; + + retVal = mtdHwXmdioRead(devPtr, port, dev, regAddr, &tmpData); + if (retVal != MTD_OK) { + MTD_DBG_ERROR("Failed to read register \n"); + return MTD_FAIL; + } + + mtdHwSetRegFieldToWord(tmpData, data, fieldOffset, fieldLength, &newData); + + retVal = mtdHwXmdioWrite(devPtr, port, dev, regAddr, newData); + + if (retVal != MTD_OK) { + MTD_DBG_ERROR("Failed to write register \n"); + return MTD_FAIL; + } + + MTD_DBG_INFO("fieldOff %d, fieldLen %d, data 0x%x.\n", fieldOffset, + fieldLength, data); + + return MTD_OK; +} + + +MTD_STATUS mtdHwGetRegFieldFromWord( + IN MTD_U16 regData, + IN MTD_U8 fieldOffset, + IN MTD_U8 fieldLength, + OUT MTD_U16 *data) +{ + /* Bits mask to be read */ + MTD_U16 mask; + + MTD_CALC_MASK(fieldOffset, fieldLength, mask); + + *data = (regData & mask) >> fieldOffset; + + return MTD_OK; +} + + +MTD_STATUS mtdHwSetRegFieldToWord( + IN MTD_U16 regData, + IN MTD_U16 bitFieldData, + IN MTD_U8 fieldOffset, + IN MTD_U8 fieldLength, + OUT MTD_U16 *data) +{ + /* Bits mask to be read */ + MTD_U16 mask; + + MTD_CALC_MASK(fieldOffset, fieldLength, mask); + + /* Set the desired bits to 0. 
*/ + regData &= ~mask; + /* Set the given data into the above reset bits.*/ + regData |= ((bitFieldData << fieldOffset) & mask); + + *data = regData; + + return MTD_OK; +} + + +MTD_STATUS mtdWait(IN MTD_UINT x) +{ + msleep(x); + return MTD_OK; +} + + +/* internal device registers */ +#define MTD_REG_CCCR9 31, 0xF05E /* do not enclose in parentheses */ +#define MTD_REG_SCR 31, 0xF0F0 /* do not enclose in parentheses */ +#define MTD_REG_ECSR 31, 0xF0F5 /* do not enclose in parentheses */ +static MTD_STATUS mtdCheckDeviceCapabilities( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_BOOL * phyHasMacsec, + OUT MTD_BOOL * phyHasCopperInterface, + OUT MTD_BOOL * isE20X0Device) +{ + MTD_U8 major, minor, inc, test; + MTD_U16 abilities; + + *phyHasMacsec = MTD_TRUE; + *phyHasCopperInterface = MTD_TRUE; + *isE20X0Device = MTD_FALSE; + + if (mtdGetFirmwareVersion(devPtr, port, &major, &minor, &inc, &test) == MTD_FAIL) { + /* firmware not running will produce this case */ + major = minor = inc = test = 0; + } + + if (major == 0 && minor == 0 && inc == 0 && test == 0) { + /* no code loaded into internal processor */ + /* have to read it from the device itself the hard way */ + MTD_U16 reg2, reg3; + MTD_U16 index, index2; + MTD_U16 temp; + MTD_U16 bit16thru23[8]; + + /* save these registers */ + /* ATTEMPT(mtdHwXmdioRead(devPtr,port,MTD_REG_CCCR9,®1)); some revs can't read this register reliably */ + ATTEMPT(mtdHwXmdioRead(devPtr, port, MTD_REG_SCR, ®2)); + ATTEMPT(mtdHwXmdioRead(devPtr, port, MTD_REG_ECSR, ®3)); + + /* clear these bit indications */ + for (index = 0; index < 8; index++) { + bit16thru23[index] = 0; + } + + ATTEMPT(mtdHwXmdioWrite(devPtr, port, MTD_REG_CCCR9, 0x0300)); /* force clock on */ + mtdWait(1); + ATTEMPT(mtdHwXmdioWrite(devPtr, port, MTD_REG_SCR, 0x0102)); /* set access */ + mtdWait(1); + + ATTEMPT(mtdHwXmdioWrite(devPtr, port, MTD_REG_ECSR, 0x06D3)); /* sequence needed */ + mtdWait(1); + ATTEMPT(mtdHwXmdioWrite(devPtr, port, MTD_REG_ECSR, 0x0593)); + mtdWait(1); + ATTEMPT(mtdHwXmdioWrite(devPtr, port, MTD_REG_ECSR, 0x0513)); + mtdWait(1); + + index = 0; + index2 = 0; + while (index < 24) { + ATTEMPT(mtdHwXmdioWrite(devPtr, port, MTD_REG_ECSR, 0x0413)); + mtdWait(1); + ATTEMPT(mtdHwXmdioWrite(devPtr, port, MTD_REG_ECSR, 0x0513)); + mtdWait(1); + + if (index >= 16) { + ATTEMPT(mtdHwXmdioRead(devPtr, port, MTD_REG_ECSR, &bit16thru23[index2++])); + } else { + ATTEMPT(mtdHwXmdioRead(devPtr, port, MTD_REG_ECSR, &temp)); + } + mtdWait(1); + index++; + } + + if (((bit16thru23[0] >> 11) & 1) | ((bit16thru23[1] >> 11) & 1)) { + *phyHasMacsec = MTD_FALSE; + } + if (((bit16thru23[4] >> 11) & 1) | ((bit16thru23[5] >> 11) & 1)) { + *phyHasCopperInterface = MTD_FALSE; + } + + if (((bit16thru23[6] >> 11) & 1) | ((bit16thru23[7] >> 11) & 1)) { + *isE20X0Device = MTD_TRUE; + } + + ATTEMPT(mtdHwXmdioWrite(devPtr, port, MTD_REG_ECSR, 0x0413)); + mtdWait(1); + ATTEMPT(mtdHwXmdioWrite(devPtr, port, MTD_REG_ECSR, 0x0493)); + mtdWait(1); + ATTEMPT(mtdHwXmdioWrite(devPtr, port, MTD_REG_ECSR, 0x0413)); + mtdWait(1); + ATTEMPT(mtdHwXmdioWrite(devPtr, port, MTD_REG_ECSR, 0x0513)); + mtdWait(1); + + /* restore the registers */ + /* ATTEMPT(mtdHwXmdioWrite(devPtr,port,MTD_REG_CCCR9,reg1)); Some revs can't read this register reliably */ + ATTEMPT(mtdHwXmdioWrite(devPtr, port, MTD_REG_CCCR9, 0x5440)); /* set back to reset value */ + ATTEMPT(mtdHwXmdioWrite(devPtr, port, MTD_REG_SCR, reg2)); + ATTEMPT(mtdHwXmdioWrite(devPtr, port, MTD_REG_ECSR, reg3)); + + } else { + /* should just read it from the 
firmware status register */ + ATTEMPT(mtdHwXmdioRead(devPtr, port, MTD_TUNIT_XG_EXT_STATUS, &abilities)); + if (abilities & (1 << 12)) { + *phyHasMacsec = MTD_FALSE; + } + + if (abilities & (1 << 13)) { + *phyHasCopperInterface = MTD_FALSE; + } + + if (abilities & (1 << 14)) { + *isE20X0Device = MTD_TRUE; + } + + } + + return MTD_OK; +} + +static MTD_STATUS mtdIsPhyReadyAfterReset( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_BOOL * phyReady) +{ + MTD_U16 val; + + *phyReady = MTD_FALSE; + + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, MTD_TUNIT_IEEE_PMA_CTRL1, 15, 1, &val)); + + if (val) { + /* if still in reset return '0' (could be coming up, or disabled by download mode) */ + *phyReady = MTD_FALSE; + } else { + /* if Phy is in normal operation */ + *phyReady = MTD_TRUE; + } + + return MTD_OK; +} + + +MTD_STATUS mtdSoftwareReset( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 timeoutMs) +{ + MTD_U16 counter; + MTD_BOOL phyReady; + /* bit self clears when done */ + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, MTD_TUNIT_IEEE_PMA_CTRL1, 15, 1, 1)); + + if (timeoutMs) { + counter = 0; + ATTEMPT(mtdIsPhyReadyAfterReset(devPtr, port, &phyReady)); + while (phyReady == MTD_FALSE && counter <= timeoutMs) { + ATTEMPT(mtdWait(1)); + ATTEMPT(mtdIsPhyReadyAfterReset(devPtr, port, &phyReady)); + counter++; + } + + if (counter < timeoutMs) { + return MTD_OK; + } else { + /* timed out without becoming ready */ + return MTD_FAIL; + } + } else { + return MTD_OK; + } +} + +static MTD_STATUS mtdIsPhyReadyAfterHardwareReset( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_BOOL *phyReady) +{ + MTD_U16 val; + + *phyReady = MTD_FALSE; + + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, MTD_CUNIT_PORT_CTRL, 14, 1, &val)); + + if (val) { + /* if still in reset return '0' (could be coming up, or disabled by download mode) */ + *phyReady = MTD_FALSE; + } else { + /* if Phy is in normal operation */ + *phyReady = MTD_TRUE; + } + return MTD_OK; +} + + +MTD_STATUS mtdHardwareReset( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 timeoutMs) +{ + MTD_U16 counter; + MTD_BOOL phyReady; + + /* bit self clears when done */ + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, MTD_CUNIT_PORT_CTRL, 14, 1, 1)); + + if (timeoutMs) { + counter = 0; + ATTEMPT(mtdIsPhyReadyAfterHardwareReset(devPtr, port, &phyReady)); + while (phyReady == MTD_FALSE && counter <= timeoutMs) { + ATTEMPT(mtdWait(1)); + ATTEMPT(mtdIsPhyReadyAfterHardwareReset(devPtr, port, &phyReady)); + counter++; + } + if (counter < timeoutMs) + return MTD_OK; + else + return MTD_FAIL; /* timed out without becoming ready */ + } else { + return MTD_OK; + } +} + +/****************************************************************************/ + +/****************************************************************************/ +/******************************************************************* + 802.3 Clause 28 and Clause 45 + Autoneg Related Control & Status + *******************************************************************/ +/******************************************************************* + Enabling speeds for autonegotiation + Reading speeds enabled for autonegotation + Set/get pause advertisement for autonegotiation + Other Autoneg-related Control and Status (restart,disable/enable, + force master/slave/auto, checking for autoneg resolution, etc.) 
+ *******************************************************************/ + +#define MTD_7_0010_SPEED_BIT_LENGTH 4 +#define MTD_7_0010_SPEED_BIT_POS 5 +#define MTD_7_8000_SPEED_BIT_LENGTH 2 +#define MTD_7_8000_SPEED_BIT_POS 8 +#define MTD_7_0020_SPEED_BIT_LENGTH 1 /* for 88X32X0 family and 88X33X0 family */ +#define MTD_7_0020_SPEED_BIT_POS 12 +#define MTD_7_0020_SPEED_BIT_LENGTH2 2 /* for 88X33X0 family A0 revision 2.5/5G */ +#define MTD_7_0020_SPEED_BIT_POS2 7 + +/* Bit defines for speed bits */ +#define MTD_FORCED_SPEEDS_BIT_MASK (MTD_SPEED_10M_HD_AN_DIS | MTD_SPEED_10M_FD_AN_DIS | \ + MTD_SPEED_100M_HD_AN_DIS | MTD_SPEED_100M_FD_AN_DIS) +#define MTD_LOWER_BITS_MASK 0x000F /* bits in base page */ +#define MTD_GIG_SPEED_POS 4 +#define MTD_XGIG_SPEED_POS 6 +#define MTD_2P5G_SPEED_POS 11 +#define MTD_5G_SPEED_POS 12 +#define MTD_GET_1000BT_BITS(_speedBits) ((_speedBits & (MTD_SPEED_1GIG_HD | MTD_SPEED_1GIG_FD)) \ + >> MTD_GIG_SPEED_POS) /* 1000BT bits */ +#define MTD_GET_10GBT_BIT(_speedBits) ((_speedBits & MTD_SPEED_10GIG_FD) \ + >> MTD_XGIG_SPEED_POS) /* 10GBT bit setting */ +#define MTD_GET_2P5GBT_BIT(_speedBits) ((_speedBits & MTD_SPEED_2P5GIG_FD) \ + >> MTD_2P5G_SPEED_POS) /* 2.5GBT bit setting */ +#define MTD_GET_5GBT_BIT(_speedBits) ((_speedBits & MTD_SPEED_5GIG_FD) \ + >> MTD_5G_SPEED_POS) /* 5GBT bit setting */ + + +MTD_STATUS mtdEnableSpeeds( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 speed_bits, + IN MTD_BOOL anRestart) +{ + MTD_BOOL speedForced; + MTD_U16 dummy; + MTD_U16 tempRegValue; + + if (speed_bits & MTD_FORCED_SPEEDS_BIT_MASK) { + /* tried to force the speed, this function is for autonegotiation control */ + return MTD_FAIL; + } + + if (MTD_IS_X32X0_BASE(devPtr->deviceId) && ((speed_bits & MTD_SPEED_2P5GIG_FD) || + (speed_bits & MTD_SPEED_5GIG_FD))) { + return MTD_FAIL; /* tried to advertise 2.5G/5G on a 88X32X0 chipset */ + } + + if (MTD_IS_X33X0_BASE(devPtr->deviceId)) { + const MTD_U16 chipRev = (devPtr->deviceId & 0xf); /* get the chip revision */ + + if (chipRev == 9 || chipRev == 5 || chipRev == 1 || /* Z2 chip revisions */ + chipRev == 8 || chipRev == 4 || chipRev == 0) /* Z1 chip revisions */ { + /* this is an X33X0 or E20X0 Z2/Z1 device and not supported (not compatible with A0) */ + return MTD_FAIL; + } + } + + /* Enable AN and set speed back to power-on default in case previously forced + Only do it if forced, to avoid an extra/unnecessary soft reset */ + ATTEMPT(mtdGetForcedSpeed(devPtr, port, &speedForced, &dummy)); + if (speedForced) { + ATTEMPT(mtdUndoForcedSpeed(devPtr, port, MTD_FALSE)); + } + + if (speed_bits == MTD_ADV_NONE) { + /* Set all speeds to be disabled + Take care of bits in 7.0010 (advertisement register, 10BT and 100BT bits) */ + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, 7, 0x0010,\ + MTD_7_0010_SPEED_BIT_POS, MTD_7_0010_SPEED_BIT_LENGTH, \ + 0)); + + /* Take care of speed bits in 7.8000 (1000BASE-T speed bits) */ + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, 7, 0x8000,\ + MTD_7_8000_SPEED_BIT_POS, MTD_7_8000_SPEED_BIT_LENGTH, \ + 0)); + + /* Now take care of bit in 7.0020 (10GBASE-T) */ + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, 7, 0x0020,\ + MTD_7_0020_SPEED_BIT_POS, MTD_7_0020_SPEED_BIT_LENGTH, 0)); + + if (MTD_IS_X33X0_BASE(devPtr->deviceId)) { + /* Now take care of bits in 7.0020 (2.5G, 5G speed bits) */ + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, 7, 0x0020,\ + MTD_7_0020_SPEED_BIT_POS2, MTD_7_0020_SPEED_BIT_LENGTH2, 0)); + } + } else { + /* Take care of bits in 7.0010 (advertisement register, 10BT and 100BT bits) */ + 
ATTEMPT(mtdHwSetPhyRegField(devPtr, port, 7, 0x0010,\ + MTD_7_0010_SPEED_BIT_POS, MTD_7_0010_SPEED_BIT_LENGTH, \ + (speed_bits & MTD_LOWER_BITS_MASK))); + + /* Take care of speed bits in 7.8000 (1000BASE-T speed bits) */ + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, 7, 0x8000,\ + MTD_7_8000_SPEED_BIT_POS, MTD_7_8000_SPEED_BIT_LENGTH, \ + MTD_GET_1000BT_BITS(speed_bits))); + + + /* Now take care of bits in 7.0020 (10GBASE-T first) */ + ATTEMPT(mtdHwXmdioRead(devPtr, port, 7, 0x0020, &tempRegValue)); + ATTEMPT(mtdHwSetRegFieldToWord(tempRegValue, MTD_GET_10GBT_BIT(speed_bits),\ + MTD_7_0020_SPEED_BIT_POS, MTD_7_0020_SPEED_BIT_LENGTH, \ + &tempRegValue)); + + if (MTD_IS_X33X0_BASE(devPtr->deviceId)) { + /* Now take care of 2.5G bit in 7.0020 */ + ATTEMPT(mtdHwSetRegFieldToWord(tempRegValue, MTD_GET_2P5GBT_BIT(speed_bits),\ + 7, 1, \ + &tempRegValue)); + + /* Now take care of 5G bit in 7.0020 */ + ATTEMPT(mtdHwSetRegFieldToWord(tempRegValue, MTD_GET_5GBT_BIT(speed_bits),\ + 8, 1, \ + &tempRegValue)); + } + + /* Now write result back to 7.0020 */ + ATTEMPT(mtdHwXmdioWrite(devPtr, port, 7, 0x0020, tempRegValue)); + + if (MTD_GET_10GBT_BIT(speed_bits) || + MTD_GET_2P5GBT_BIT(speed_bits) || + MTD_GET_5GBT_BIT(speed_bits)) { + /* Set XNP on if any bit that required it was set */ + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, 7, 0, 13, 1, 1)); + } + } + + if (anRestart) { + return ((MTD_STATUS)(mtdAutonegEnable(devPtr, port) || + mtdAutonegRestart(devPtr, port))); + } + + return MTD_OK; +} + +MTD_STATUS mtdUndoForcedSpeed( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_BOOL anRestart) +{ + + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, MTD_TUNIT_IEEE_PMA_CTRL1, 13, 1, 1)); + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, MTD_TUNIT_IEEE_PMA_CTRL1, 6, 1, 1)); + + /* when speed bits are changed, T unit sw reset is required, wait until phy is ready */ + ATTEMPT(mtdSoftwareReset(devPtr, port, 1000)); + + if (anRestart) { + return ((MTD_STATUS)(mtdAutonegEnable(devPtr, port) || + mtdAutonegRestart(devPtr, port))); + } + + return MTD_OK; +} + + + +MTD_STATUS mtdGetForcedSpeed( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_BOOL *speedIsForced, + OUT MTD_U16 *forcedSpeed) +{ + MTD_U16 val, bit0, bit1, forcedSpeedBits, duplexBit; + MTD_BOOL anDisabled; + + *speedIsForced = MTD_FALSE; + *forcedSpeed = MTD_ADV_NONE; + + /* check if 7.0.12 is 0 or 1 (disabled or enabled) */ + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, 7, 0, 12, 1, &val)); + + (val) ? 
(anDisabled = MTD_FALSE) : (anDisabled = MTD_TRUE); + + if (anDisabled) { + /* autoneg is disabled, see if it's forced to one of the speeds that work without AN */ + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, MTD_TUNIT_IEEE_PMA_CTRL1, 6, 1, &bit0)); + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, MTD_TUNIT_IEEE_PMA_CTRL1, 13, 1, &bit1)); + + /* now read the duplex bit setting */ + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, 7, 0x8000, 4, 1, &duplexBit)); + + forcedSpeedBits = 0; + forcedSpeedBits = bit0 | (bit1 << 1); + + if (forcedSpeedBits == 0) { + /* it's set to 10BT */ + if (duplexBit) { + *speedIsForced = MTD_TRUE; + *forcedSpeed = MTD_SPEED_10M_FD_AN_DIS; + } else { + *speedIsForced = MTD_TRUE; + *forcedSpeed = MTD_SPEED_10M_HD_AN_DIS; + } + } else if (forcedSpeedBits == 2) { + /* it's set to 100BT */ + if (duplexBit) { + *speedIsForced = MTD_TRUE; + *forcedSpeed = MTD_SPEED_100M_FD_AN_DIS; + } else { + *speedIsForced = MTD_TRUE; + *forcedSpeed = MTD_SPEED_100M_HD_AN_DIS; + } + } + /* else it's set to 1000BT or 10GBT which require AN to work */ + } + + return MTD_OK; +} + +MTD_STATUS mtdAutonegRestart( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port) +{ + /* set 7.0.9, restart AN */ + return (mtdHwSetPhyRegField(devPtr, port, 7, 0, + 9, 1, 1)); +} + + +MTD_STATUS mtdAutonegEnable( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port) +{ + /* set 7.0.12=1, enable AN */ + return (mtdHwSetPhyRegField(devPtr, port, 7, 0, + 12, 1, 1)); +} + +/****************************************************************************** + MTD_STATUS mtdAutonegIsSpeedDuplexResolutionDone + ( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_BOOL *anSpeedResolutionDone + ); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + + Outputs: + anSpeedResolutionDone - one of the following + MTD_TRUE if speed/duplex is resolved + MTD_FALSE if speed/duplex is not resolved + + Returns: + MTD_OK or MTD_FAIL, if query was successful or not + + Description: + Queries register 3.8008.11 Speed/Duplex resolved to see if autonegotiation + is resolved or in progress. See note below. This function is only to be + called if autonegotation is enabled and speed is not forced. + + anSpeedResolutionDone being MTD_TRUE, only indicates if AN has determined + the speed and duplex bits in 3.8008, which will indicate what registers + to read later for AN resolution after AN has completed. + + Side effects: + None + + Notes/Warnings: + If autonegotiation is disabled or speed is forced, this function returns + MTD_TRUE. + +******************************************************************************/ +MTD_STATUS mtdAutonegIsSpeedDuplexResolutionDone( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_BOOL *anSpeedResolutionDone) +{ + MTD_U16 val; + + /* read speed/duplex resolution done bit in 3.8008 bit 11 */ + if (mtdHwGetPhyRegField(devPtr, port, + 3, 0x8008, 11, 1, &val) == MTD_FAIL) { + *anSpeedResolutionDone = MTD_FALSE; + return MTD_FAIL; + } + + (val) ? 
(*anSpeedResolutionDone = MTD_TRUE) : (*anSpeedResolutionDone = MTD_FALSE); + + return MTD_OK; +} + + +MTD_STATUS mtdGetAutonegSpeedDuplexResolution( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_U16 *speedResolution) +{ + MTD_U16 val, speed, speed2, duplex; + MTD_BOOL resDone; + + *speedResolution = MTD_ADV_NONE; + + /* check if AN is enabled */ + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, \ + 7, 0, 12, 1, &val)); + + if (val) { + /* an is enabled, check if speed is resolved */ + ATTEMPT(mtdAutonegIsSpeedDuplexResolutionDone(devPtr, port, &resDone)); + + if (resDone) { + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, \ + 3, 0x8008, 14, 2, &speed)); + + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, \ + 3, 0x8008, 13, 1, &duplex)); + + switch (speed) { + case MTD_CU_SPEED_10_MBPS: + if (duplex) { + *speedResolution = MTD_SPEED_10M_FD; + } else { + *speedResolution = MTD_SPEED_10M_HD; + } + break; + case MTD_CU_SPEED_100_MBPS: + if (duplex) { + *speedResolution = MTD_SPEED_100M_FD; + } else { + *speedResolution = MTD_SPEED_100M_HD; + } + break; + case MTD_CU_SPEED_1000_MBPS: + if (duplex) { + *speedResolution = MTD_SPEED_1GIG_FD; + } else { + *speedResolution = MTD_SPEED_1GIG_HD; + } + break; + case MTD_CU_SPEED_10_GBPS: /* also MTD_CU_SPEED_NBT */ + if (MTD_IS_X32X0_BASE(devPtr->deviceId)) { + *speedResolution = MTD_SPEED_10GIG_FD; /* 10G has only full duplex, ignore duplex bit */ + } else { + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, \ + 3, 0x8008, 2, 2, &speed2)); + + switch (speed2) { + case MTD_CU_SPEED_NBT_10G: + *speedResolution = MTD_SPEED_10GIG_FD; + break; + + case MTD_CU_SPEED_NBT_5G: + *speedResolution = MTD_SPEED_5GIG_FD; + break; + + case MTD_CU_SPEED_NBT_2P5G: + *speedResolution = MTD_SPEED_2P5GIG_FD; + break; + + default: + /* this is an error */ + return MTD_FAIL; + } + } + break; + default: + /* this is an error */ + return MTD_FAIL; + } + + } + + } + + return MTD_OK; +} + + +/****************************************************************************/ +MTD_STATUS mtdIsBaseTUp( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_U16 *speed, + OUT MTD_BOOL *linkUp) +{ + MTD_BOOL speedIsForced; + MTD_U16 forcedSpeed, cuSpeed, cuLinkStatus; + + *linkUp = MTD_FALSE; + *speed = MTD_ADV_NONE; + + /* first check if speed is forced to one of the speeds not requiring AN to train */ + ATTEMPT(mtdGetForcedSpeed(devPtr, port, &speedIsForced, &forcedSpeed)); + + if (speedIsForced) { + /* check if the link is up at the speed it's forced to */ + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, 3, 0x8008, 14, 2, &cuSpeed)); + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, 3, 0x8008, 10, 1, &cuLinkStatus)); + + switch (forcedSpeed) { + case MTD_SPEED_10M_HD_AN_DIS: + case MTD_SPEED_10M_FD_AN_DIS: + /* might want to add checking the duplex to make sure there + * is no duplex mismatch */ + if (cuSpeed == MTD_CU_SPEED_10_MBPS) { + *speed = forcedSpeed; + } else { + *speed = MTD_SPEED_MISMATCH; + } + if (cuLinkStatus) { + *linkUp = MTD_TRUE; + } + break; + + case MTD_SPEED_100M_HD_AN_DIS: + case MTD_SPEED_100M_FD_AN_DIS: + /* might want to add checking the duplex to make sure there + * is no duplex mismatch */ + if (cuSpeed == MTD_CU_SPEED_100_MBPS) { + *speed = forcedSpeed; + } else { + *speed = MTD_SPEED_MISMATCH; + } + if (cuLinkStatus) { + *linkUp = MTD_TRUE; + } + break; + + default: + return MTD_FAIL; + } + } else { + /* must be going through AN */ + ATTEMPT(mtdGetAutonegSpeedDuplexResolution(devPtr, port, speed)); + + if (*speed != MTD_ADV_NONE) { + /* check if the link is up at the speed it's AN 
to */ + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, 3, 0x8008, 10, 1, &cuLinkStatus)); + + switch (*speed) { + case MTD_SPEED_10M_HD: + case MTD_SPEED_10M_FD: + case MTD_SPEED_100M_HD: + case MTD_SPEED_100M_FD: + case MTD_SPEED_1GIG_HD: + case MTD_SPEED_1GIG_FD: + case MTD_SPEED_10GIG_FD: + case MTD_SPEED_2P5GIG_FD: + case MTD_SPEED_5GIG_FD: + if (cuLinkStatus) { + *linkUp = MTD_TRUE; + } + break; + default: + return MTD_FAIL; + } + + } + /* else link is down, and AN is in progress, */ + } + + if (*speed == MTD_SPEED_MISMATCH) { + return MTD_FAIL; + } else { + return MTD_OK; + } + +} + +MTD_STATUS mtdSetPauseAdvertisement( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U32 pauseType, + IN MTD_BOOL anRestart) +{ + /* sets/clears bits 11, 10 (A6,A5 in the tech bit field of 7.16) */ + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, 7, 0x0010, \ + 10, 2, (MTD_U16)pauseType)); + + if (anRestart) { + return ((MTD_STATUS)(mtdAutonegEnable(devPtr, port) || + mtdAutonegRestart(devPtr, port))); + } + + return MTD_OK; +} + + +/****************************************************************************** + MTD_STATUS mtdAutonegIsCompleted + ( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_BOOL *anStatusReady + ); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + + Outputs: + anStatusReady - one of the following + MTD_TRUE if AN status registers are available to be read (7.1, 7.33, 7.32769, etc.) + MTD_FALSE if AN is not completed and AN status registers may contain old data + + Returns: + MTD_OK or MTD_FAIL, if query was successful or not + + Description: + Checks 7.1.5 for 1. If 1, returns MTD_TRUE. If not, returns MTD_FALSE. Many + autonegotiation status registers are not valid unless AN has completed + meaning 7.1.5 = 1. + + Side effects: + None + + Notes/Warnings: + Call this function before reading 7.33 or 7.32769 to check for master/slave + resolution or other negotiated parameters which are negotiated during + autonegotiation like fast retrain, fast retrain type, etc. + +******************************************************************************/ +static MTD_STATUS mtdAutonegIsCompleted( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_BOOL *anStatusReady) +{ + MTD_U16 val; + + /* read an completed, 7.1.5 bit */ + if (mtdHwGetPhyRegField(devPtr, port, + 7, 1, 5, 1, &val) == MTD_FAIL) { + *anStatusReady = MTD_FALSE; + return MTD_FAIL; + } + + (val) ? 
(*anStatusReady = MTD_TRUE) : (*anStatusReady = MTD_FALSE); + + return MTD_OK; +} + + +MTD_STATUS mtdGetLPAdvertisedPause( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_U8 *pauseBits) +{ + MTD_U16 val; + MTD_BOOL anStatusReady; + + /* Make sure AN is complete */ + ATTEMPT(mtdAutonegIsCompleted(devPtr, port, &anStatusReady)); + + if (anStatusReady == MTD_FALSE) { + *pauseBits = MTD_CLEAR_PAUSE; + return MTD_FAIL; + } + + /* get bits 11, 10 (A6,A5 in the tech bit field of 7.19) */ + if (mtdHwGetPhyRegField(devPtr, port, 7, 19, + 10, 2, &val) == MTD_FAIL) { + *pauseBits = MTD_CLEAR_PAUSE; + return MTD_FAIL; + } + + *pauseBits = (MTD_U8)val; + + return MTD_OK; +} + +/******************************************************************* + Firmware Version + *******************************************************************/ +/****************************************************************************/ +MTD_STATUS mtdGetFirmwareVersion( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_U8 *major, + OUT MTD_U8 *minor, + OUT MTD_U8 *inc, + OUT MTD_U8 *test) +{ + MTD_U16 reg_49169, reg_49170; + + ATTEMPT(mtdHwXmdioRead(devPtr, port, 1, 49169, ®_49169)); + + *major = (reg_49169 & 0xFF00) >> 8; + *minor = (reg_49169 & 0x00FF); + + ATTEMPT(mtdHwXmdioRead(devPtr, port, 1, 49170, ®_49170)); + + *inc = (reg_49170 & 0xFF00) >> 8; + *test = (reg_49170 & 0x00FF); + + /* firmware is not running if all 0's */ + if (!(*major || *minor || *inc || *test)) { + return MTD_FAIL; + } + return MTD_OK; +} + + +MTD_STATUS mtdGetPhyRevision( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_DEVICE_ID * phyRev, + OUT MTD_U8 *numPorts, + OUT MTD_U8 *thisPort) +{ + MTD_U16 temp = 0, tryCounter, temp2, baseType, reportedHwRev; + MTD_U16 revision = 0, numports, thisport, readyBit, fwNumports, fwThisport; + MTD_BOOL registerExists, regReady, hasMacsec, hasCopper, isE20X0Device; + MTD_U8 major, minor, inc, test; + + *phyRev = MTD_REV_UNKNOWN; /* in case we have any failed ATTEMPT below, will return unknown */ + *numPorts = 0; + *thisPort = 0; + + /* first check base type of device, get reported rev and port info */ + ATTEMPT(mtdHwXmdioRead(devPtr, port, 3, 0xD00D, &temp)); + baseType = ((temp & 0xFC00) >> 6); + reportedHwRev = (temp & 0x000F); + numports = ((temp & 0x0380) >> 7) + 1; + thisport = ((temp & 0x0070) >> 4); + + /* find out if device has macsec/ptp, copper unit or is an E20X0-type device */ + ATTEMPT(mtdCheckDeviceCapabilities(devPtr, port, &hasMacsec, &hasCopper, &isE20X0Device)); + + /* check if internal processor firmware is up and running, and if so, easier to get info */ + if (mtdGetFirmwareVersion(devPtr, port, &major, &minor, &inc, &test) == MTD_FAIL) { + major = minor = inc = test = 0; /* this is expected if firmware is not loaded/running */ + } + + if (major == 0 && minor == 0 && inc == 0 && test == 0) { + /* no firmware running, have to verify device revision */ + if (MTD_IS_X32X0_BASE(baseType)) { + /* A0 and Z2 report the same revision, need to check which is which */ + if (reportedHwRev == 1) { + /* need to figure out if it's A0 or Z2 */ + /* remove internal reset */ + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, 3, 0xD801, 5, 1, 1)); + + /* wait until it's ready */ + regReady = MTD_FALSE; + tryCounter = 0; + while (regReady == MTD_FALSE && tryCounter++ < 10) { + ATTEMPT(mtdWait(1)); /* timeout is set to 10 ms */ + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, 3, 0xD007, 6, 1, &readyBit)); + if (readyBit == 1) { + regReady = MTD_TRUE; + } + } + + if (regReady == MTD_FALSE) { + /* timed out, 
can't tell for sure what rev this is */ + *numPorts = 0; + *thisPort = 0; + *phyRev = MTD_REV_UNKNOWN; + return MTD_FAIL; + } + + /* perform test */ + registerExists = MTD_FALSE; + ATTEMPT(mtdHwXmdioRead(devPtr, port, 3, 0x8EC6, &temp)); + ATTEMPT(mtdHwXmdioWrite(devPtr, port, 3, 0x8EC6, 0xA5A5)); + ATTEMPT(mtdHwXmdioRead(devPtr, port, 3, 0x8EC6, &temp2)); + + /* put back internal reset */ + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, 3, 0xD801, 5, 1, 0)); + + if (temp == 0 && temp2 == 0xA5A5) { + registerExists = MTD_TRUE; + } + + if (registerExists == MTD_TRUE) { + revision = 2; /* this is actually QA0 */ + } else { + revision = reportedHwRev; /* this is a QZ2 */ + } + + } else { + /* it's not A0 or Z2, use what's reported by the hardware */ + revision = reportedHwRev; + } + } else if (MTD_IS_X33X0_BASE(baseType)) { + /* all 33X0 devices report correct revision */ + revision = reportedHwRev; + } + + /* have to use what's reported by the hardware */ + *numPorts = (MTD_U8)numports; + *thisPort = (MTD_U8)thisport; + } else { + /* there is firmware loaded/running in internal processor */ + /* can get device revision reported by firmware */ + ATTEMPT(mtdHwXmdioRead(devPtr, port, MTD_TUNIT_PHY_REV_INFO_REG, &temp)); + ATTEMPT(mtdHwGetRegFieldFromWord(temp, 0, 4, &revision)); + ATTEMPT(mtdHwGetRegFieldFromWord(temp, 4, 3, &fwNumports)); + ATTEMPT(mtdHwGetRegFieldFromWord(temp, 7, 3, &fwThisport)); + if (fwNumports == numports && fwThisport == thisport) { + *numPorts = (MTD_U8)numports; + *thisPort = (MTD_U8)thisport; + } else { + *phyRev = MTD_REV_UNKNOWN; + *numPorts = 0; + *thisPort = 0; + return MTD_FAIL; /* firmware and hardware are reporting different values */ + } + } + + /* now have correct information to build up the MTD_DEVICE_ID */ + if (MTD_IS_X32X0_BASE(baseType)) { + temp = MTD_X32X0_BASE; + } else if (MTD_IS_X33X0_BASE(baseType)) { + temp = MTD_X33X0_BASE; + } else { + *phyRev = MTD_REV_UNKNOWN; + *numPorts = 0; + *thisPort = 0; + return MTD_FAIL; + } + + if (hasMacsec) { + temp |= MTD_MACSEC_CAPABLE; + } + + if (hasCopper) { + temp |= MTD_COPPER_CAPABLE; + } + + if (MTD_IS_X33X0_BASE(baseType) && isE20X0Device) { + temp |= MTD_E20X0_DEVICE; + } + + temp |= (revision & 0xF); + + *phyRev = (MTD_DEVICE_ID)temp; + + /* make sure we got a good one */ + if (mtdIsPhyRevisionValid(*phyRev) == MTD_OK) { + return MTD_OK; + } else { + return MTD_FAIL; /* unknown or unsupported, if recognized but unsupported, value is still valid */ + } +} + +MTD_STATUS mtdIsPhyRevisionValid(IN MTD_DEVICE_ID phyRev) +{ + switch (phyRev) { + /* list must match MTD_DEVICE_ID */ + case MTD_REV_3240P_Z2: + case MTD_REV_3240P_A0: + case MTD_REV_3240P_A1: + case MTD_REV_3220P_Z2: + case MTD_REV_3220P_A0: + + case MTD_REV_3240_Z2: + case MTD_REV_3240_A0: + case MTD_REV_3240_A1: + case MTD_REV_3220_Z2: + case MTD_REV_3220_A0: + + case MTD_REV_3310P_A0: + case MTD_REV_3320P_A0: + case MTD_REV_3340P_A0: + case MTD_REV_3310_A0: + case MTD_REV_3320_A0: + case MTD_REV_3340_A0: + + case MTD_REV_E2010P_A0: + case MTD_REV_E2020P_A0: + case MTD_REV_E2040P_A0: + case MTD_REV_E2010_A0: + case MTD_REV_E2020_A0: + case MTD_REV_E2040_A0: + + case MTD_REV_2340P_A1: + case MTD_REV_2320P_A0: + case MTD_REV_2340_A1: + case MTD_REV_2320_A0: + return MTD_OK; + + /* unsupported PHYs */ + case MTD_REV_3310P_Z1: + case MTD_REV_3320P_Z1: + case MTD_REV_3340P_Z1: + case MTD_REV_3310_Z1: + case MTD_REV_3320_Z1: + case MTD_REV_3340_Z1: + + case MTD_REV_3310P_Z2: + case MTD_REV_3320P_Z2: + case MTD_REV_3340P_Z2: + case MTD_REV_3310_Z2: + case 
MTD_REV_3320_Z2: + case MTD_REV_3340_Z2: + + + case MTD_REV_E2010P_Z2: + case MTD_REV_E2020P_Z2: + case MTD_REV_E2040P_Z2: + case MTD_REV_E2010_Z2: + case MTD_REV_E2020_Z2: + case MTD_REV_E2040_Z2: + default: + return MTD_FAIL; /* is either MTD_REV_UNKNOWN or not in the above list */ + } +} + +/* mtdCunit.c */ +static MTD_STATUS mtdCunitSwReset( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port) +{ + return mtdHwSetPhyRegField(devPtr, port, MTD_CUNIT_PORT_CTRL, 15, 1, 1); +} + +/* mtdHxunit.c */ +static MTD_STATUS mtdRerunSerdesAutoInitializationUseAutoMode( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port) +{ + MTD_U16 temp, temp2, temp3; + MTD_U16 waitCounter; + + ATTEMPT(mtdHwXmdioRead(devPtr, port, MTD_SERDES_CTRL_STATUS, &temp)); + + ATTEMPT(mtdHwSetRegFieldToWord(temp, 3, 14, 2, &temp2)); /* execute bits and disable bits set */ + + ATTEMPT(mtdHwXmdioWrite(devPtr, port, MTD_SERDES_CTRL_STATUS, temp2)); + + /* wait for it to be done */ + waitCounter = 0; + ATTEMPT(mtdHwXmdioRead(devPtr, port, MTD_SERDES_CTRL_STATUS, &temp3)); + while ((temp3 & 0x8000) && (waitCounter < 100)) { + ATTEMPT(mtdWait(1)); + ATTEMPT(mtdHwXmdioRead(devPtr, port, MTD_SERDES_CTRL_STATUS, &temp3)); + waitCounter++; + } + + /* if speed changed, let it stay. that's the speed that it ended up changing to/serdes was initialied to */ + if (waitCounter >= 100) { + return MTD_FAIL; /* execute timed out */ + } + + return MTD_OK; +} + + +/* mtdHunit.c */ +/****************************************************************************** + Mac Interface functions +******************************************************************************/ + +MTD_STATUS mtdSetMacInterfaceControl( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 macType, + IN MTD_BOOL macIfPowerDown, + IN MTD_U16 macIfSnoopSel, + IN MTD_U16 macIfActiveLaneSelect, + IN MTD_U16 macLinkDownSpeed, + IN MTD_U16 macMaxIfSpeed, /* 33X0/E20X0 devices only */ + IN MTD_BOOL doSwReset, + IN MTD_BOOL rerunSerdesInitialization) +{ + MTD_U16 cunitPortCtrl, cunitModeConfig; + + /* do range checking on parameters */ + if ((macType > MTD_MAC_LEAVE_UNCHANGED)) { + return MTD_FAIL; + } + + if ((macIfSnoopSel > MTD_MAC_SNOOP_LEAVE_UNCHANGED) || + (macIfSnoopSel == 1)) { + return MTD_FAIL; + } + + if (macIfActiveLaneSelect > 1) { + return MTD_FAIL; + } + + if (macLinkDownSpeed > MTD_MAC_SPEED_LEAVE_UNCHANGED) { + return MTD_FAIL; + } + + if (!(macMaxIfSpeed == MTD_MAX_MAC_SPEED_10G || + macMaxIfSpeed == MTD_MAX_MAC_SPEED_5G || + macMaxIfSpeed == MTD_MAX_MAC_SPEED_2P5G || + macMaxIfSpeed == MTD_MAX_MAC_SPEED_LEAVE_UNCHANGED || + macMaxIfSpeed == MTD_MAX_MAC_SPEED_NOT_APPLICABLE)) { + return MTD_FAIL; + } + + + ATTEMPT(mtdHwXmdioRead(devPtr, port, MTD_CUNIT_PORT_CTRL, &cunitPortCtrl)); + ATTEMPT(mtdHwXmdioRead(devPtr, port, MTD_CUNIT_MODE_CONFIG, &cunitModeConfig)); + + /* Because writes of some of these bits don't show up in the register on a read + * until after the software reset, we can't do repeated read-modify-writes + * to the same register or we will lose those changes. 
+ + * This approach also cuts down on IO and speeds up the code + */ + + if (macType < MTD_MAC_LEAVE_UNCHANGED) { + ATTEMPT(mtdHwSetRegFieldToWord(cunitPortCtrl, macType, 0, 3, &cunitPortCtrl)); + } + + ATTEMPT(mtdHwSetRegFieldToWord(cunitModeConfig, (MTD_U16)macIfPowerDown, 3, 1, &cunitModeConfig)); + + if (macIfSnoopSel < MTD_MAC_SNOOP_LEAVE_UNCHANGED) { + ATTEMPT(mtdHwSetRegFieldToWord(cunitModeConfig, macIfSnoopSel, 8, 2, &cunitModeConfig)); + } + + ATTEMPT(mtdHwSetRegFieldToWord(cunitModeConfig, macIfActiveLaneSelect, 10, 1, &cunitModeConfig)); + + if (macLinkDownSpeed < MTD_MAC_SPEED_LEAVE_UNCHANGED) { + ATTEMPT(mtdHwSetRegFieldToWord(cunitModeConfig, macLinkDownSpeed, 6, 2, &cunitModeConfig)); + } + + /* Now write changed values */ + ATTEMPT(mtdHwXmdioWrite(devPtr, port, MTD_CUNIT_PORT_CTRL, cunitPortCtrl)); + ATTEMPT(mtdHwXmdioWrite(devPtr, port, MTD_CUNIT_MODE_CONFIG, cunitModeConfig)); + + if (MTD_IS_X33X0_BASE(devPtr->deviceId)) { + if (macMaxIfSpeed != MTD_MAX_MAC_SPEED_LEAVE_UNCHANGED) { + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, 31, 0xF0A8, 0, 2, macMaxIfSpeed)); + } + } + + if (doSwReset == MTD_TRUE) { + ATTEMPT(mtdCunitSwReset(devPtr, port)); + + if (macLinkDownSpeed < MTD_MAC_SPEED_LEAVE_UNCHANGED) { + ATTEMPT(mtdCunitSwReset(devPtr, port)); /* need 2x for changes to macLinkDownSpeed */ + } + + if (rerunSerdesInitialization == MTD_TRUE) { + ATTEMPT(mtdRerunSerdesAutoInitializationUseAutoMode(devPtr, port)); + } + } + + return MTD_OK; +} + + +/******************************************************************************* +* mtdSemCreate +* +* DESCRIPTION: +* Create semaphore. +* +* INPUTS: +* state - beginning state of the semaphore, either MTD_SEM_EMPTY or MTD_SEM_FULL +* +* OUTPUTS: +* None +* +* RETURNS: +* MTD_SEM if success. Otherwise, NULL +* +* COMMENTS: +* None +* +*******************************************************************************/ +static MTD_SEM mtdSemCreate( + IN MTD_DEV * dev, + IN MTD_SEM_BEGIN_STATE state) +{ + if (dev->semCreate) + return dev->semCreate(state); + + return 1; /* should return any value other than 0 to let it keep going */ +} + +/******************************************************************************* +* mtdSemDelete +* +* DESCRIPTION: +* Delete semaphore. +* +* INPUTS: +* smid - semaphore Id +* +* OUTPUTS: +* None +* +* RETURNS: +* MTD_OK - on success +* MTD_FAIL - on error +* +* COMMENTS: +* None +* +*******************************************************************************/ +static MTD_STATUS mtdSemDelete( + IN MTD_DEV * dev, + IN MTD_SEM smid) +{ + if ((dev->semDelete) && (smid)) + if (dev->semDelete(smid)) + return MTD_FAIL; + + return MTD_OK; +} + + + +MTD_STATUS mtdLoadDriver( + IN FMTD_READ_MDIO readMdio, + IN FMTD_WRITE_MDIO writeMdio, + IN MTD_BOOL macsecIndirectAccess, + IN FMTD_SEM_CREATE semCreate, + IN FMTD_SEM_DELETE semDelete, + IN FMTD_SEM_TAKE semTake, + IN FMTD_SEM_GIVE semGive, + IN MTD_U16 anyPort, + OUT MTD_DEV * dev) +{ + MTD_U16 data; + + MTD_DBG_INFO("mtdLoadDriver Called.\n"); + + /* Check for parameters validity */ + if (dev == NULL) { + MTD_DBG_ERROR("MTD_DEV pointer is NULL.\n"); + return MTD_API_ERR_DEV; + } + + /* The initialization was already done. 
*/ + if (dev->devEnabled) { + MTD_DBG_ERROR("Device Driver already loaded.\n"); + return MTD_API_ERR_DEV_ALREADY_EXIST; + } + + /* Make sure mtdWait() was implemented */ + if (mtdWait(1) == MTD_FAIL) { + MTD_DBG_ERROR("mtdWait() not implemented.\n"); + return MTD_FAIL; + } + + dev->fmtdReadMdio = readMdio; + dev->fmtdWriteMdio = writeMdio; + + dev->semCreate = semCreate; + dev->semDelete = semDelete; + dev->semTake = semTake; + dev->semGive = semGive; + dev->macsecIndirectAccess = macsecIndirectAccess; /* 88X33X0 and later force direct access */ + + /* try to read 1.0 */ + if ((mtdHwXmdioRead(dev, anyPort, 1, 0, &data)) != MTD_OK) { + MTD_DBG_ERROR("Reading to reg %x failed.\n", 0); + return MTD_API_FAIL_READ_REG; + } + + MTD_DBG_INFO("mtdLoadDriver successful.\n"); + + /* Initialize the MACsec Register Access semaphore. */ + if ((dev->multiAddrSem = mtdSemCreate(dev, MTD_SEM_FULL)) == 0) { + MTD_DBG_ERROR("semCreate Failed.\n"); + return MTD_API_FAIL_SEM_CREATE; + } + + if (dev->msec_ctrl.msec_rev == MTD_MSEC_REV_FPGA) { + dev->deviceId = MTD_REV_3310P_Z2; /* verification: change if needed */ + dev->numPorts = 1; /* verification: change if needed */ + dev->thisPort = 0; + } else { + /* After everything else is done, can fill in the device id */ + if ((mtdGetPhyRevision(dev, anyPort, + &(dev->deviceId), + &(dev->numPorts), + &(dev->thisPort))) != MTD_OK) { + MTD_DBG_ERROR("mtdGetPhyRevision Failed.\n"); + return MTD_FAIL; + } + } + + + if (MTD_IS_X33X0_BASE(dev->deviceId)) { + dev->macsecIndirectAccess = MTD_FALSE; /* bug was fixed in 88X33X0 and later revisions, go direct */ + } + + dev->devEnabled = MTD_TRUE; + + MTD_DBG_INFO("mtdLoadDriver successful !!!.\n"); + + return MTD_OK; +} + +/******************************************************************************* +* mtdUnloadDriver +* +* DESCRIPTION: +* This function clears MTD_DEV structure. +* +* INPUTS: +* None. +* +* OUTPUTS: +* None. +* +* RETURNS: +* MTD_OK - on success +* MTD_FAIL - on error +* +* COMMENTS: +* 1. This function should be called only after successful execution of +* mtdLoadDriver(). +* +*******************************************************************************/ +MTD_STATUS mtdUnloadDriver(IN MTD_DEV * dev) +{ + MTD_DBG_INFO("mtdUnloadDriver Called.\n"); + + /* Delete the MACsec register access semaphore. */ + if (mtdSemDelete(dev, dev->multiAddrSem) != MTD_OK) { + MTD_DBG_ERROR("SemDelete Failed.\n"); + return MTD_API_FAIL_SEM_DELETE; + } + + dev->fmtdReadMdio = NULL; + dev->fmtdWriteMdio = NULL; + + dev->semCreate = NULL; + dev->semDelete = NULL; + dev->semTake = NULL; + dev->semGive = NULL; + + dev->devEnabled = MTD_FALSE; + + return MTD_OK; +} + diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_mtd.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_mtd.h new file mode 100644 index 0000000000000000000000000000000000000000..c9f2e2f2c5c050261fd6aa1ebdf54a6528161270 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_mtd.h @@ -0,0 +1,1683 @@ +/* + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + */ + + +#ifndef _TXGBE_MTD_H_ +#define _TXGBE_MTD_H_ + +#define C_LINKAGE 1 /* set to 1 if C compile/linkage on C files is desired with C++ */ + +#if C_LINKAGE +#if defined __cplusplus + extern "C" { +#endif +#endif + +/* general */ + +#undef IN +#define IN +#undef OUT +#define OUT +#undef INOUT +#define INOUT + +#ifndef NULL +#define NULL ((void *)0) +#endif + +typedef void MTD_VOID; +typedef char MTD_8; +typedef short MTD_16; +typedef long MTD_32; +typedef long long MTD_64; + +typedef unsigned char MTD_U8; +typedef unsigned short MTD_U16; +typedef unsigned long MTD_U32; +typedef unsigned int MTD_UINT; +typedef int MTD_INT; +typedef signed short MTD_S16; + +typedef unsigned long long MTD_U64; + +typedef enum { + MTD_FALSE = 0, + MTD_TRUE = 1 +} MTD_BOOL; + +#define MTD_CONVERT_BOOL_TO_UINT(boolVar, uintVar) \ + {(boolVar) ? (uintVar = 1) : (uintVar = 0); } +#define MTD_CONVERT_UINT_TO_BOOL(uintVar, boolVar) \ + {(uintVar) ? (boolVar = MTD_TRUE) : (boolVar = MTD_FALSE); } +#define MTD_GET_BOOL_AS_BIT(boolVar) ((boolVar) ? 1 : 0) +#define MTD_GET_BIT_AS_BOOL(uintVar) ((uintVar) ? MTD_TRUE : MTD_FALSE) + +typedef void (*MTD_VOIDFUNCPTR) (void); /* ptr to function returning void */ +typedef MTD_U32 (*MTD_INTFUNCPTR) (void); /* ptr to function returning int */ + +typedef MTD_U32 MTD_STATUS; + +/* Defines for semaphore support */ +typedef MTD_U32 MTD_SEM; + +typedef enum { + MTD_SEM_EMPTY, + MTD_SEM_FULL +} MTD_SEM_BEGIN_STATE; + +typedef MTD_SEM (*FMTD_SEM_CREATE)(MTD_SEM_BEGIN_STATE state); +typedef MTD_STATUS (*FMTD_SEM_DELETE)(MTD_SEM semId); +typedef MTD_STATUS (*FMTD_SEM_TAKE)(MTD_SEM semId, MTD_U32 timOut); +typedef MTD_STATUS (*FMTD_SEM_GIVE)(MTD_SEM semId); + +/* Defines for mtdLoadDriver() mtdUnloadDriver() and all API functions which need MTD_DEV */ +typedef struct _MTD_DEV MTD_DEV; +typedef MTD_DEV * MTD_DEV_PTR; + +typedef MTD_STATUS (*FMTD_READ_MDIO)( + MTD_DEV * dev, + MTD_U16 port, + MTD_U16 mmd, + MTD_U16 reg, + MTD_U16 * value); +typedef MTD_STATUS (*FMTD_WRITE_MDIO)( + MTD_DEV * dev, + MTD_U16 port, + MTD_U16 mmd, + MTD_U16 reg, + MTD_U16 value); + + + + +/* MTD_DEVICE_ID format: */ +/* Bits 15:13 reserved */ +/* Bit 12: 1-> E20X0 device with max speed of 5G and no fiber interface */ +/* Bit 11: 1-> Macsec Capable (Macsec/PTP module included */ +/* Bit 10: 1-> Copper Capable (T unit interface included) */ +/* Bits 9:4 0x18 -> X32X0 base, 0x1A 0x33X0 base */ +/* Bits 3:0 revision/number of ports indication, see list */ +/* Following defines are for building MTD_DEVICE_ID */ +#define MTD_E20X0_DEVICE (1<<12) /* whether this is an E20X0 device group */ +#define MTD_MACSEC_CAPABLE (1<<11) /* whether the device has a Macsec/PTP module */ +#define MTD_COPPER_CAPABLE (1<<10) /* whether the device has a copper (T unit) module */ +#define MTD_X32X0_BASE (0x18<<4) /* whether the device uses X32X0 firmware base */ +#define MTD_X33X0_BASE (0x1A<<4) /* whether the device uses X33X0 firmware base */ + +/* Following macros are to test MTD_DEVICE_ID for various features */ +#define MTD_IS_E20X0_DEVICE(mTdrevId) ((MTD_BOOL)(mTdrevId & MTD_E20X0_DEVICE)) +#define MTD_IS_MACSEC_CAPABLE(mTdrevId) ((MTD_BOOL)(mTdrevId & MTD_MACSEC_CAPABLE)) +#define MTD_IS_COPPER_CAPABLE(mTdrevId) ((MTD_BOOL)(mTdrevId & MTD_COPPER_CAPABLE)) +#define MTD_IS_X32X0_BASE(mTdrevId) ((MTD_BOOL)((mTdrevId & (0x3F<<4)) == MTD_X32X0_BASE)) +#define 
MTD_IS_X33X0_BASE(mTdrevId) ((MTD_BOOL)((mTdrevId & (0x3F<<4)) == MTD_X33X0_BASE)) + +#define MTD_X33X0BASE_SINGLE_PORTA0 0xA +#define MTD_X33X0BASE_DUAL_PORTA0 0x6 +#define MTD_X33X0BASE_QUAD_PORTA0 0x2 + +/* WARNING: If you add/modify this list, you must also modify mtdIsPhyRevisionValid() */ +typedef enum { + MTD_REV_UNKNOWN = 0, + MTD_REV_3240P_Z2 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x1), + MTD_REV_3240P_A0 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x2), + MTD_REV_3240P_A1 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x3), + MTD_REV_3220P_Z2 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x4), + MTD_REV_3220P_A0 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x5), + MTD_REV_3240_Z2 = (MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x1), + MTD_REV_3240_A0 = (MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x2), + MTD_REV_3240_A1 = (MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x3), + MTD_REV_3220_Z2 = (MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x4), + MTD_REV_3220_A0 = (MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x5), + + MTD_REV_3310P_Z1 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x8), /* 88X33X0 Z1 not supported starting with version 1.2 of API */ + MTD_REV_3320P_Z1 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x4), + MTD_REV_3340P_Z1 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x0), + MTD_REV_3310_Z1 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x8), + MTD_REV_3320_Z1 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x4), + MTD_REV_3340_Z1 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x0), + + MTD_REV_3310P_Z2 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x9), /* 88X33X0 Z2 not supported starting with version 1.2 of API */ + MTD_REV_3320P_Z2 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x5), + MTD_REV_3340P_Z2 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x1), + MTD_REV_3310_Z2 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x9), + MTD_REV_3320_Z2 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x5), + MTD_REV_3340_Z2 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x1), + + MTD_REV_E2010P_Z2 = (MTD_E20X0_DEVICE | MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x9), /* E20X0 Z2 not supported starting with version 1.2 of API */ + MTD_REV_E2020P_Z2 = (MTD_E20X0_DEVICE | MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x5), + MTD_REV_E2040P_Z2 = (MTD_E20X0_DEVICE | MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x1), + MTD_REV_E2010_Z2 = (MTD_E20X0_DEVICE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x9), + MTD_REV_E2020_Z2 = (MTD_E20X0_DEVICE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x5), + MTD_REV_E2040_Z2 = (MTD_E20X0_DEVICE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x1), + + + MTD_REV_3310P_A0 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_SINGLE_PORTA0), + MTD_REV_3320P_A0 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_DUAL_PORTA0), + MTD_REV_3340P_A0 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_QUAD_PORTA0), + MTD_REV_3310_A0 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_SINGLE_PORTA0), + MTD_REV_3320_A0 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_DUAL_PORTA0), + MTD_REV_3340_A0 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_QUAD_PORTA0), + + MTD_REV_E2010P_A0 = (MTD_E20X0_DEVICE | MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_SINGLE_PORTA0), + MTD_REV_E2020P_A0 = (MTD_E20X0_DEVICE | 
MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_DUAL_PORTA0), + MTD_REV_E2040P_A0 = (MTD_E20X0_DEVICE | MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_QUAD_PORTA0), + MTD_REV_E2010_A0 = (MTD_E20X0_DEVICE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_SINGLE_PORTA0), + MTD_REV_E2020_A0 = (MTD_E20X0_DEVICE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_DUAL_PORTA0), + MTD_REV_E2040_A0 = (MTD_E20X0_DEVICE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_QUAD_PORTA0), + + MTD_REV_2340P_A1 = (MTD_MACSEC_CAPABLE | MTD_X32X0_BASE | 0x3), + MTD_REV_2320P_A0 = (MTD_MACSEC_CAPABLE | MTD_X32X0_BASE | 0x5), + MTD_REV_2340_A1 = (MTD_X32X0_BASE | 0x3), + MTD_REV_2320_A0 = (MTD_X32X0_BASE | 0x5) +} MTD_DEVICE_ID; + +typedef enum { + MTD_MSEC_REV_Z0A, + MTD_MSEC_REV_Y0A, + MTD_MSEC_REV_A0B, + MTD_MSEC_REV_FPGA, + MTD_MSEC_REV_UNKNOWN = -1 +} MTD_MSEC_REV; + +/* compatible for USB test */ +typedef struct _MTD_MSEC_CTRL { + MTD_32 dev_num; /* indicates the device number (0 if only one) when multiple devices are present on SVB.*/ + MTD_32 port_num; /* Indicates which port (0 to 4) is requesting CPU */ + MTD_U16 prev_addr; /* < Prev write address */ + MTD_U16 prev_dataL; /* < Prev dataL value */ + MTD_MSEC_REV msec_rev; /* revision */ +} MTD_MSEC_CTRL; + +struct _MTD_DEV { + MTD_DEVICE_ID deviceId; /* type of device and capabilities */ + MTD_BOOL devEnabled; /* whether mtdLoadDriver() called successfully */ + MTD_U8 numPorts; /* number of ports per device */ + MTD_U8 thisPort; /* relative port number on this device starting with 0 (not MDIO address) */ + MTD_SEM multiAddrSem; + + FMTD_READ_MDIO fmtdReadMdio; + FMTD_WRITE_MDIO fmtdWriteMdio; + + FMTD_SEM_CREATE semCreate; /* create semapore */ + FMTD_SEM_DELETE semDelete; /* delete the semapore */ + FMTD_SEM_TAKE semTake; /* try to get a semapore */ + FMTD_SEM_GIVE semGive; /* return semaphore */ + + MTD_U8 macsecIndirectAccess; /* if MTD_TRUE use internal processor to access Macsec */ + MTD_MSEC_CTRL msec_ctrl; /* structure use for internal verification */ + + void *appData; /* application specific data, anything the host wants to pass to the low layer */ +}; + + + + +#define MTD_OK 0 /* Operation succeeded */ +#define MTD_FAIL 1 /* Operation failed */ +#define MTD_PENDING 2 /* Pending */ + + + +/* bit definition */ +#define MTD_BIT_0 0x0001 +#define MTD_BIT_1 0x0002 +#define MTD_BIT_2 0x0004 +#define MTD_BIT_3 0x0008 +#define MTD_BIT_4 0x0010 +#define MTD_BIT_5 0x0020 +#define MTD_BIT_6 0x0040 +#define MTD_BIT_7 0x0080 +#define MTD_BIT_8 0x0100 +#define MTD_BIT_9 0x0200 +#define MTD_BIT_10 0x0400 +#define MTD_BIT_11 0x0800 +#define MTD_BIT_12 0x1000 +#define MTD_BIT_13 0x2000 +#define MTD_BIT_14 0x4000 +#define MTD_BIT_15 0x8000 + + + + + +#define MTD_DBG_ERROR(...) +#define MTD_DBG_INFO(...) +#define MTD_DBG_CRITIC_INFO(...) 
+ + +#define MTD_API_MAJOR_VERSION 2 +#define MTD_API_MINOR_VERSION 0 + +/* This macro is handy for calling a function when you want to test the + return value and return MTD_FAIL, if the function returned MTD_FAIL, + otherwise continue */ +#define ATTEMPT(xFuncToTry) do { if (xFuncToTry == MTD_FAIL) {return MTD_FAIL; }} while (0) + +/* These defines are used for some registers which represent the copper + speed as a 2-bit binary number */ +#define MTD_CU_SPEED_10_MBPS 0 /* copper is 10BASE-T */ +#define MTD_CU_SPEED_100_MBPS 1 /* copper is 100BASE-TX */ +#define MTD_CU_SPEED_1000_MBPS 2 /* copper is 1000BASE-T */ +#define MTD_CU_SPEED_10_GBPS 3 /* copper is 10GBASE-T */ + +/* for 88X33X0 family: */ +#define MTD_CU_SPEED_NBT 3 /* copper is NBASE-T */ +#define MTD_CU_SPEED_NBT_10G 0 /* copper is 10GBASE-T */ +#define MTD_CU_SPEED_NBT_5G 2 /* copper is 5GBASE-T */ +#define MTD_CU_SPEED_NBT_2P5G 1 /* copper is 2.5GBASE-T */ + +#define MTD_ADV_NONE 0x0000 /* No speeds to be advertised */ +#define MTD_SPEED_10M_HD 0x0001 /* 10BT half-duplex */ +#define MTD_SPEED_10M_FD 0x0002 /* 10BT full-duplex */ +#define MTD_SPEED_100M_HD 0x0004 /* 100BASE-TX half-duplex */ +#define MTD_SPEED_100M_FD 0x0008 /* 100BASE-TX full-duplex */ +#define MTD_SPEED_1GIG_HD 0x0010 /* 1000BASE-T half-duplex */ +#define MTD_SPEED_1GIG_FD 0x0020 /* 1000BASE-T full-duplex */ +#define MTD_SPEED_10GIG_FD 0x0040 /* 10GBASE-T full-duplex */ +#define MTD_SPEED_2P5GIG_FD 0x0800 /* 2.5GBASE-T full-duplex, 88X33X0/88E20X0 family only */ +#define MTD_SPEED_5GIG_FD 0x1000 /* 5GBASE-T full-duplex, 88X33X0/88E20X0 family only */ +#define MTD_SPEED_ALL (MTD_SPEED_10M_HD | \ + MTD_SPEED_10M_FD | \ + MTD_SPEED_100M_HD | \ + MTD_SPEED_100M_FD | \ + MTD_SPEED_1GIG_HD | \ + MTD_SPEED_1GIG_FD | \ + MTD_SPEED_10GIG_FD) +#define MTD_SPEED_ALL_33X0 (MTD_SPEED_10M_HD | \ + MTD_SPEED_10M_FD | \ + MTD_SPEED_100M_HD | \ + MTD_SPEED_100M_FD | \ + MTD_SPEED_1GIG_HD | \ + MTD_SPEED_1GIG_FD | \ + MTD_SPEED_10GIG_FD | \ + MTD_SPEED_2P5GIG_FD |\ + MTD_SPEED_5GIG_FD) + +/* these bits are for forcing the speed and disabling autonegotiation */ +#define MTD_SPEED_10M_HD_AN_DIS 0x0080 /* Speed forced to 10BT half-duplex */ +#define MTD_SPEED_10M_FD_AN_DIS 0x0100 /* Speed forced to 10BT full-duplex */ +#define MTD_SPEED_100M_HD_AN_DIS 0x0200 /* Speed forced to 100BT half-duplex */ +#define MTD_SPEED_100M_FD_AN_DIS 0x0400 /* Speed forced to 100BT full-duplex */ + +/* this value is returned for the speed when the link status is checked and the speed has been */ +/* forced to one speed but the link is up at a different speed. it indicates an error. 
*/ +#define MTD_SPEED_MISMATCH 0x8000 /* Speed is forced to one speed, but status indicates another */ + + +/* for macType */ +#define MTD_MAC_TYPE_RXAUI_SGMII_AN_EN (0x0) /* X32X0/X33x0, but not E20x0 */ +#define MTD_MAC_TYPE_RXAUI_SGMII_AN_DIS (0x1) /* X32x0/X3340/X3320, but not X3310/E20x0 */ +#define MTD_MAC_TYPE_XAUI_RATE_ADAPT (0x1) /* X3310,E2010 only */ +#define MTD_MAC_TYPE_RXAUI_RATE_ADAPT (0x2) +#define MTD_MAC_TYPE_XAUI (0x3) /* X3310,E2010 only */ +#define MTD_MAC_TYPE_XFI_SGMII_AN_EN (0x4) /* XFI at 10G, X33x0/E20x0 also use 5GBASE-R/2500BASE-X */ +#define MTD_MAC_TYPE_XFI_SGMII_AN_DIS (0x5) /* XFI at 10G, X33x0/E20x0 also use 5GBASE-R/2500BASE-X */ +#define MTD_MAC_TYPE_XFI_RATE_ADAPT (0x6) +#define MTD_MAC_TYPE_USXGMII (0x7) /* X33x0 only */ +#define MTD_MAC_LEAVE_UNCHANGED (0x8) /* use this option to not touch these bits */ + +/* for macIfSnoopSel */ +#define MTD_MAC_SNOOP_FROM_NETWORK (0x2) +#define MTD_MAC_SNOOP_FROM_HOST (0x3) +#define MTD_MAC_SNOOP_OFF (0x0) +#define MTD_MAC_SNOOP_LEAVE_UNCHANGED (0x4) /* use this option to not touch these bits */ +/* for macLinkDownSpeed */ +#define MTD_MAC_SPEED_10_MBPS MTD_CU_SPEED_10_MBPS +#define MTD_MAC_SPEED_100_MBPS MTD_CU_SPEED_100_MBPS +#define MTD_MAC_SPEED_1000_MBPS MTD_CU_SPEED_1000_MBPS +#define MTD_MAC_SPEED_10_GBPS MTD_CU_SPEED_10_GBPS +#define MTD_MAC_SPEED_LEAVE_UNCHANGED (0x4) +/* X33X0/E20X0 devices only for macMaxIfSpeed */ +#define MTD_MAX_MAC_SPEED_10G (0) +#define MTD_MAX_MAC_SPEED_5G (2) +#define MTD_MAX_MAC_SPEED_2P5G (3) +#define MTD_MAX_MAC_SPEED_LEAVE_UNCHANGED (4) +#define MTD_MAX_MAC_SPEED_NOT_APPLICABLE (4) /* 32X0 devices can pass this */ + +/* 88X3240/3220 Device Number Definitions */ +#define MTD_T_UNIT_PMA_PMD 1 +#define MTD_T_UNIT_PCS_CU 3 +#define MTD_X_UNIT 3 +#define MTD_H_UNIT 4 +#define MTD_T_UNIT_AN 7 +#define MTD_XFI_DSP 30 +#define MTD_C_UNIT_GENERAL 31 +#define MTD_M_UNIT 31 + +/* 88X3240/3220 Device Number Definitions Host Redundant Mode */ +#define MTD_BASER_LANE_0 MTD_H_UNIT +#define MTD_BASER_LANE_1 MTD_X_UNIT + +/* 88X3240/3220 T Unit Registers MMD 1 */ +#define MTD_TUNIT_IEEE_PMA_CTRL1 MTD_T_UNIT_PMA_PMD, 0x0000 /* do not enclose in parentheses */ +#define MTD_TUNIT_IEEE_PMA_DEVID2 MTD_T_UNIT_PMA_PMD, 0x0003 /* do not enclose in parentheses */ +#define MTD_TUNIT_PHY_EXT_CTRL_1 MTD_T_UNIT_PMA_PMD, 0xC000 /* do not enclose in parentheses */ +#define MTD_TUNIT_XG_EXT_STATUS MTD_T_UNIT_PMA_PMD, 0xC001 /* do not enclose in parentheses */ +#define MTD_TUNIT_BIST_STATUS_REG MTD_T_UNIT_PMA_PMD, 0xC00C /* do not enclose in parentheses */ +#define MTD_TUNIT_PHY_REV_INFO_REG MTD_T_UNIT_PMA_PMD, 0xC04E /* do not enclose in parentheses */ +#define MTD_BOOT_STATUS_REG MTD_T_UNIT_PMA_PMD, 0xC050 /* do not enclose in parentheses */ + +#define MTD_TUNIT_IEEE_PCS_CTRL1 MTD_T_UNIT_PCS_CU, 0x0000 /* do not enclose in parentheses */ +/* control/status for serdes initialization */ +#define MTD_SERDES_CTRL_STATUS MTD_T_UNIT_AN, 0x800F /* do not enclose in parentheses */ +/* 88X3240/3220 C Unit Registers MMD 31 */ +#define MTD_CUNIT_MODE_CONFIG MTD_C_UNIT_GENERAL, 0xF000 /* do not enclose in parentheses */ +#define MTD_CUNIT_PORT_CTRL MTD_C_UNIT_GENERAL, 0xF001 /* do not enclose in parentheses */ + +#define MTD_API_FAIL_SEM_CREATE (0x18<<24) /*semCreate Failed. */ +#define MTD_API_FAIL_SEM_DELETE (0x19<<24) /*semDelete Failed. */ +#define MTD_API_FAIL_READ_REG (0x16<<16) /*Reading from phy reg failed. */ +#define MTD_API_ERR_DEV (0x3c<<16) /*driver struture is NULL. 
*/
+#define MTD_API_ERR_DEV_ALREADY_EXIST (0x3e<<16) /*Device Driver already loaded. */
+
+
+#define MTD_CLEAR_PAUSE 0 /* clears both pause bits */
+#define MTD_SYM_PAUSE 1 /* for symmetric pause only */
+#define MTD_ASYM_PAUSE 2 /* for asymmetric pause only */
+#define MTD_SYM_ASYM_PAUSE 3 /* for both */
+
+
+/*******************************************************************************
+ mtdLoadDriver
+
+ DESCRIPTION:
+ Marvell X32X0 Driver Initialization Routine.
+ This is the first routine that needs to be called by system software.
+ It takes parameters from system software, and returns a pointer (*dev)
+ to a data structure which includes information related to this Marvell PHY
+ device. This pointer (*dev) is then used for all the API functions.
+ The following jobs are performed by this routine:
+ 1. store MDIO read/write function into the given MTD_DEV structure
+ 2. run any device specific initialization routine
+ 3. create semaphore if required
+ 4. initialize the deviceId
+
+
+ INPUTS:
+ readMdio - pointer to host's function to do MDIO read
+ writeMdio - pointer to host's function to do MDIO write
+ macsecIndirectAccess - MTD_TRUE to access MacSec through T-unit processor
+ MTD_FALSE to do direct register access
+ semCreate - pointer to host's function to create a semaphore, NULL
+ if not used
+ semDelete - pointer to host's function to delete a semaphore, NULL
+ if not used
+ semTake - pointer to host's function to take a semaphore, NULL
+ if not used
+ semGive - pointer to host's function to give a semaphore, NULL
+ if not used
+ anyPort - port address of any port for this device
+
+ OUTPUTS:
+ dev - pointer to a structure that holds device information to be used for each API call.
+
+ RETURNS:
+ MTD_OK - on success
+ MTD_FAIL - on error
+
+ COMMENTS:
+ mtdUnloadDriver is also provided to do driver cleanup.
+
+ An MTD_DEV is required for each type of X32X0 device in the system. For
+ example, if there are 16 ports of X3240 and 4 ports of X3220,
+ two MTD_DEV are required, and one call to mtdLoadDriver() must
+ be made with one of the X3240 ports, and one with one of the X3220
+ ports.
+*******************************************************************************/
+MTD_STATUS mtdLoadDriver
+(
+ IN FMTD_READ_MDIO readMdio,
+ IN FMTD_WRITE_MDIO writeMdio,
+ IN MTD_BOOL macsecIndirectAccess,
+ IN FMTD_SEM_CREATE semCreate,
+ IN FMTD_SEM_DELETE semDelete,
+ IN FMTD_SEM_TAKE semTake,
+ IN FMTD_SEM_GIVE semGive,
+ IN MTD_U16 anyPort,
+ OUT MTD_DEV * dev
+);
+
+/*******************************************************************************
+* mtdUnloadDriver
+*
+* DESCRIPTION:
+* This function frees the semaphore created by the Marvell X2*** Driver,
+* disables the Device interrupt, and clears the MTD_DEV structure.
+*
+* INPUTS:
+* None.
+*
+* OUTPUTS:
+* None.
+*
+* RETURNS:
+* MTD_OK - on success
+* MTD_FAIL - on error
+*
+* COMMENTS:
+* 1. This function should be called only after successful execution of
+* mtdLoadDriver().
+* +*******************************************************************************/ +MTD_STATUS mtdUnloadDriver +( + IN MTD_DEV * dev +); + +/****************************************************************************** +MTD_STATUS mtdHwXmdioWrite +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 reg, + IN MTD_U16 value +); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + dev - MMD device address, 0-31 + reg - MMD register address + value - data to write + + Outputs: + None + + Returns: + MTD_OK - wrote successfully + MTD_FAIL - an error occurred + + Description: + Writes a 16-bit word to the MDIO + Address is in format X.Y.Z, where X selects the MDIO port (0-31), Y selects + the MMD/Device (0-31), and Z selects the register. + + Side effects: + None + + Notes/Warnings: + None + +******************************************************************************/ +MTD_STATUS mtdHwXmdioWrite +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 reg, + IN MTD_U16 value +); + +/****************************************************************************** + MTD_STATUS mtdHwXmdioRead + ( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 reg, + OUT MTD_U16 *data + ); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + dev - MMD device address, 0-31 + reg - MMD register address + + Outputs: + data - Returns 16 bit word from the MDIO + + Returns: + MTD_OK - read successful + MTD_FAIL - read was unsuccessful + + Description: + Reads a 16-bit word from the MDIO + Address is in format X.Y.Z, where X selects the MDIO port (0-31), Y selects the + MMD/Device (0-31), and Z selects the register. + + Side effects: + None + + Notes/Warnings: + None + +******************************************************************************/ +MTD_STATUS mtdHwXmdioRead +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 reg, + OUT MTD_U16 *data +); + + +/******************************************************************************* + MTD_STATUS mtdHwGetPhyRegField + ( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 regAddr, + IN MTD_U8 fieldOffset, + IN MTD_U8 fieldLength, + OUT MTD_U16 *data + ); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - The port number, 0-31 + dev - The MMD device, 0-31 + regAddr - The register's address + fieldOffset - The field start bit index. (0 - 15) + fieldLength - Number of bits to read + + Outputs: + data - The read register field + + Returns: + MTD_OK on success, or + MTD_FAIL - on error + + Description: + This function reads a specified field from a port's phy register. + It first reads the register, then returns the specified bit + field from what was read. + + Side effects: + None + + Notes/Warnings: + The sum of fieldOffset & fieldLength parameters must be smaller- + equal to 16 + + Reading a register with latched bits may clear the latched bits. + Use with caution for registers with latched bits. + + To operate on several bits within a register which has latched bits + before reading the register again, first read the register with + mtdHwXmdioRead() to get the register value, then operate on the + register data repeatedly using mtdHwGetRegFieldFromWord() to + take apart the bit fields without re-reading the register again. 
+ + This approach should also be used to reduce IO to the PHY when reading + multiple bit fields (do a single read, then grab different fields + from the register by using mtdHwGetRegFieldFromWord() repeatedly). + +*******************************************************************************/ +MTD_STATUS mtdHwGetPhyRegField +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 regAddr, + IN MTD_U8 fieldOffset, + IN MTD_U8 fieldLength, + OUT MTD_U16 *data +); + +/******************************************************************************* + MTD_STATUS mtdHwSetPhyRegField + ( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 regAddr, + IN MTD_U8 fieldOffset, + IN MTD_U8 fieldLength, + IN MTD_U16 data + ); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - The port number, 0-31 + dev - The MMD device, 0-31 + regAddr - The register's address + fieldOffset - The field start bit index. (0 - 15) + fieldLength - Number of bits to write + data - Data to be written. + + Outputs: + None. + + Returns: + MTD_OK on success, or + MTD_FAIL - on error + + Description: + This function writes to specified field in a port's phy register. + + Side effects: + None + + Notes/Warnings: + The sum of fieldOffset & fieldLength parameters must be smaller- + equal to 16. + +*******************************************************************************/ +MTD_STATUS mtdHwSetPhyRegField +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 regAddr, + IN MTD_U8 fieldOffset, + IN MTD_U8 fieldLength, + IN MTD_U16 data +); + +/******************************************************************************* + MTD_STATUS mtdHwGetRegFieldFromWord + ( + IN MTD_U16 regData, + IN MTD_U8 fieldOffset, + IN MTD_U8 fieldLength, + OUT MTD_U16 *data + ); + + Inputs: + regData - The data previously read from the register + fieldOffset - The field start bit index. (0 - 15) + fieldLength - Number of bits to read + + Outputs: + data - The data from the associated bit field + + Returns: + MTD_OK always + + Description: + This function grabs a value from a bitfield within a word. It could + be used to get the value of a bitfield within a word which was previously + read from the PHY. + + Side effects: + None + + Notes/Warnings: + The sum of fieldOffset & fieldLength parameters must be smaller- + equal to 16 + + This register acts on data passed in. It does no hardware access. + + This function is useful if you want to do 1 register access and then + get different bit fields without doing another register access either + because there are latched bits in the register to avoid another read, + or to keep hardware IO down to improve performance/throughput. 
+
+ Example:
+
+ MTD_U16 aword, nibble1, nibble2;
+
+ mtdHwXmdioRead(devPtr,0,MTD_TUNIT_IEEE_PCS_CTRL1,&aword); // Read 3.0 from port 0
+ mtdHwGetRegFieldFromWord(aword,0,4,&nibble1); // grab first nibble
+ mtdHwGetRegFieldFromWord(aword,4,4,&nibble2); // grab second nibble
+
+*******************************************************************************/
+MTD_STATUS mtdHwGetRegFieldFromWord
+(
+ IN MTD_U16 regData,
+ IN MTD_U8 fieldOffset,
+ IN MTD_U8 fieldLength,
+ OUT MTD_U16 *data
+);
+
+/*******************************************************************************
+ MTD_STATUS mtdHwSetRegFieldToWord
+ (
+ IN MTD_U16 regData,
+ IN MTD_U16 bitFieldData,
+ IN MTD_U8 fieldOffset,
+ IN MTD_U8 fieldLength,
+ OUT MTD_U16 *data
+ );
+
+ Inputs:
+ regData - original word to modify
+ bitFieldData - The data to set the register field to
+ (must be <= largest value for that bit field,
+ no range checking is done by this function)
+ fieldOffset - The field start bit index. (0 - 15)
+ fieldLength - Number of bits to write to regData
+
+ Outputs:
+ data - The new/modified regData with the bitfield changed
+
+ Returns:
+ MTD_OK always
+
+ Description:
+ This function writes a value to a bitfield within a word.
+
+ Side effects:
+ None
+
+ Notes/Warnings:
+ The sum of fieldOffset & fieldLength parameters must be smaller-
+ equal to 16
+
+ This function acts on data passed in. It does no hardware access.
+
+ This function is useful to reduce IO if several bit fields of a register
+ that has been read are to be changed before writing it back.
+ + MTD_U16 aword; + + mtdHwXmdioRead(devPtr,0,MTD_TUNIT_IEEE_PCS_CTRL1,&aword); // Read 3.0 from port 0 + mtdHwSetRegFieldToWord(aword,2,0,4,&aword); // Change first nibble to 2 + mtdHwSetRegFieldToWord(aword,3,4,4,&aword); // Change second nibble to 3 + +*******************************************************************************/ +MTD_STATUS mtdHwSetRegFieldToWord +( + IN MTD_U16 regData, + IN MTD_U16 bitFieldData, + IN MTD_U8 fieldOffset, + IN MTD_U8 fieldLength, + OUT MTD_U16 *data +); + + +/****************************************************************************** +MTD_STATUS mtdWait +( + IN MTD_DEV_PTR devPtr, + IN unsigned x +); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + x - number of milliseconds to wait + + Outputs: + None + + Returns: + MTD_OK if wait was successful, MTD_FAIL otherwise + + Description: + Waits X milliseconds + + Side effects: + None + + Notes/Warnings: + None + +******************************************************************************/ +MTD_STATUS mtdWait +( + IN MTD_UINT x +); + +/****************************************************************************** +MTD_STATUS mtdSoftwareReset +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 timeoutMs +); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + timeoutMs - 0 will not wait for reset to complete, otherwise + waits 'timeout' milliseconds for reset to complete + + Outputs: + None + + Returns: + MTD_OK or MTD_FAIL if IO error or timed out + + Description: + Issues a software reset (1.0.15 <= 1) command. Resets firmware and + hardware state machines and returns non-retain bits to their hardware + reset values and retain bits keep their values through the reset. + + If timeoutMs is 0, returns immediately. If timeoutMs is non-zero, + waits up to 'timeoutMs' milliseconds looking for the reset to complete + before returning. Returns MTD_FAIL if times out. + + Side effects: + All "retain" bits keep their values through this reset. Non-"retain"-type + bits are returned to their hardware reset values following this reset. + See the Datasheet for a list of retain bits. + + Notes/Warnings: + Use mtdIsPhyReadyAfterReset() to see if the software reset is complete + before issuing any other MDIO commands following this reset or pass + in non-zero timeoutMs to have this function do it for you. + + This is a T unit software reset only. It may only be issued if the T + unit is ready (1.0.15 is 0) and the T unit is not in low power mode. + +******************************************************************************/ +MTD_STATUS mtdSoftwareReset +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 timeoutMs +); + +MTD_STATUS mtdHardwareReset +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 timeoutMs +); + +/****************************************************************************** + MTD_STATUS mtdSetMacInterfaceControl + ( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 macType, + IN MTD_BOOL macIfPowerDown, + IN MTD_U16 macIfSnoopSel, + IN MTD_U16 macIfActiveLaneSelect, + IN MTD_U16 macLinkDownSpeed, + IN MTD_U16 macMaxIfSpeed, - 33X0/E20X0 devices only - + IN MTD_BOOL doSwReset, + IN MTD_BOOL rerunSerdesInitialization + ); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - port number, 0-31 + macType - the type of MAC interface being used (the hardware interface). 
One of the following: + MTD_MAC_TYPE_RXAUI_SGMII_AN_EN - selects RXAUI with SGMII AN enabled + MTD_MAC_TYPE_RXAUI_SGMII_AN_DIS - selects RXAUI with SGMII AN disabled (not valid on X3310) + MTD_MAC_TYPE_XAUI_RATE_ADAPT - selects XAUI with rate matching (only valid on X3310) + MTD_MAC_TYPE_RXAUI_RATE_ADAPT - selects RXAUI with rate matching + MTD_MAC_TYPE_XAUI - selects XAUI (only valid on X3310) + MTD_MAC_TYPE_XFI_SGMII_AN_EN - selects XFI with SGMII AN enabled + MTD_MAC_TYPE_XFI_SGMII_AN_DIS - selects XFI with SGMII AN disabled + MTD_MAC_TYPE_XFI_RATE_ADAPT - selects XFI with rate matching + MTD_MAC_TYPE_USXGMII - selects USXGMII + MTD_MAC_LEAVE_UNCHANGED - option to leave this parameter unchanged/as it is + macIfPowerDown - MTD_TRUE if the host interface is always to be powered up + MTD_FALSE if the host interface can be powered down under + certain circumstances (see datasheet) + macIfSnoopSel - If snooping is requested on the other lane, selects the source + MTD_MAC_SNOOP_FROM_NETWORK - source of snooped data is to come from the network + MTD_MAC_SNOOP_FROM_HOST - source of snooped data is to come from the host + MTD_MAC_SNOOP_OFF - snooping is to be turned off + MTD_MAC_SNOOP_LEAVE_UNCHANGED - option to leave this parameter unchanged/as it is + macIfActiveLaneSelect - For redundant host mode, this selects the active lane. 0 or 1 + only. 0 selects 0 as the active lane and 1 as the standby. 1 selects the other way. + macLinkDownSpeed - The speed the mac interface should run when the media side is + link down. One of the following: + MTD_MAC_SPEED_10_MBPS + MTD_MAC_SPEED_100_MBPS + MTD_MAC_SPEED_1000_MBPS + MTD_MAC_SPEED_10_GBPS + MTD_MAC_SPEED_LEAVE_UNCHANGED + macMaxIfSpeed - For X33X0/E20X0 devices only. Can be used to limit the Mac interface speed + MTD_MAX_MAC_SPEED_10G + MTD_MAX_MAC_SPEED_5G + MTD_MAX_MAC_SPEED_2P5G + MTD_MAX_MAC_SPEED_LEAVE_UNCHANGED + MTD_MAX_MAC_SPEED_NOT_APPLICABLE (for 32X0 devices pass this) + doSwReset - MTD_TRUE if a software reset (31.F001.15) should be done after these changes + have been made, or MTD_FALSE otherwise. See note below. + rerunSerdesInitialization - MTD_TRUE if any parameter that is likely to change the speed + of the serdes interface was performed like macLinkDownSpeed or macType will attempt + to reset the H unit serdes (this needs to be done AFTER the soft reset, so if doSwReset + is passed as MTD_FALSE, host must later call + mtdRerunSerdesAutoInitializationUseAutoMode() eventually to re-init the serdes). + + + Outputs: + None + + Returns: + MTD_OK or MTD_FAIL if a bad parameter was passed, or an IO error occurs. + + Description: + Changes the above parameters as indicated in 31.F000 and 31.F001 and + optionally does a software reset afterwards for those bits which require a + software reset to take effect. + + Side effects: + None + + Notes/Warnings: + These bits are actually in the C unit, but pertain to the host interface + control so the API called was placed here. + + Changes to the MAC type (31.F001.2:0) do not take effect until a software + reset is performed on the port. + + Changes to macLinkDownSpeed (31.F001.7:6) require 2 software resets to + take effect. This function will do 2 resets if doSwReset is MTD_TRUE + and macLinkDownSpeed is being changed. + + IMPORTANT: the readback reads back the last written value following + a software reset. Writes followed by reads without an intervening + software reset will read back the old bit value for all those bits + requiring a software. 
+ + Because of this, read-modify-writes to different bitfields must have an + intervening software reset to pick up the latest value before doing + another read-modify-write to the register, otherwise the bitfield + may lose the value. + + Suggest always setting doSwReset to MTD_TRUE to avoid problems of + possibly losing changes. + +******************************************************************************/ +MTD_STATUS mtdSetMacInterfaceControl +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 macType, + IN MTD_BOOL macIfPowerDown, + IN MTD_U16 macIfSnoopSel, + IN MTD_U16 macIfActiveLaneSelect, + IN MTD_U16 macLinkDownSpeed, + IN MTD_U16 macMaxIfSpeed, /* 33X0/E20X0 devices only */ + IN MTD_BOOL doSwReset, + IN MTD_BOOL rerunSerdesInitialization +); + +/****************************************************************************** + MTD_STATUS mtdEnableSpeeds + ( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 speed_bits, + IN MTD_BOOL anRestart + ); + + Inputs: 2 + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + speed_bits - speeds to be advertised during auto-negotiation. One or more + of the following (bits logically OR together): + MTD_ADV_NONE (no bits set) + MTD_SPEED_10M_HD + MTD_SPEED_10M_FD + MTD_SPEED_100M_HD + MTD_SPEED_100M_FD + MTD_SPEED_1GIG_HD + MTD_SPEED_1GIG_FD + MTD_SPEED_10GIG_FD + MTD_SPEED_2P5GIG_FD (88X33X0/88E20X0 family only) + MTD_SPEED_5GIG_FD (88X33X0/88E20X0 family only) + MTD_SPEED_ALL + MTD_SPEED_ALL_33X0 (88X33X0/88E20X0 family only) + + anRestart - this takes the value of MTD_TRUE or MTD_FALSE and indicates + if auto-negotiation should be restarted following the speed + enable change. If this is MTD_FALSE, the change will not + take effect until AN is restarted in some other way (link + drop, toggle low power, toggle AN enable, toggle soft reset). + + If this is MTD_TRUE and AN has been disabled, it will be + enabled before being restarted. + + Outputs: + None + + Returns: + MTD_OK if action was successfully taken, MTD_FAIL if not. Also returns + MTD_FAIL if try to force the speed or try to advertise a speed not supported + on this PHY. + + Description: + This function allows the user to select the speeds to be advertised to the + link partner during auto-negotiation. + + First, this function enables auto-negotiation and XNPs by calling + mtdUndoForcedSpeed(). + + The function takes in a 16 bit value and sets the appropriate bits in MMD + 7 to have those speeds advertised. + + The function also checks if the input parameter is MTD_ADV_NONE, in which case + all speeds are disabled effectively disabling the phy from training + (but not disabling auto-negotiation). + + If anRestart is MTD_TRUE, an auto-negotiation restart is issued making the change + immediate. If anRestart is MTD_FALSE, the change will not take effect until the + next time auto-negotiation restarts. + + Side effects: + Setting speed in 1.0 to 10GBASE-T has the effect of enabling XNPs in 7.0 and + enabling auto-negotiation in 7.0. 
+ + Notes/Warnings: + + Example: + To train the highest speed matching the far end among + either 1000BASE-T Full-duplex or 10GBASE-T: + mtdEnableSpeeds(devPtr,port,MTD_SPEED_1GIG_FD | MTD_SPEED_10GIG_FD, MTD_TRUE); + + To allow only 10GBASE-T to train: + mtdEnableSpeeds(devPtr,port,MTD_SPEED_10GIG_FD, MTD_TRUE); + + To disable all speeds (but AN will still be running, just advertising no + speeds) + mtdEnableSpeeds(devPtr,port,MTD_ADV_NONE, MTD_TRUE); + + This function is not to be used to disable autonegotiation and force the speed + to 10BASE-T or 100BASE-TX. Use mtdForceSpeed() for this. + + 88X33X0 Z1/Z2 and E20X0 Z2 are not supported starting with API version 1.2. + Version 1.2 and later require A0 revision of these devices. + +******************************************************************************/ +MTD_STATUS mtdEnableSpeeds +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 speed_bits, + IN MTD_BOOL anRestart +); + +MTD_STATUS mtdGetAutonegSpeedDuplexResolution +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_U16 *speedResolution +); + +MTD_STATUS mtdAutonegIsSpeedDuplexResolutionDone +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_BOOL *anSpeedResolutionDone +); + +/****************************************************************************** +MTD_STATUS mtdIsBaseTUp( + IN MTD_DEV_PTR devPtr, + MTD_U16 port, + MTD_U16 *speed, + MTD_BOOL *linkUp +); + + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + + Outputs: + speed - which speed was resolved or will be resolved by auto-negotiation, + one of following + MTD_ADV_NONE - autonegotiation is in progress or disabled + MTD_SPEED_10M_HD - speed is resolved to 10BT half-duplex + MTD_SPEED_10M_FD - speed is resolved to 10BT full-duplex + MTD_SPEED_100M_HD - speed is resolved to 100BASE-TX half-duplex + MTD_SPEED_100M_FD - speed is resolved to 100BASE-TX full-duplex + MTD_SPEED_1GIG_HD - speed is resolved to 1000BASE-T half-duplex + MTD_SPEED_1GIG_FD - speed is resolved to 1000BASE-T full-duplex + MTD_SPEED_10GIG_FD - speed is resolved to 10GBASE-T + MTD_SPEED_2P5GIG_FD - speed is resolved to 2.5GBASE-T (88X33X0/88E20X0 family only) + MTD_SPEED_5GIG_FD - speed is resolved to 5GBASE-T (88X33X0/88E20X0 family only) + + MTD_SPEED_10M_HD_AN_DIS - speed forced to 10BT half-duplex + MTD_SPEED_10M_FD_AN_DIS - speed forced to 10BT full-duplex + MTD_SPEED_100M_HD_AN_DIS - speed forced to 100BT half-duplex + MTD_SPEED_100M_FD_AN_DIS - speed forced to 100BT full-duplex + MTD_SPEED_MISMATCH - speed is forced to one of above speeds, but + indicated up at a different speed (this is an error) + + linkUp - MTD_TRUE if the copper link is up at the speed above + MTD_FALSE if the link is down or autonegotiation is in progress or + there's an error + + Returns: + MTD_OK, if query was successful, MTD_FAIL if query was unsuccessful or if there + was an error such as MTD_SPEED_MISMATCH. MTD_OK indicates speed and link + status are valid. MTD_FAIL indicates speed and link status are invalid. 
+ + Description: + + First checks if the speed is forced to 10BT or 100BTX, and if it is returns + one of the following combinations + + Speed Forced Value Link Status Status + ------------------ ----------- ------ + MTD_SPEED_10M_HD_AN_DIS MTD_TRUE/FALSE MTD_OK - query successful, link status/speed valid + MTD_SPEED_10M_FD_AN_DIS MTD_TRUE/FALSE MTD_OK - query successful, link status/speed valid + MTD_SPEED_100M_HD_AN_DIS MTD_TRUE/FALSE MTD_OK - query successful, link status/speed valid + MTD_SPEED_100M_FD_AN_DIS MTD_TRUE/FALSE MTD_OK - query successful, link status/speed valid + any of the above with MTD_FALSE MTD_FAIL - query failed + + MTD_SPEED_MISMATCH MTD_FALSE MTD_FAIL - link at different speed than forced value + This is an error. Link may be up at wrong speed + or training at wrong speed. + + If speed is not forced, then checks if autonegotation is resolved or in-progress and + returns one of the following combinations: + + Autonegotation Result Link Status Status + --------------------- ----------- ------ + MTD_ADV_NONE MTD_FALSE MTD_OK - autonegotiation is in progress (link is down) + MTD_SPEED_10M_HD MTD_TRUE/FALSE MTD_OK - query successful, link status/speed valid + MTD_SPEED_10M_FD MTD_TRUE/FALSE MTD_OK - query successful, link status/speed valid + MTD_SPEED_100M_HD MTD_TRUE/FALSE MTD_OK - query successful, link status/speed valid + MTD_SPEED_100M_FD MTD_TRUE/FALSE MTD_OK - query successful, link status/speed valid + MTD_SPEED_1GIG_HD MTD_TRUE/FALSE MTD_OK - query successful, link status/speed valid + MTD_SPEED_1GIG_FD MTD_TRUE/FALSE MTD_OK - query successful, link status/speed valid + MTD_SPEED_10GIG_FD MTD_TRUE/FALSE MTD_OK - query successful, link status/speed valid + MTD_SPEED_2P5GIG_FD MTD_TRUE/FALSE MTD_OK - query successful, link status/speed valid + MTD_SPEED_5GIG_FD MTD_TRUE/FALSE MTD_OK - query successful, link status/speed valid + + any of the above with MTD_FALSE MTD_FAIL - query failed + + Side effects: + None + + Notes/Warnings: + Gives current real-time status of the copper interface. +******************************************************************************/ +MTD_STATUS mtdIsBaseTUp( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_U16 *speed, + OUT MTD_BOOL *linkUp +); + + + +/****************************************************************************/ +/******************************************************************* + Firmware Version + *******************************************************************/ +/****************************************************************************/ + +/****************************************************************************** +MTD_STATUS mtdGetFirmwareVersion +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_U8 *major, + OUT MTD_U8 *minor, + OUT MTD_U8 *inc, + OUT MTD_U8 *test +); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + + Outputs: + major - major version, X.Y.Z.W, the X + minor - minor version, X.Y.Z.W, the Y + inc - incremental version, X.Y.Z.W, the Z + test - test version, X.Y.Z.W, the W, should be 0 for released code, + non-zero indicates this is a non-released code + + Returns: + MTD_FAIL if version can't be queried or firmware is in download mode + (meaning all version numbers are 0), MTD_OK otherwise + + Description: + This function reads the firmware version number and stores it in the + pointers passed in by the user. 
+ + Side effects: + None + + Notes/Warnings: + This function returns all 0's if the phy is in download mode. The phy + application code must have started and be ready before issuing this + command. + +******************************************************************************/ +MTD_STATUS mtdGetFirmwareVersion +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_U8 *major, + OUT MTD_U8 *minor, + OUT MTD_U8 *inc, + OUT MTD_U8 *test +); + +/****************************************************************************** +MTD_STATUS mtdSetPauseAdvertisement +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U8 pauseType, + IN MTD_BOOL anRestart +); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + pauseType - one of the following: + MTD_SYM_PAUSE, + MTD_ASYM_PAUSE, + MTD_SYM_ASYM_PAUSE or + MTD_CLEAR_PAUSE. + anRestart - this takes the value of MTD_TRUE or MTD_FALSE and indicates + if auto-negotiation should be restarted following the speed + enable change. If this is MTD_FALSE, the change will not + take effect until AN is restarted in some other way (link + drop, toggle low power, toggle AN enable, toggle soft reset). + + If this is MTD_TRUE and AN has been disabled, it will be + enabled before being restarted. + + Outputs: + None + + Returns: + MTD_OK or MTD_FAIL, if action was successful or failed + + Description: + This function sets the asymmetric and symmetric pause bits in the technology + ability field in the AN Advertisement register and optionally restarts + auto-negotiation to use the new values. This selects what type of pause + is to be advertised to the far end MAC during auto-negotiation. If + auto-negotiation is restarted, it is enabled first. + + Sets entire 2-bit field to the value passed in pauseType. + + To clear both bits, pass in MTD_CLEAR_PAUSE. + + Side effects: + None + + Notes/Warnings: + This function will not take effect unless the auto-negotiation is restarted. + +******************************************************************************/ +MTD_STATUS mtdSetPauseAdvertisement +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U32 pauseType, + IN MTD_BOOL anRestart +); + + +/****************************************************************************** +MTD_STATUS mtdGetLPAdvertisedPause +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_U8 *pauseBits +); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + + Outputs: + pauseBits - setting of link partner's pause bits based on bit definitions above in + mtdmtdSetPauseAdvertisement() + + Returns: + MTD_OK or MTD_FAIL, based on whether the query succeeded or failed. Returns + MTD_FAIL and MTD_CLEAR_PAUSE if AN is not complete. + + Description: + This function reads 7.19 (LP Base page ability) and returns the advertised + pause setting that was received from the link partner. + + Side effects: + None + + Notes/Warnings: + The user must make sure auto-negotiation has completed by calling + mtdAutonegIsCompleted() prior to calling this function. 
+ +******************************************************************************/ +MTD_STATUS mtdGetLPAdvertisedPause +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_U8 *pauseBits +); + + + +/****************************************************************************** +MTD_STATUS mtdGetPhyRevision +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_DEVICE_ID *phyRev, + OUT MTD_U8 *numPorts, + OUT MTD_U8 *thisPort +); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + + Outputs: + phyRev - revision of this chip, see MTD_DEVICE_ID definition for + a list of chip revisions with different options + numPorts - number of ports on this chip (see note below) + thisPort - this port number 0-1, or 0-4 + + Returns: + MTD_OK if query was successful, MTD_FAIL if not. + + Will return MTD_FAIL on an unsupported PHY (but will attempt to + return correct version). See below for a list of unsupported PHYs. + + Description: + Determines the PHY revision and returns the value in phyRev. + See definition of MTD_DEVICE_ID for a list of available + devices and capabilities. + + Side effects: + None. + + Notes/Warnings: + The phyRev can be used to determine number PHY revision, + number of ports, which port this is from PHY perspective + (0-based indexing 0...3 or 0..2) and what capabilities + the PHY has. + + If phyRev is MTD_REV_UNKNOWN, numPorts and thisPort will be returned + as 0 and the function will return MTD_FAIL. + + If T-unit is in download mode, thisPort will be returned as 0. + + 88X33X0 Z1/Z2 is not supported starting with version 1.2 of API. + E20X0 Z2 is not supported starting with version 1.2 of API. + +******************************************************************************/ +MTD_STATUS mtdGetPhyRevision +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_DEVICE_ID *phyRev, + OUT MTD_U8 *numPorts, + OUT MTD_U8 *thisPort +); + + + +/***************************************************************************** +MTD_STATUS mtdGetForcedSpeed +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_BOOL *speedIsForced, + OUT MTD_U16 *forcedSpeed +); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + + Outputs: + speedIsForced - MTD_TRUE if an is disabled (1.0.12 == 0) AND + the speed in 1.0.13/6 is set to 10BT or 100BT (speeds which do + not require an to train). + forcedSpeed - one of the following if speedIsForced is MTD_TRUE + MTD_SPEED_10M_HD_AN_DIS - speed forced to 10BT half-duplex + MTD_SPEED_10M_FD_AN_DIS - speed forced to 10BT full-duplex + MTD_SPEED_100M_HD_AN_DIS - speed forced to 100BT half-duplex + MTD_SPEED_100M_FD_AN_DIS - speed forced to 100BT full-duplex + + Returns: + MTD_OK if the query was successful, or MTD_FAIL if not + + Description: + Checks if AN is disabled (7.0.12=0) and if the speed select in + register 1.0.13 and 1.0.6 is set to either 10BT or 100BT speeds. If + all of this is true, returns MTD_TRUE in speedIsForced along with + the speed/duplex setting in forcedSpeedBits. If any of this is + false (AN is enabled, or the speed is set to 1000BT or 10GBT), then + speedIsForced is returned MTD_FALSE and the forcedSpeedBit value + is invalid. + + Notes/Warnings: + None. 
+ +******************************************************************************/ +MTD_STATUS mtdGetForcedSpeed +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_BOOL *speedIsForced, + OUT MTD_U16 *forcedSpeed +); + + +/***************************************************************************** +MTD_STATUS mtdUndoForcedSpeed +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_BOOL anRestart +); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + anRestart - this takes the value of MTD_TRUE or MTD_FALSE and indicates + if auto-negotiation should be restarted following the speed + enable change. If this is MTD_FALSE, the change will not + take effect until AN is restarted in some other way (link + drop, toggle low power, toggle AN enable, toggle soft reset). + + If this is MTD_TRUE and AN has been disabled, it will be + enabled before being restarted. + + Outputs: + None + + Returns: + MTD_OK if the change was successful, or MTD_FAIL if not + + Description: + Sets the speed bits in 1.0 back to the power-on default of 11b + (10GBASE-T). Enables auto-negotiation. + + Does a software reset of the T unit and wait until it is complete before + enabling AN and returning. + + Notes/Warnings: + None. + +******************************************************************************/ +MTD_STATUS mtdUndoForcedSpeed +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_BOOL anRestart +); + + +/****************************************************************************** + MTD_STATUS mtdAutonegEnable + ( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port + ); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + + Outputs: + None + + Returns: + MTD_OK or MTD_FAIL, if action was successful or not + + Description: + Re-enables auto-negotiation. + + Side effects: + + Notes/Warnings: + Restart autonegation will not take effect if AN is disabled. + +******************************************************************************/ +MTD_STATUS mtdAutonegEnable +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port +); + + + +/****************************************************************************** + MTD_STATUS mtdAutonegRestart + ( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port + ); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + + Outputs: + None + + Returns: + MTD_OK or MTD_FAIL, depending on if action was successful + + Description: + Restarts auto-negotiation. The bit is self-clearing. If the link is up, + the link will drop and auto-negotiation will start again. + + Side effects: + None. + + Notes/Warnings: + Restarting auto-negotiation will have no effect if auto-negotiation is + disabled. + + This function is important as it is necessary to restart auto-negotiation + after changing many auto-negotiation settings before the changes will take + effect. 
+ +******************************************************************************/ +MTD_STATUS mtdAutonegRestart +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port +); + + + +/****************************************************************************** +MTD_STATUS mtdIsPhyRevisionValid +( + IN MTD_DEVICE_ID phyRev +); + + + Inputs: + phyRev - a revision id to be checked against MTD_DEVICE_ID type + + Outputs: + None + + Returns: + MTD_OK if phyRev is a valid revision, MTD_FAIL otherwise + + Description: + Takes phyRev and returns MTD_OK if it is one of the MTD_DEVICE_ID + type, otherwise returns MTD_FAIL. + + Side effects: + None. + + Notes/Warnings: + None + +******************************************************************************/ +MTD_STATUS mtdIsPhyRevisionValid +( + IN MTD_DEVICE_ID phyRev +); + + + + + +#if C_LINKAGE +#if defined __cplusplus +} +#endif +#endif + +#endif /* _TXGBE_MTD_H_ */ + diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_osdep.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_osdep.h new file mode 100644 index 0000000000000000000000000000000000000000..aea928277e55320dffaf9eef7c86bf50e0fb44a1 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_osdep.h @@ -0,0 +1,227 @@ +/* + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on txgbe_osdep.h, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +/* glue for the OS independent part of txgbe + * includes register access macros + */ + +#ifndef _TXGBE_OSDEP_H_ +#define _TXGBE_OSDEP_H_ + +#include +#include +#include +#include +#include +#include "txgbe_kcompat.h" + +#define TXGBE_CPU_TO_BE16(_x) cpu_to_be16(_x) +#define TXGBE_BE16_TO_CPU(_x) be16_to_cpu(_x) +#define TXGBE_CPU_TO_BE32(_x) cpu_to_be32(_x) +#define TXGBE_BE32_TO_CPU(_x) be32_to_cpu(_x) + +#define msec_delay(_x) msleep(_x) + +#define usec_delay(_x) udelay(_x) + +#define STATIC static + +#define IOMEM __iomem + +#define TXGBE_NAME "txgbe" + +/* #define DBG 1 */ + +#define DPRINTK(nlevel, klevel, fmt, args...) \ + ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ + printk(KERN_##klevel TXGBE_NAME ": %s: %s: " fmt, \ + adapter->netdev->name, \ + __func__, ## args))) + +#ifndef _WIN32 +#define txgbe_emerg(fmt, ...) printk(KERN_EMERG fmt, ## __VA_ARGS__) +#define txgbe_alert(fmt, ...) printk(KERN_ALERT fmt, ## __VA_ARGS__) +#define txgbe_crit(fmt, ...) printk(KERN_CRIT fmt, ## __VA_ARGS__) +#define txgbe_error(fmt, ...) printk(KERN_ERR fmt, ## __VA_ARGS__) +#define txgbe_warn(fmt, ...) printk(KERN_WARNING fmt, ## __VA_ARGS__) +#define txgbe_notice(fmt, ...) printk(KERN_NOTICE fmt, ## __VA_ARGS__) +#define txgbe_info(fmt, ...) printk(KERN_INFO fmt, ## __VA_ARGS__) +#define txgbe_print(fmt, ...) 
printk(KERN_DEBUG fmt, ## __VA_ARGS__) +#define txgbe_trace(fmt, ...) printk(KERN_INFO fmt, ## __VA_ARGS__) +#else /* _WIN32 */ +#define txgbe_error(lvl, fmt, ...) \ + DbgPrintEx(DPFLTR_IHVNETWORK_ID, DPFLTR_ERROR_LEVEL, \ + "%s-error: %s@%d, " fmt, \ + "txgbe", __FUNCTION__, __LINE__, ## __VA_ARGS__) +#endif /* !_WIN32 */ + +#ifdef DBG +#ifndef _WIN32 +#define txgbe_debug(fmt, ...) \ + printk(KERN_DEBUG \ + "%s-debug: %s@%d, " fmt, \ + "txgbe", __FUNCTION__, __LINE__, ## __VA_ARGS__) +#else /* _WIN32 */ +#define txgbe_debug(fmt, ...) \ + DbgPrintEx(DPFLTR_IHVNETWORK_ID, DPFLTR_ERROR_LEVEL, \ + "%s-debug: %s@%d, " fmt, \ + "txgbe", __FUNCTION__, __LINE__, ## __VA_ARGS__) +#endif /* _WIN32 */ +#else /* DBG */ +#define txgbe_debug(fmt, ...) do {} while (0) +#endif /* DBG */ + + +#ifdef DBG +#define ASSERT(_x) BUG_ON(!(_x)) +#define DEBUGOUT(S) printk(KERN_DEBUG S) +#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT2(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT3(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT4(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT5(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT6(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGFUNC(fmt, ...) txgbe_debug(fmt, ## __VA_ARGS__) +#else +#define ASSERT(_x) do {} while (0) +#define DEBUGOUT(S) do {} while (0) +#define DEBUGOUT1(S, A...) do {} while (0) +#define DEBUGOUT2(S, A...) do {} while (0) +#define DEBUGOUT3(S, A...) do {} while (0) +#define DEBUGOUT4(S, A...) do {} while (0) +#define DEBUGOUT5(S, A...) do {} while (0) +#define DEBUGOUT6(S, A...) do {} while (0) +#define DEBUGFUNC(fmt, ...) do {} while (0) +#endif + +#define TXGBE_SFP_DETECT_RETRIES 2 + +struct txgbe_hw; +struct txgbe_msg { + u16 msg_enable; +}; +struct net_device *txgbe_hw_to_netdev(const struct txgbe_hw *hw); +struct txgbe_msg *txgbe_hw_to_msg(const struct txgbe_hw *hw); + +#define hw_dbg(hw, format, arg...) \ + netdev_dbg(txgbe_hw_to_netdev(hw), format, ## arg) +#define hw_err(hw, format, arg...) \ + netdev_err(txgbe_hw_to_netdev(hw), format, ## arg) +#define e_dev_info(format, arg...) \ + dev_info(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_notice(format, arg...) \ + dev_notice(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dbg(msglvl, format, arg...) \ + netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_crit(msglvl, format, arg...) 
\ + netif_crit(adapter, msglvl, adapter->netdev, format, ## arg) + +#define TXGBE_FAILED_READ_CFG_DWORD 0xffffffffU +#define TXGBE_FAILED_READ_CFG_WORD 0xffffU +#define TXGBE_FAILED_READ_CFG_BYTE 0xffU + +extern u32 txgbe_read_reg(struct txgbe_hw *hw, u32 reg, bool quiet); +extern u16 txgbe_read_pci_cfg_word(struct txgbe_hw *hw, u32 reg); +extern void txgbe_write_pci_cfg_word(struct txgbe_hw *hw, u32 reg, u16 value); + +#define TXGBE_READ_PCIE_WORD txgbe_read_pci_cfg_word +#define TXGBE_WRITE_PCIE_WORD txgbe_write_pci_cfg_word +#define TXGBE_R32_Q(h, r) txgbe_read_reg(h, r, true) + +#ifndef writeq +#define writeq(val, addr) do { writel((u32) (val), addr); \ + writel((u32) (val >> 32), (addr + 4)); \ + } while (0); +#endif + +#define TXGBE_EEPROM_GRANT_ATTEMPS 100 +#define TXGBE_HTONL(_i) htonl(_i) +#define TXGBE_NTOHL(_i) ntohl(_i) +#define TXGBE_NTOHS(_i) ntohs(_i) +#define TXGBE_CPU_TO_LE32(_i) cpu_to_le32(_i) +#define TXGBE_LE32_TO_CPUS(_i) le32_to_cpus(_i) + +enum { + TXGBE_ERROR_SOFTWARE, + TXGBE_ERROR_POLLING, + TXGBE_ERROR_INVALID_STATE, + TXGBE_ERROR_UNSUPPORTED, + TXGBE_ERROR_ARGUMENT, + TXGBE_ERROR_CAUTION, +}; + +#define ERROR_REPORT(level, format, arg...) do { \ + switch (level) { \ + case TXGBE_ERROR_SOFTWARE: \ + case TXGBE_ERROR_CAUTION: \ + case TXGBE_ERROR_POLLING: \ + netif_warn(txgbe_hw_to_msg(hw), drv, txgbe_hw_to_netdev(hw), \ + format, ## arg); \ + break; \ + case TXGBE_ERROR_INVALID_STATE: \ + case TXGBE_ERROR_UNSUPPORTED: \ + case TXGBE_ERROR_ARGUMENT: \ + netif_err(txgbe_hw_to_msg(hw), hw, txgbe_hw_to_netdev(hw), \ + format, ## arg); \ + break; \ + default: \ + break; \ + } \ +} while (0) + +#define ERROR_REPORT1 ERROR_REPORT +#define ERROR_REPORT2 ERROR_REPORT +#define ERROR_REPORT3 ERROR_REPORT + +#define UNREFERENCED_XPARAMETER +#define UNREFERENCED_1PARAMETER(_p) do { \ + uninitialized_var(_p); \ +} while (0) +#define UNREFERENCED_2PARAMETER(_p, _q) do { \ + uninitialized_var(_p); \ + uninitialized_var(_q); \ +} while (0) +#define UNREFERENCED_3PARAMETER(_p, _q, _r) do { \ + uninitialized_var(_p); \ + uninitialized_var(_q); \ + uninitialized_var(_r); \ +} while (0) +#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) do { \ + uninitialized_var(_p); \ + uninitialized_var(_q); \ + uninitialized_var(_r); \ + uninitialized_var(_s); \ +} while (0) +#define UNREFERENCED_PARAMETER(_p) UNREFERENCED_1PARAMETER(_p) + +#endif /* _TXGBE_OSDEP_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_param.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_param.c new file mode 100644 index 0000000000000000000000000000000000000000..35a43ab9e0d9d11132f83b5ed51235bb66280f18 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_param.c @@ -0,0 +1,1518 @@ +/* + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on txgbe_param.c, Copyright(c) 1999 - 2017 Intel Corporation. 
+ * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + + +#include +#include + +#include "txgbe.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ +#define TXGBE_MAX_NIC 32 +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define STRINGIFY(foo) #foo /* magic for getting defines into strings */ +#define XSTRINGIFY(bar) STRINGIFY(bar) + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define TXGBE_PARAM_INIT { [0 ... TXGBE_MAX_NIC] = OPTION_UNSET } +#ifndef module_param_array +/* Module Parameters are always initialized to -1, so that the driver + * can tell the difference between no user specified value or the + * user asking for the default value. + * The true default values are loaded in when txgbe_check_options is called. + * + * This is a GCC extension to ANSI C. + * See the item "Labelled Elements in Initializers" in the section + * "Extensions to the C Language Family" of the GCC documentation. + */ + +#define TXGBE_PARAM(X, desc) \ + static const int __devinitconst X[TXGBE_MAX_NIC+1] = TXGBE_PARAM_INIT; \ + MODULE_PARM(X, "1-" __MODULE_STRING(TXGBE_MAX_NIC) "i"); \ + MODULE_PARM_DESC(X, desc); +#else /* !module_param_array */ +#define TXGBE_PARAM(X, desc) \ + static int __devinitdata X[TXGBE_MAX_NIC+1] = TXGBE_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); +#endif /* module_param_array */ + +/* Tx unidirectional mode + * + * Valid Range: [0, 1] + * + * Default Value: 0 + */ +TXGBE_PARAM(TX_UNIDIR_MODE, "Tx Unidirectional Mode [0, 1]"); +#define TX_DEFAULT_UNIDIR_MODE 0 + +/* ffe_main (KR/KX4/KX/SFI) + * + * Valid Range: 0-60 + * + * Default Value: 27 + */ +TXGBE_PARAM(FFE_MAIN, "TX_EQ MAIN (0 - 40)"); +#define TXGBE_DEFAULT_FFE_MAIN 27 + +/* ffe_pre + * + * Valid Range: 0-60 + * + * Default Value: 8 + */ +TXGBE_PARAM(FFE_PRE, "TX_EQ PRE (0 - 40)"); +#define TXGBE_DEFAULT_FFE_PRE 8 + +/* ffe_post + * + * Valid Range: 0-60 + * + * Default Value: 44 + */ +TXGBE_PARAM(FFE_POST, "TX_EQ POST (0 - 40)"); +#define TXGBE_DEFAULT_FFE_POST 44 + +/* ffe_set + * + * Valid Range: 0-4 + * + * Default Value: 0 + */ +TXGBE_PARAM(FFE_SET, "TX_EQ SET must choose to take effect (0 = NULL, 1 = sfi, 2 = kr, 3 = kx4, 4 = kx)"); +#define TXGBE_DEFAULT_FFE_SET 0 + +/* backplane_mode + * + * Valid Range: 0-4 + * - 0 - NULL + * - 1 - sfi + * - 2 - kr + * - 3 - kx4 + * - 4 - kx + * + * Default Value: 0 + */ +TXGBE_PARAM(backplane_mode, "Backplane Mode Support(0 = NULL, 1 = sfi, 2 = kr, 3 = kx4, 4 = kx)"); +#define TXGBE_BP_NULL 0 +#define TXGBE_BP_SFI 1 +#define TXGBE_BP_KR 2 +#define TXGBE_BP_KX4 3 +#define TXGBE_BP_KX 4 +#define TXGBE_DEFAULT_BP_MODE TXGBE_BP_NULL + +/* backplane_auto + * + * Valid Range: 0-1 + * - 0 - NO AUTO + * - 1 - AUTO + * Default Value: 0 + */ +TXGBE_PARAM(backplane_auto, "Backplane AUTO mode (0 = NO AUTO, 1 = AUTO)"); +#define TXGBE_BP_NAUTO 0 +#define TXGBE_BP_AUTO 1 +#define TXGBE_DEFAULT_BP_AUTO -1 + +/* IntMode (Interrupt Mode) + * + * Valid Range: 0-2 + * - 0 - Legacy Interrupt + * - 1 - MSI Interrupt + * - 2 - MSI-X Interrupt(s) + * + * Default Value: 2 + */ +TXGBE_PARAM(InterruptType, "Change Interrupt Mode (0=Legacy, 1=MSI, 
2=MSI-X), " + "default IntMode (deprecated)"); +TXGBE_PARAM(IntMode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " + "default 2"); +#define TXGBE_INT_LEGACY 0 +#define TXGBE_INT_MSI 1 +#define TXGBE_INT_MSIX 2 +#define TXGBE_DEFAULT_INT TXGBE_INT_MSIX + +/* MQ - Multiple Queue enable/disable + * + * Valid Range: 0, 1 + * - 0 - disables MQ + * - 1 - enables MQ + * + * Default Value: 1 + */ + +TXGBE_PARAM(MQ, "Disable or enable Multiple Queues, default 1"); + +#if IS_ENABLED(CONFIG_TPH) +/* TPH - TLP Processing Hints + * + * This option allows the device to hint to TPH enabled processors + * which CPU should have its cache warmed with the data being + * transferred over PCIe. This can increase performance by reducing + * cache misses. txgbe hardware supports TPH for: + * tx descriptor writeback + * rx descriptor writeback + * rx data + * rx data header only (in packet split mode) + * + * enabling option 2 can cause cache thrash in some tests, particularly + * if the CPU is completely utilized + * + * Valid Range: 0 - 2 + * - 0 - disables TPH + * - 1 - enables TPH + * - 2 - enables TPH with rx data included + * + * Default Value: 2 + */ + +#define TXGBE_MAX_TPH 2 + +TXGBE_PARAM(TPH, "Disable or enable TLP Processing Hints, 0=disabled, " + "1=descriptor only, 2=descriptor and data"); +#endif /* CONFIG_TPH */ + +/* RSS - Receive-Side Scaling (RSS) Descriptor Queues + * + * Valid Range: 0-64 + * - 0 - enables RSS and sets the Desc. Q's to min(64, num_online_cpus()). + * - 1-64 - enables RSS and sets the Desc. Q's to the specified value. + * + * Default Value: 0 + */ + +TXGBE_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues, " + "default 0=number of cpus"); + +/* VMDQ - Virtual Machine Device Queues (VMDQ) + * + * Valid Range: 1-16 + * - 1 Disables VMDQ by allocating only a single queue. + * - 2-16 - enables VMDQ and sets the Desc. Q's to the specified value. + * + * Default Value: 1 + */ + +#define TXGBE_DEFAULT_NUM_VMDQ 8 + +TXGBE_PARAM(VMDQ, "Number of Virtual Machine Device Queues: 0/1 = disable, " + "2-16 enable (default=" XSTRINGIFY(TXGBE_DEFAULT_NUM_VMDQ) ")"); + +#ifdef CONFIG_PCI_IOV +/* max_vfs - SR I/O Virtualization + * + * Valid Range: 0-63 + * - 0 Disables SR-IOV + * - 1-63 - enables SR-IOV and sets the number of VFs enabled + * + * Default Value: 0 + */ + +#define MAX_SRIOV_VFS 63 + +TXGBE_PARAM(max_vfs, "Number of Virtual Functions: 0 = disable (default), " + "1-" XSTRINGIFY(MAX_SRIOV_VFS) " = enable " + "this many VFs"); + +/* VEPA - Set internal bridge to VEPA mode + * + * Valid Range: 0-1 + * - 0 Set bridge to VEB mode + * - 1 Set bridge to VEPA mode + * + * Default Value: 0 + */ +/* + *Note: + *===== + * This provides ability to ensure VEPA mode on the internal bridge even if + * the kernel does not support the netdev bridge setting operations. 
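+ * When enabled, txgbe_check_options() sets TXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE
+ * in adapter->flags to request VEPA mode on the internal bridge.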
+*/ +TXGBE_PARAM(VEPA, "VEPA Bridge Mode: 0 = VEB (default), 1 = VEPA"); +#endif + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 980-500000 (0=off, 1=dynamic) + * + * Default Value: 1 + */ +#define DEFAULT_ITR (TXGBE_STATIC_ITR == 0) || \ + (TXGBE_STATIC_ITR == 1)?TXGBE_STATIC_ITR:(u16)((1000000/TXGBE_STATIC_ITR) << 2) + +TXGBE_PARAM(InterruptThrottleRate, "Maximum interrupts per second, per vector, " + "(0,1,980-500000), default 1"); +#define MAX_ITR TXGBE_MAX_INT_RATE +#define MIN_ITR TXGBE_MIN_INT_RATE + +#ifndef TXGBE_NO_LLI + +/* LLIPort (Low Latency Interrupt TCP Port) + * + * Valid Range: 0 - 65535 + * + * Default Value: 0 (disabled) + */ +TXGBE_PARAM(LLIPort, "Low Latency Interrupt TCP Port (0-65535)"); + +#define DEFAULT_LLIPORT 0 +#define MAX_LLIPORT 0xFFFF +#define MIN_LLIPORT 0 + + +/* LLISize (Low Latency Interrupt on Packet Size) + * + * Valid Range: 0 - 1500 + * + * Default Value: 0 (disabled) + */ +TXGBE_PARAM(LLISize, "Low Latency Interrupt on Packet Size (0-1500)"); + +#define DEFAULT_LLISIZE 0 +#define MAX_LLISIZE 1500 +#define MIN_LLISIZE 0 + +/* LLIEType (Low Latency Interrupt Ethernet Type) + * + * Valid Range: 0 - 0x8fff + * + * Default Value: 0 (disabled) + */ +TXGBE_PARAM(LLIEType, "Low Latency Interrupt Ethernet Protocol Type"); + +#define DEFAULT_LLIETYPE 0 +#define MAX_LLIETYPE 0x8fff +#define MIN_LLIETYPE 0 + +/* LLIVLANP (Low Latency Interrupt on VLAN priority threshold) + * + * Valid Range: 0 - 7 + * + * Default Value: 0 (disabled) + */ +TXGBE_PARAM(LLIVLANP, "Low Latency Interrupt on VLAN priority threshold"); + +#define DEFAULT_LLIVLANP 0 +#define MAX_LLIVLANP 7 +#define MIN_LLIVLANP 0 + +#endif /* TXGBE_NO_LLI */ +#ifdef HAVE_TX_MQ +/* Flow Director packet buffer allocation level + * + * Valid Range: 1-3 + * 1 = 8k hash/2k perfect, + * 2 = 16k hash/4k perfect, + * 3 = 32k hash/8k perfect + * + * Default Value: 0 + */ +TXGBE_PARAM(FdirPballoc, "Flow Director packet buffer allocation level:\n" + "\t\t\t1 = 8k hash filters or 2k perfect filters\n" + "\t\t\t2 = 16k hash filters or 4k perfect filters\n" + "\t\t\t3 = 32k hash filters or 8k perfect filters"); + +#define TXGBE_DEFAULT_FDIR_PBALLOC TXGBE_FDIR_PBALLOC_64K + +/* Software ATR packet sample rate + * + * Valid Range: 0-255 0 = off, 1-255 = rate of Tx packet inspection + * + * Default Value: 20 + */ +TXGBE_PARAM(AtrSampleRate, "Software ATR Tx packet sample rate"); + +#define TXGBE_MAX_ATR_SAMPLE_RATE 255 +#define TXGBE_MIN_ATR_SAMPLE_RATE 1 +#define TXGBE_ATR_SAMPLE_RATE_OFF 0 +#define TXGBE_DEFAULT_ATR_SAMPLE_RATE 20 +#endif /* HAVE_TX_MQ */ + +#if IS_ENABLED(CONFIG_FCOE) +/* FCoE - Fibre Channel over Ethernet Offload Enable/Disable + * + * Valid Range: 0, 1 + * - 0 - disables FCoE Offload + * - 1 - enables FCoE Offload + * + * Default Value: 1 + */ +TXGBE_PARAM(FCoE, "Disable or enable FCoE Offload, default 1"); +#endif /* CONFIG_FCOE */ + +/* Enable/disable Large Receive Offload + * + * Valid Values: 0(off), 1(on) + * + * Default Value: 1 + */ +TXGBE_PARAM(LRO, "Large Receive Offload (0,1), default 1 = on"); + +/* Enable/disable support for untested SFP+ modules on adapters + * + * Valid Values: 0(Disable), 1(Enable) + * + * Default Value: 0 + */ +TXGBE_PARAM(allow_unsupported_sfp, "Allow unsupported and untested " + "SFP+ modules on adapters, default 0 = Disable"); + +TXGBE_PARAM(Fdir, "Support Flow director, default 1 = Enable, 0 = Disable "); +/* Enable/disable support for DMA coalescing + * + * Valid Values: 0(off), 41 - 10000(on) + * + * Default Value: 0 + */ 
+TXGBE_PARAM(dmac_watchdog, + "DMA coalescing watchdog in microseconds (0,41-10000)," + "default 0 = off"); + +/* Enable/disable support for VXLAN rx checksum offload + * + * Valid Values: 0(Disable), 1(Enable) + * + * Default Value: 1 on hardware that supports it + */ +TXGBE_PARAM(vxlan_rx, + "VXLAN receive checksum offload (0,1), default 1 = Enable"); + +/* Rx buffer mode + * + * Valid Range: 0-1 0 = no header split, 1 = hdr split + * + * Default Value: 0 + */ +TXGBE_PARAM(RxBufferMode, "0=(default)no header split\n" + "\t\t\t1=hdr split for recognized packet\n"); + +#define TXGBE_RXBUFMODE_NO_HEADER_SPLIT 0 +#define TXGBE_RXBUFMODE_HEADER_SPLIT 1 +#define TXGBE_DEFAULT_RXBUFMODE TXGBE_RXBUFMODE_NO_HEADER_SPLIT + +/* Cloud Switch mode + * + * Valid Range: 0-1 0 = disable Cloud Switch, 1 = enable Cloud Switch + * + * Default Value: 0 + */ +TXGBE_PARAM(CloudSwitch, "Cloud Switch (0,1), default 0 = disable, 1 = enable"); + + +struct txgbe_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + const char *msg; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct txgbe_opt_list { + int i; + char *str; + } *p; + } l; + } arg; +}; + +static int __devinit txgbe_validate_option(u32 *value, + struct txgbe_option *opt) +{ + int val = (int)*value; + + if (val == OPTION_UNSET) { + txgbe_info("txgbe: Invalid %s specified (%d), %s\n", + opt->name, val, opt->err); + *value = (u32)opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (val) { + case OPTION_ENABLED: + txgbe_info("txgbe: %s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + txgbe_info("txgbe: %s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if ((val >= opt->arg.r.min && val <= opt->arg.r.max) || + val == opt->def) { + if (opt->msg) + txgbe_info("txgbe: %s set to %d, %s\n", + opt->name, val, opt->msg); + else + txgbe_info("txgbe: %s set to %d\n", + opt->name, val); + return 0; + } + break; + case list_option: { + int i; + const struct txgbe_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (val == ent->i) { + if (ent->str[0] != '\0') + txgbe_info("%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG_ON(1); + } + + txgbe_info("txgbe: Invalid %s specified (%d), %s\n", + opt->name, val, opt->err); + *value = (u32)opt->def; + return -1; +} + +/** + * txgbe_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. 
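+ *
+ * Each module parameter is an array indexed by the board number (bd), so a
+ * separate value may be supplied for every adapter the driver manages.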
+ **/ +void __devinit txgbe_check_options(struct txgbe_adapter *adapter) +{ + u32 bd = adapter->bd_number; + u32 *aflags = &adapter->flags; + struct txgbe_ring_feature *feature = adapter->ring_feature; + u32 vmdq; + + if (bd >= TXGBE_MAX_NIC) { + txgbe_notice( + "Warning: no configuration for board #%d\n", bd); + txgbe_notice("Using defaults for all values\n"); +#ifndef module_param_array + bd = TXGBE_MAX_NIC; +#endif + } + { + u32 tx_unidir_mode; + static struct txgbe_option opt = { + .type = range_option, + .name = "TX_UNIDIR_MODE", + .err = + "using default of "__MODULE_STRING(TX_DEFAULT_UNIDIR_MODE), + .def = 0, + .arg = { .r = { .min = 0, + .max = 1} } + }; + +#ifdef module_param_array + if (num_TX_UNIDIR_MODE > bd) { +#endif + tx_unidir_mode = TX_UNIDIR_MODE[bd]; + if (tx_unidir_mode == OPTION_UNSET) + tx_unidir_mode = TX_UNIDIR_MODE[bd]; + txgbe_validate_option(&tx_unidir_mode, &opt); + adapter->tx_unidir_mode = tx_unidir_mode; +#ifdef module_param_array + } else { + adapter->tx_unidir_mode = 0; + } +#endif + } + + { /* MAIN */ + u32 ffe_main; + static struct txgbe_option opt = { + .type = range_option, + .name = "FFE_MAIN", + .err = + "using default of "__MODULE_STRING(TXGBE_DEFAULT_FFE_MAIN), + .def = TXGBE_DEFAULT_FFE_MAIN, + .arg = { .r = { .min = 0, + .max = 60} } + }; + +#ifdef module_param_array + if (num_FFE_MAIN > bd ) { +#endif + ffe_main = FFE_MAIN[bd]; + if (ffe_main == OPTION_UNSET) + ffe_main = FFE_MAIN[bd]; + txgbe_validate_option(&ffe_main, &opt); + adapter->ffe_main = ffe_main; +#ifdef module_param_array + } else { + adapter->ffe_main = 27; + } +#endif + } + + { /* PRE */ + u32 ffe_pre; + static struct txgbe_option opt = { + .type = range_option, + .name = "FFE_PRE", + .err = + "using default of "__MODULE_STRING(TXGBE_DEFAULT_FFE_PRE), + .def = TXGBE_DEFAULT_FFE_PRE, + .arg = { .r = { .min = 0, + .max = 60} } + }; + +#ifdef module_param_array + if (num_FFE_PRE > bd ) { +#endif + ffe_pre = FFE_PRE[bd]; + if (ffe_pre == OPTION_UNSET) + ffe_pre = FFE_PRE[bd]; + txgbe_validate_option(&ffe_pre, &opt); + adapter->ffe_pre = ffe_pre; +#ifdef module_param_array + } else { + adapter->ffe_pre = 8; + } +#endif + } + + { /* POST */ + u32 ffe_post; + static struct txgbe_option opt = { + .type = range_option, + .name = "FFE_POST", + .err = + "using default of "__MODULE_STRING(TXGBE_DEFAULT_FFE_POST), + .def = TXGBE_DEFAULT_FFE_POST, + .arg = { .r = { .min = 0, + .max = 60} } + }; + +#ifdef module_param_array + if (num_FFE_POST > bd ) { +#endif + ffe_post = FFE_POST[bd]; + if (ffe_post == OPTION_UNSET) + ffe_post = FFE_POST[bd]; + txgbe_validate_option(&ffe_post, &opt); + adapter->ffe_post = ffe_post; +#ifdef module_param_array + } else { + adapter->ffe_post = 44; + } +#endif + } + + { /* ffe_set */ + u32 ffe_set; + static struct txgbe_option opt = { + .type = range_option, + .name = "FFE_SET", + .err = + "using default of "__MODULE_STRING(TXGBE_DEFAULT_FFE_SET), + .def = TXGBE_DEFAULT_FFE_SET, + .arg = { .r = { .min = 0, + .max = 4} } + }; + +#ifdef module_param_array + if (num_FFE_SET > bd ) { +#endif + ffe_set = FFE_SET[bd]; + if (ffe_set == OPTION_UNSET) + ffe_set = FFE_SET[bd]; + txgbe_validate_option(&ffe_set, &opt); + adapter->ffe_set = ffe_set; +#ifdef module_param_array + } else { + adapter->ffe_set = 0; + } +#endif + } + + { /* backplane_mode */ + u32 bp_mode; + static struct txgbe_option opt = { + .type = range_option, + .name = "backplane_mode", + .err = + "using default of "__MODULE_STRING(TXGBE_DEFAULT_BP_MODE), + .def = TXGBE_DEFAULT_BP_MODE, + .arg = { .r = { .min = 
0, + .max = 4} } + }; + +#ifdef module_param_array + if (num_backplane_mode > bd ) { +#endif + bp_mode = backplane_mode[bd]; + if (bp_mode == OPTION_UNSET) + bp_mode = backplane_mode[bd]; + txgbe_validate_option(&bp_mode, &opt); + adapter->backplane_mode = bp_mode; +#ifdef module_param_array + } else { + adapter->backplane_mode = 0; + } +#endif + } + + { /* auto mode */ + u32 bp_auto; + static struct txgbe_option opt = { + .type = range_option, + .name = "bp_auto", + .err = + "using default of "__MODULE_STRING(TXGBE_DEFAULT_BP_AUTO), + .def = TXGBE_DEFAULT_BP_AUTO, + .arg = { .r = { .min = 0, + .max = 2} } + }; + +#ifdef module_param_array + if (num_backplane_auto > bd ) { +#endif + bp_auto = backplane_auto[bd]; + if (bp_auto == OPTION_UNSET) + bp_auto = backplane_auto[bd]; + txgbe_validate_option(&bp_auto, &opt); + adapter->backplane_auto = bp_auto; +#ifdef module_param_array + } else { + adapter->backplane_auto = -1; + } +#endif + } + + { /* Interrupt Mode */ + u32 int_mode; + static struct txgbe_option opt = { + .type = range_option, + .name = "Interrupt Mode", + .err = + "using default of "__MODULE_STRING(TXGBE_DEFAULT_INT), + .def = TXGBE_DEFAULT_INT, + .arg = { .r = { .min = TXGBE_INT_LEGACY, + .max = TXGBE_INT_MSIX} } + }; + +#ifdef module_param_array + if (num_IntMode > bd || num_InterruptType > bd) { +#endif + int_mode = IntMode[bd]; + if (int_mode == OPTION_UNSET) + int_mode = InterruptType[bd]; + txgbe_validate_option(&int_mode, &opt); + switch (int_mode) { + case TXGBE_INT_MSIX: + if (!(*aflags & TXGBE_FLAG_MSIX_CAPABLE)) + txgbe_info( + "Ignoring MSI-X setting; " + "support unavailable\n"); + break; + case TXGBE_INT_MSI: + if (!(*aflags & TXGBE_FLAG_MSI_CAPABLE)) { + txgbe_info( + "Ignoring MSI setting; " + "support unavailable\n"); + } else { + *aflags &= ~TXGBE_FLAG_MSIX_CAPABLE; + } + break; + case TXGBE_INT_LEGACY: + default: + *aflags &= ~TXGBE_FLAG_MSIX_CAPABLE; + *aflags &= ~TXGBE_FLAG_MSI_CAPABLE; + break; + } +#ifdef module_param_array + } else { + /* default settings */ + if (opt.def == TXGBE_INT_MSIX && + *aflags & TXGBE_FLAG_MSIX_CAPABLE) { + *aflags |= TXGBE_FLAG_MSIX_CAPABLE; + *aflags |= TXGBE_FLAG_MSI_CAPABLE; + } else if (opt.def == TXGBE_INT_MSI && + *aflags & TXGBE_FLAG_MSI_CAPABLE) { + *aflags &= ~TXGBE_FLAG_MSIX_CAPABLE; + *aflags |= TXGBE_FLAG_MSI_CAPABLE; + } else { + *aflags &= ~TXGBE_FLAG_MSIX_CAPABLE; + *aflags &= ~TXGBE_FLAG_MSI_CAPABLE; + } + } +#endif + } + { /* Multiple Queue Support */ + static struct txgbe_option opt = { + .type = enable_option, + .name = "Multiple Queue Support", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + +#ifdef module_param_array + if (num_MQ > bd) { +#endif + u32 mq = MQ[bd]; + txgbe_validate_option(&mq, &opt); + if (mq) + *aflags |= TXGBE_FLAG_MQ_CAPABLE; + else + *aflags &= ~TXGBE_FLAG_MQ_CAPABLE; +#ifdef module_param_array + } else { + if (opt.def == OPTION_ENABLED) + *aflags |= TXGBE_FLAG_MQ_CAPABLE; + else + *aflags &= ~TXGBE_FLAG_MQ_CAPABLE; + } +#endif + /* Check Interoperability */ + if ((*aflags & TXGBE_FLAG_MQ_CAPABLE) && + !(*aflags & TXGBE_FLAG_MSIX_CAPABLE)) { + DPRINTK(PROBE, INFO, + "Multiple queues are not supported while MSI-X " + "is disabled. 
Disabling Multiple Queues.\n"); + *aflags &= ~TXGBE_FLAG_MQ_CAPABLE; + } + } + +#if IS_ENABLED(CONFIG_TPH) + { /* TLP Processing Hints */ + static struct txgbe_option opt = { + .type = range_option, + .name = "TLP Processing Hints(TPH)", + .err = "defaulting to Enabled", + .def = TXGBE_MAX_TPH, + .arg = { .r = { .min = OPTION_DISABLED, + .max = TXGBE_MAX_TPH} } + }; + u32 tph = opt.def; + +#ifdef module_param_array + if (num_TPH > bd) { +#endif + tph = TPH[bd]; + txgbe_validate_option(&tph, &opt); + if (!tph) + *aflags &= ~TXGBE_FLAG_TPH_CAPABLE; + + /* Check Interoperability */ + if (!(*aflags & TXGBE_FLAG_TPH_CAPABLE)) { + DPRINTK(PROBE, INFO, "TPH is disabled\n"); + *aflags &= ~TXGBE_FLAG_TPH_ENABLED; + } + + if (tph == TXGBE_MAX_TPH) { + DPRINTK(PROBE, INFO, + "TPH enabled for rx data\n"); + adapter->flags |= TXGBE_FLAG_TPH_ENABLED_DATA; + } +#ifdef module_param_array + } else { + /* make sure to clear the capability flag if the + * option is disabled by default above */ + if (opt.def == OPTION_DISABLED) + *aflags &= ~TXGBE_FLAG_TPH_CAPABLE; + } +#endif + if (tph == TXGBE_MAX_TPH) + adapter->flags |= TXGBE_FLAG_TPH_ENABLED_DATA; + } +#endif /* CONFIG_TPH */ + { /* Receive-Side Scaling (RSS) */ + static struct txgbe_option opt = { + .type = range_option, + .name = "Receive-Side Scaling (RSS)", + .err = "using default.", + .def = 0, + .arg = { .r = { .min = 0, + .max = 1} } + }; + u32 rss = min_t(int, txgbe_max_rss_indices(adapter), + num_online_cpus()); + /* adjust Max allowed RSS queues based on MAC type */ + opt.arg.r.max = min_t(int, txgbe_max_rss_indices(adapter), + num_online_cpus()); + +#ifdef module_param_array + if (num_RSS > bd) { +#endif + rss = RSS[bd]; + txgbe_validate_option(&rss, &opt); + /* base it off num_online_cpus() with hardware limit */ + if (!rss) + rss = min_t(int, opt.arg.r.max, + num_online_cpus()); + else + feature[RING_F_FDIR].limit = (u16)rss; + + feature[RING_F_RSS].limit = (u16)rss; +#ifdef module_param_array + } else if (opt.def == 0) { + rss = min_t(int, txgbe_max_rss_indices(adapter), + num_online_cpus()); + feature[RING_F_FDIR].limit = (u16)rss; + feature[RING_F_RSS].limit = rss; + } +#endif + /* Check Interoperability */ + if (rss > 1) { + if (!(*aflags & TXGBE_FLAG_MQ_CAPABLE)) { + DPRINTK(PROBE, INFO, + "Multiqueue is disabled. " + "Limiting RSS.\n"); + feature[RING_F_RSS].limit = 1; + } + } + adapter->flags2 |= TXGBE_FLAG2_RSS_ENABLED; + } + { /* Virtual Machine Device Queues (VMDQ) */ + static struct txgbe_option opt = { + .type = range_option, + .name = "Virtual Machine Device Queues (VMDQ)", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = TXGBE_MAX_VMDQ_INDICES + } } + }; + +#ifdef module_param_array + if (num_VMDQ > bd) { +#endif + vmdq = VMDQ[bd]; + + txgbe_validate_option(&vmdq, &opt); + + /* zero or one both mean disabled from our driver's + * perspective */ + if (vmdq > 1) { + *aflags |= TXGBE_FLAG_VMDQ_ENABLED; + } else + *aflags &= ~TXGBE_FLAG_VMDQ_ENABLED; + + feature[RING_F_VMDQ].limit = (u16)vmdq; +#ifdef module_param_array + } else { + if (opt.def == OPTION_DISABLED) + *aflags &= ~TXGBE_FLAG_VMDQ_ENABLED; + else + *aflags |= TXGBE_FLAG_VMDQ_ENABLED; + + feature[RING_F_VMDQ].limit = opt.def; + } +#endif + /* Check Interoperability */ + if (*aflags & TXGBE_FLAG_VMDQ_ENABLED) { + if (!(*aflags & TXGBE_FLAG_MQ_CAPABLE)) { + DPRINTK(PROBE, INFO, + "VMDQ is not supported while multiple " + "queues are disabled. 
" + "Disabling VMDQ.\n"); + *aflags &= ~TXGBE_FLAG_VMDQ_ENABLED; + feature[RING_F_VMDQ].limit = 0; + } + } + } +#ifdef CONFIG_PCI_IOV + { /* Single Root I/O Virtualization (SR-IOV) */ + static struct txgbe_option opt = { + .type = range_option, + .name = "I/O Virtualization (IOV)", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = MAX_SRIOV_VFS} } + }; + +#ifdef module_param_array + if (num_max_vfs > bd) { +#endif + u32 vfs = max_vfs[bd]; + if (txgbe_validate_option(&vfs, &opt)) { + vfs = 0; + DPRINTK(PROBE, INFO, + "max_vfs out of range " + "Disabling SR-IOV.\n"); + } + + adapter->max_vfs = vfs; + + if (vfs) + *aflags |= TXGBE_FLAG_SRIOV_ENABLED; + else + *aflags &= ~TXGBE_FLAG_SRIOV_ENABLED; +#ifdef module_param_array + } else { + if (opt.def == OPTION_DISABLED) { + adapter->max_vfs = 0; + *aflags &= ~TXGBE_FLAG_SRIOV_ENABLED; + } else { + adapter->max_vfs = opt.def; + *aflags |= TXGBE_FLAG_SRIOV_ENABLED; + } + } +#endif + + /* Check Interoperability */ + if (*aflags & TXGBE_FLAG_SRIOV_ENABLED) { + if (!(*aflags & TXGBE_FLAG_SRIOV_CAPABLE)) { + DPRINTK(PROBE, INFO, + "IOV is not supported on this " + "hardware. Disabling IOV.\n"); + *aflags &= ~TXGBE_FLAG_SRIOV_ENABLED; + adapter->max_vfs = 0; + } else if (!(*aflags & TXGBE_FLAG_MQ_CAPABLE)) { + DPRINTK(PROBE, INFO, + "IOV is not supported while multiple " + "queues are disabled. " + "Disabling IOV.\n"); + *aflags &= ~TXGBE_FLAG_SRIOV_ENABLED; + adapter->max_vfs = 0; + } + } + } + { /* VEPA Bridge Mode enable for SR-IOV mode */ + static struct txgbe_option opt = { + .type = range_option, + .name = "VEPA Bridge Mode Enable", + .err = "defaulting to disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = OPTION_ENABLED} } + }; + +#ifdef module_param_array + if (num_VEPA > bd) { +#endif + u32 vepa = VEPA[bd]; + txgbe_validate_option(&vepa, &opt); + if (vepa) + adapter->flags |= + TXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; +#ifdef module_param_array + } else { + if (opt.def == OPTION_ENABLED) + adapter->flags |= + TXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } +#endif + } +#endif /* CONFIG_PCI_IOV */ + { /* Interrupt Throttling Rate */ + static struct txgbe_option opt = { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of "__MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR } } + }; + +#ifdef module_param_array + if (num_InterruptThrottleRate > bd) { +#endif + u32 itr = InterruptThrottleRate[bd]; + switch (itr) { + case 0: + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + adapter->rx_itr_setting = 0; + break; + case 1: + DPRINTK(PROBE, INFO, "dynamic interrupt " + "throttling enabled\n"); + adapter->rx_itr_setting = 1; + break; + default: + txgbe_validate_option(&itr, &opt); + /* the first bit is used as control */ + adapter->rx_itr_setting = (u16)((1000000/itr) << 2); + break; + } + adapter->tx_itr_setting = adapter->rx_itr_setting; +#ifdef module_param_array + } else { + adapter->rx_itr_setting = opt.def; + adapter->tx_itr_setting = opt.def; + } +#endif + } +#ifndef TXGBE_NO_LLI + { /* Low Latency Interrupt TCP Port*/ + static struct txgbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt TCP Port", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIPORT), + .def = DEFAULT_LLIPORT, + .arg = { .r = { .min = MIN_LLIPORT, + .max = MAX_LLIPORT } } + }; + +#ifdef module_param_array + if (num_LLIPort > bd) { +#endif + 
adapter->lli_port = LLIPort[bd]; + if (adapter->lli_port) { + txgbe_validate_option(&adapter->lli_port, &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } +#ifdef module_param_array + } else { + adapter->lli_port = opt.def; + } +#endif + } + { /* Low Latency Interrupt on Packet Size */ + static struct txgbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on Packet Size", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLISIZE), + .def = DEFAULT_LLISIZE, + .arg = { .r = { .min = MIN_LLISIZE, + .max = MAX_LLISIZE } } + }; + +#ifdef module_param_array + if (num_LLISize > bd) { +#endif + adapter->lli_size = LLISize[bd]; + if (adapter->lli_size) { + txgbe_validate_option(&adapter->lli_size, &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } +#ifdef module_param_array + } else { + adapter->lli_size = opt.def; + } +#endif + } + { /* Low Latency Interrupt EtherType*/ + static struct txgbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on Ethernet Protocol " + "Type", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIETYPE), + .def = DEFAULT_LLIETYPE, + .arg = { .r = { .min = MIN_LLIETYPE, + .max = MAX_LLIETYPE } } + }; + +#ifdef module_param_array + if (num_LLIEType > bd) { +#endif + adapter->lli_etype = LLIEType[bd]; + if (adapter->lli_etype) { + txgbe_validate_option(&adapter->lli_etype, + &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } +#ifdef module_param_array + } else { + adapter->lli_etype = opt.def; + } +#endif + } + { /* LLI VLAN Priority */ + static struct txgbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on VLAN priority " + "threshold", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIVLANP), + .def = DEFAULT_LLIVLANP, + .arg = { .r = { .min = MIN_LLIVLANP, + .max = MAX_LLIVLANP } } + }; + +#ifdef module_param_array + if (num_LLIVLANP > bd) { +#endif + adapter->lli_vlan_pri = LLIVLANP[bd]; + if (adapter->lli_vlan_pri) { + txgbe_validate_option(&adapter->lli_vlan_pri, + &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } +#ifdef module_param_array + } else { + adapter->lli_vlan_pri = opt.def; + } +#endif + } +#endif /* TXGBE_NO_LLI */ +#ifdef HAVE_TX_MQ + { /* Flow Director packet buffer allocation */ + u32 fdir_pballoc_mode; + static struct txgbe_option opt = { + .type = range_option, + .name = "Flow Director packet buffer allocation", + .err = "using default of " + __MODULE_STRING(TXGBE_DEFAULT_FDIR_PBALLOC), + .def = TXGBE_DEFAULT_FDIR_PBALLOC, + .arg = {.r = {.min = TXGBE_FDIR_PBALLOC_64K, + .max = TXGBE_FDIR_PBALLOC_256K} } + }; + const char *pstring; + + if (num_FdirPballoc > bd) { + fdir_pballoc_mode = FdirPballoc[bd]; + txgbe_validate_option(&fdir_pballoc_mode, &opt); + switch (fdir_pballoc_mode) { + case TXGBE_FDIR_PBALLOC_256K: + adapter->fdir_pballoc = TXGBE_FDIR_PBALLOC_256K; + pstring = "256kB"; + break; + case TXGBE_FDIR_PBALLOC_128K: + adapter->fdir_pballoc = TXGBE_FDIR_PBALLOC_128K; + pstring = "128kB"; + break; + case TXGBE_FDIR_PBALLOC_64K: + default: + adapter->fdir_pballoc = TXGBE_FDIR_PBALLOC_64K; + pstring = "64kB"; + break; + } + DPRINTK(PROBE, INFO, "Flow Director will be allocated " + "%s of packet buffer\n", pstring); + } else { + adapter->fdir_pballoc = opt.def; + } + + } + { /* Flow Director ATR Tx sample packet rate */ + static struct txgbe_option opt = { + .type = range_option, + .name = "Software ATR Tx packet sample rate", + .err = "using default of " 
+ __MODULE_STRING(TXGBE_DEFAULT_ATR_SAMPLE_RATE), + .def = TXGBE_DEFAULT_ATR_SAMPLE_RATE, + .arg = {.r = {.min = TXGBE_ATR_SAMPLE_RATE_OFF, + .max = TXGBE_MAX_ATR_SAMPLE_RATE} } + }; + static const char atr_string[] = + "ATR Tx Packet sample rate set to"; + + if (num_AtrSampleRate > bd) { + adapter->atr_sample_rate = AtrSampleRate[bd]; + + if (adapter->atr_sample_rate) { + txgbe_validate_option(&adapter->atr_sample_rate, + &opt); + DPRINTK(PROBE, INFO, "%s %d\n", atr_string, + adapter->atr_sample_rate); + } + } else { + adapter->atr_sample_rate = opt.def; + } + } +#endif /* HAVE_TX_MQ */ +#if IS_ENABLED(CONFIG_FCOE) + { + *aflags &= ~TXGBE_FLAG_FCOE_CAPABLE; + + { + struct txgbe_option opt = { + .type = enable_option, + .name = "Enabled/Disable FCoE offload", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; +#ifdef module_param_array + if (num_FCoE > bd) { +#endif + u32 fcoe = FCoE[bd]; + + txgbe_validate_option(&fcoe, &opt); + if (fcoe) + *aflags |= TXGBE_FLAG_FCOE_CAPABLE; +#ifdef module_param_array + } else { + if (opt.def == OPTION_ENABLED) + *aflags |= TXGBE_FLAG_FCOE_CAPABLE; + } +#endif + DPRINTK(PROBE, INFO, "FCoE Offload feature %sabled\n", + (*aflags & TXGBE_FLAG_FCOE_CAPABLE) ? + "en" : "dis"); + } + + } +#endif /* CONFIG_FCOE */ + { /* LRO - Set Large Receive Offload */ + struct txgbe_option opt = { + .type = enable_option, + .name = "LRO - Large Receive Offload", + .err = "defaulting to Disabled", + .def = OPTION_ENABLED + }; + struct net_device *netdev = adapter->netdev; + +#ifdef TXGBE_NO_LRO + if (!(adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE)) + opt.def = OPTION_DISABLED; + +#endif +#ifdef module_param_array + if (num_LRO > bd) { +#endif + u32 lro = LRO[bd]; + txgbe_validate_option(&lro, &opt); + if (lro) + netdev->features |= NETIF_F_LRO; + else + netdev->features &= ~NETIF_F_LRO; +#ifdef module_param_array + } else if (opt.def == OPTION_ENABLED) { + netdev->features |= NETIF_F_LRO; + } else { + netdev->features &= ~NETIF_F_LRO; + } +#endif +#ifdef TXGBE_NO_LRO + if ((netdev->features & NETIF_F_LRO) && + !(adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE)) { + DPRINTK(PROBE, INFO, + "RSC is not supported on this " + "hardware. Disabling RSC.\n"); + netdev->features &= ~NETIF_F_LRO; + } +#endif /* TXGBE_NO_LRO */ + } + { /* + * allow_unsupported_sfp - Enable/Disable support for unsupported + * and untested SFP+ modules. 
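+ * The result is recorded in adapter->hw.allow_unsupported_sfp for the SFP
+ * identification code to consult.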
+ */ + struct txgbe_option opt = { + .type = enable_option, + .name = "allow_unsupported_sfp", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; +#ifdef module_param_array + if (num_allow_unsupported_sfp > bd) { +#endif + u32 enable_unsupported_sfp = + allow_unsupported_sfp[bd]; + txgbe_validate_option(&enable_unsupported_sfp, &opt); + if (enable_unsupported_sfp) { + adapter->hw.allow_unsupported_sfp = true; + } else { + adapter->hw.allow_unsupported_sfp = false; + } +#ifdef module_param_array + } else if (opt.def == OPTION_ENABLED) { + adapter->hw.allow_unsupported_sfp = true; + } else { + adapter->hw.allow_unsupported_sfp = false; + } +#endif + } + + { + struct txgbe_option opt = { + .type = enable_option, + .name = "Fdir", + .err = "defaulting to disabled", + .def = OPTION_DISABLED + }; +#ifdef module_param_array + if (num_Fdir > bd) { +#endif + u32 enable_Fdir = Fdir[bd]; + txgbe_validate_option(&enable_Fdir, &opt); + if (enable_Fdir) { + adapter->hw.Fdir_enabled = true; + } else { + adapter->hw.Fdir_enabled = false; + } +#ifdef module_param_array + } else if (opt.def == OPTION_ENABLED) { + adapter->hw.Fdir_enabled = true; + } else { + adapter->hw.Fdir_enabled = false; + } +#endif + } + + { /* DMA Coalescing */ + struct txgbe_option opt = { + .type = range_option, + .name = "dmac_watchdog", + .err = "defaulting to 0 (disabled)", + .def = 0, + .arg = { .r = { .min = 41, .max = 10000 } }, + }; + const char *cmsg = "DMA coalescing not supported on this " + "hardware"; + + opt.err = cmsg; + opt.msg = cmsg; + opt.arg.r.min = 0; + opt.arg.r.max = 0; + +#ifdef module_param_array + if (num_dmac_watchdog > bd) { +#endif + u32 dmac_wd = dmac_watchdog[bd]; + + txgbe_validate_option(&dmac_wd, &opt); + adapter->hw.mac.dmac_config.watchdog_timer = (u16)dmac_wd; +#ifdef module_param_array + } else { + adapter->hw.mac.dmac_config.watchdog_timer = opt.def; + } +#endif + } + { /* VXLAN rx offload */ + struct txgbe_option opt = { + .type = range_option, + .name = "vxlan_rx", + .err = "defaulting to 1 (enabled)", + .def = 1, + .arg = { .r = { .min = 0, .max = 1 } }, + }; + const char *cmsg = "VXLAN rx offload not supported on this " + "hardware"; + const u32 flag = TXGBE_FLAG_VXLAN_OFFLOAD_ENABLE; + + if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { + opt.err = cmsg; + opt.msg = cmsg; + opt.def = 0; + opt.arg.r.max = 0; + } +#ifdef module_param_array + if (num_vxlan_rx > bd) { +#endif + u32 enable_vxlan_rx = vxlan_rx[bd]; + + txgbe_validate_option(&enable_vxlan_rx, &opt); + if (enable_vxlan_rx) + adapter->flags |= flag; + else + adapter->flags &= ~flag; +#ifdef module_param_array + } else if (opt.def) { + adapter->flags |= flag; + } else { + adapter->flags &= ~flag; + } +#endif + } + + { /* Rx buffer mode */ + u32 rx_buf_mode; + static struct txgbe_option opt = { + .type = range_option, + .name = "Rx buffer mode", + .err = "using default of " + __MODULE_STRING(TXGBE_DEFAULT_RXBUFMODE), + .def = TXGBE_DEFAULT_RXBUFMODE, + .arg = {.r = {.min = TXGBE_RXBUFMODE_NO_HEADER_SPLIT, + .max = TXGBE_RXBUFMODE_HEADER_SPLIT} } + + }; + +#ifdef module_param_array + if (num_RxBufferMode > bd) { +#endif + rx_buf_mode = RxBufferMode[bd]; + txgbe_validate_option(&rx_buf_mode, &opt); + switch (rx_buf_mode) { + case TXGBE_RXBUFMODE_NO_HEADER_SPLIT: + *aflags &= ~TXGBE_FLAG_RX_HS_ENABLED; + break; + case TXGBE_RXBUFMODE_HEADER_SPLIT: + *aflags |= TXGBE_FLAG_RX_HS_ENABLED; + break; + default: + break; + } +#ifdef module_param_array + } else { + *aflags &= ~TXGBE_FLAG_RX_HS_ENABLED; + } +#endif + + } + 
{ /* Cloud Switch */ + struct txgbe_option opt = { + .type = range_option, + .name = "CloudSwitch", + .err = "defaulting to 0 (disabled)", + .def = 0, + .arg = { .r = { .min = 0, .max = 1 } }, + }; + +#ifdef module_param_array + if (num_CloudSwitch > bd) { +#endif + u32 enable_cloudswitch = CloudSwitch[bd]; + + txgbe_validate_option(&enable_cloudswitch, &opt); + if (enable_cloudswitch) + adapter->flags |= + TXGBE_FLAG2_CLOUD_SWITCH_ENABLED; + else + adapter->flags &= + ~TXGBE_FLAG2_CLOUD_SWITCH_ENABLED; +#ifdef module_param_array + } else if (opt.def) { + adapter->flags |= TXGBE_FLAG2_CLOUD_SWITCH_ENABLED; + } else { + adapter->flags &= ~TXGBE_FLAG2_CLOUD_SWITCH_ENABLED; + } +#endif + } +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_pcierr.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_pcierr.c new file mode 100644 index 0000000000000000000000000000000000000000..34d7c87677b9b4222894c7750a3eb849a5dcd967 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_pcierr.c @@ -0,0 +1,384 @@ +#include +#include +#include + +#include "txgbe_pcierr.h" +#include "txgbe.h" +#define TXGBE_ROOT_PORT_INTR_ON_MESG_MASK (PCI_ERR_ROOT_CMD_COR_EN| \ + PCI_ERR_ROOT_CMD_NONFATAL_EN| \ + PCI_ERR_ROOT_CMD_FATAL_EN) + +#ifndef PCI_ERS_RESULT_NO_AER_DRIVER +/* No AER capabilities registered for the driver */ +#define PCI_ERS_RESULT_NO_AER_DRIVER ((__force pci_ers_result_t) 6) +#endif + +static const char *aer_correctable_error_string[16] = { + "RxErr", /* Bit Position 0 */ + NULL, + NULL, + NULL, + NULL, + NULL, + "BadTLP", /* Bit Position 6 */ + "BadDLLP", /* Bit Position 7 */ + "Rollover", /* Bit Position 8 */ + NULL, + NULL, + NULL, + "Timeout", /* Bit Position 12 */ + "NonFatalErr", /* Bit Position 13 */ + "CorrIntErr", /* Bit Position 14 */ + "HeaderOF", /* Bit Position 15 */ +}; + +static const char *aer_uncorrectable_error_string[27] = { + "Undefined", /* Bit Position 0 */ + NULL, + NULL, + NULL, + "DLP", /* Bit Position 4 */ + "SDES", /* Bit Position 5 */ + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + "TLP", /* Bit Position 12 */ + "FCP", /* Bit Position 13 */ + "CmpltTO", /* Bit Position 14 */ + "CmpltAbrt", /* Bit Position 15 */ + "UnxCmplt", /* Bit Position 16 */ + "RxOF", /* Bit Position 17 */ + "MalfTLP", /* Bit Position 18 */ + "ECRC", /* Bit Position 19 */ + "UnsupReq", /* Bit Position 20 */ + "ACSViol", /* Bit Position 21 */ + "UncorrIntErr", /* Bit Position 22 */ + "BlockedTLP", /* Bit Position 23 */ + "AtomicOpBlocked", /* Bit Position 24 */ + "TLPBlockedErr", /* Bit Position 25 */ + "PoisonTLPBlocked", /* Bit Position 26 */ +}; + + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +/* redefinition because centos 6 can't use pci_walk_bus in pci.h*/ + +struct rw_semaphore pci_bus_sem; +/** pci_walk_bus - walk devices on/under bus, calling callback. + * @top bus whose devices should be walked + * @cb callback to be called for each device found + * @userdata arbitrary pointer to be passed to callback. + * + * Walk the given bus, including any bridged devices + * on buses under this bus. Call the provided callback + * on each device found. + * + * We check the return of @cb each time. If it returns anything + * other than 0, we break out. 
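+ * The walk is depth-first: a bridge's subordinate bus is traversed before
+ * moving on to the bridge's next sibling.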
+ * + */ +void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), + void *userdata) +{ + struct pci_dev *dev; + struct pci_bus *bus; + struct list_head *next; + int retval; + + bus = top; + down_read(&pci_bus_sem); + next = top->devices.next; + for (;;) { + if (next == &bus->devices) { + /* end of this bus, go up or finish */ + if (bus == top) + break; + next = bus->self->bus_list.next; + bus = bus->self->bus; + continue; + } + dev = list_entry(next, struct pci_dev, bus_list); + if (dev->subordinate) { + /* this is a pci-pci bridge, do its devices next */ + next = dev->subordinate->devices.next; + bus = dev->subordinate; + } else + next = dev->bus_list.next; + + retval = cb(dev, userdata); + if (retval) + break; + } + up_read(&pci_bus_sem); +} +#endif + +static pci_ers_result_t merge_result(enum pci_ers_result orig, + enum pci_ers_result new) +{ + if (new == PCI_ERS_RESULT_NO_AER_DRIVER) + return PCI_ERS_RESULT_NO_AER_DRIVER; + if (new == PCI_ERS_RESULT_NONE) + return orig; + switch (orig) { + case PCI_ERS_RESULT_CAN_RECOVER: + case PCI_ERS_RESULT_RECOVERED: + orig = new; + break; + case PCI_ERS_RESULT_DISCONNECT: + if (new == PCI_ERS_RESULT_NEED_RESET) + orig = PCI_ERS_RESULT_NEED_RESET; + break; + default: + break; + } + return orig; +} + +static int txgbe_report_error_detected(struct pci_dev *dev, + pci_channel_state_t state, + enum pci_ers_result *result) +{ + pci_ers_result_t vote; + const struct pci_error_handlers *err_handler; + + device_lock(&dev->dev); + if ( + !dev->driver || + !dev->driver->err_handler || + !dev->driver->err_handler->error_detected) { + /* + * If any device in the subtree does not have an error_detected + * callback, PCI_ERS_RESULT_NO_AER_DRIVER prevents subsequent + * error callbacks of "any" device in the subtree, and will + * exit in the disconnected error state. 
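+ * Bridges without a handler vote PCI_ERS_RESULT_NONE instead, so they do
+ * not veto recovery for the devices behind them.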
+ */ + if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) + vote = PCI_ERS_RESULT_NO_AER_DRIVER; + else + vote = PCI_ERS_RESULT_NONE; + } else { + err_handler = dev->driver->err_handler; + vote = err_handler->error_detected(dev, state); + } + + *result = merge_result(*result, vote); + device_unlock(&dev->dev); + return 0; +} + +static int txgbe_report_frozen_detected(struct pci_dev *dev, void *data) +{ + return txgbe_report_error_detected(dev, pci_channel_io_frozen, data); +} + +static int txgbe_report_mmio_enabled(struct pci_dev *dev, void *data) +{ + pci_ers_result_t vote, *result = data; + const struct pci_error_handlers *err_handler; + + device_lock(&dev->dev); + if (!dev->driver || + !dev->driver->err_handler || + !dev->driver->err_handler->mmio_enabled) + goto out; + + err_handler = dev->driver->err_handler; + vote = err_handler->mmio_enabled(dev); + *result = merge_result(*result, vote); +out: + device_unlock(&dev->dev); + return 0; +} + +static int txgbe_report_slot_reset(struct pci_dev *dev, void *data) +{ + pci_ers_result_t vote, *result = data; + const struct pci_error_handlers *err_handler; + + device_lock(&dev->dev); + if (!dev->driver || + !dev->driver->err_handler || + !dev->driver->err_handler->slot_reset) + goto out; + + err_handler = dev->driver->err_handler; + vote = err_handler->slot_reset(dev); + *result = merge_result(*result, vote); +out: + device_unlock(&dev->dev); + return 0; +} + +static int txgbe_report_resume(struct pci_dev *dev, void *data) +{ + const struct pci_error_handlers *err_handler; + + device_lock(&dev->dev); + dev->error_state = pci_channel_io_normal; + if ( + !dev->driver || + !dev->driver->err_handler || + !dev->driver->err_handler->resume) + goto out; + + err_handler = dev->driver->err_handler; + err_handler->resume(dev); +out: + device_unlock(&dev->dev); + return 0; +} + + +void txgbe_pcie_do_recovery(struct pci_dev *dev) +{ + pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER; + struct pci_bus *bus; + u32 reg32; + int pos; + int delay = 1; + u32 id; + u16 ctrl; + /* + * Error recovery runs on all subordinates of the first downstream port. + * If the downstream port detected the error, it is cleared at the end. + */ + if (!(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)) + dev = dev->bus->self; + bus = dev->subordinate; + + pci_walk_bus(bus, txgbe_report_frozen_detected, &status); + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); + if (pos) { + /* Disable Root's interrupt in response to error messages */ + pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, ®32); + reg32 &= ~TXGBE_ROOT_PORT_INTR_ON_MESG_MASK; + pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32); + } + + pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl); + ctrl |= PCI_BRIDGE_CTL_BUS_RESET; + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); + + /* + * * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double + * * this to 2ms to ensure that we meet the minimum requirement. + * */ + + msleep(2); + ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); + + /* + * * Trhfa for conventional PCI is 2^25 clock cycles. + * * Assuming a minimum 33MHz clock this results in a 1s + * * delay before we can consider subordinate devices to + * * be re-initialized. PCIe has some ways to shorten this, + * * but we don't make use of them yet. 
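+ * * After this settle time the code below polls PCI_COMMAND until the
+ * * device responds, doubling the delay up to a limit of about 60 seconds.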
+ * */ + ssleep(1); + + pci_read_config_dword(dev, PCI_COMMAND, &id); + while (id == ~0) { + if (delay > 60000) { + pci_warn(dev, "not ready %dms after %s; giving up\n", + delay - 1, "bus_reset"); + return; + } + + if (delay > 1000) + pci_info(dev, "not ready %dms after %s; waiting\n", + delay - 1, "bus_reset"); + + msleep(delay); + delay *= 2; + pci_read_config_dword(dev, PCI_COMMAND, &id); + } + + if (delay > 1000) + pci_info(dev, "ready %dms after %s\n", delay - 1, + "bus_reset"); + + pci_info(dev, "Root Port link has been reset\n"); + + if (pos) { + /* Clear Root Error Status */ + pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, ®32); + pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, reg32); + + /* Enable Root Port's interrupt in response to error messages */ + pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, ®32); + reg32 |= TXGBE_ROOT_PORT_INTR_ON_MESG_MASK; + pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32); + } + + if (status == PCI_ERS_RESULT_CAN_RECOVER) { + status = PCI_ERS_RESULT_RECOVERED; + pci_dbg(dev, "broadcast mmio_enabled message\n"); + pci_walk_bus(bus, txgbe_report_mmio_enabled, &status); + } + + if (status == PCI_ERS_RESULT_NEED_RESET) { + /* + * TODO: Should call platform-specific + * functions to reset slot before calling + * drivers' slot_reset callbacks? + */ + status = PCI_ERS_RESULT_RECOVERED; + pci_dbg(dev, "broadcast slot_reset message\n"); + pci_walk_bus(bus, txgbe_report_slot_reset, &status); + } + + if (status != PCI_ERS_RESULT_RECOVERED) + goto failed; + + pci_dbg(dev, "broadcast resume message\n"); + pci_walk_bus(bus, txgbe_report_resume, &status); + +failed: + ; +} + +void txgbe_aer_print_error(struct txgbe_adapter *adapter, u32 severity, u32 status) +{ + unsigned long i; + const char *errmsg = NULL; + struct pci_dev *pdev = adapter->pdev; + unsigned long val = status; + + for_each_set_bit(i, &val, 32) { + if (severity == TXGBE_AER_CORRECTABLE) { + errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ? + aer_correctable_error_string[i] : NULL; + } else { + errmsg = i < ARRAY_SIZE(aer_uncorrectable_error_string) ? 
+ aer_uncorrectable_error_string[i] : NULL; + + if (errmsg != NULL && i == 14) + adapter->cmplt_to_dis = true; + } + if (errmsg) + dev_info(&pdev->dev, " [%2ld] %-22s\n", i, errmsg); + + } +} + +bool txgbe_check_recovery_capability(struct pci_dev *dev) +{ +#if defined(__i386__) || defined(__x86_64__) + return true; +#else + /* check whether the upstream bridge is a root port or a PLX bridge, + * or the CPU is Kunpeng 920 + */ + if (dev->bus->self->vendor == 0x10b5 || + dev->bus->self->vendor == 0x19e5) + return true; + else + return false; +#endif +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_pcierr.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_pcierr.h new file mode 100644 index 0000000000000000000000000000000000000000..84cf9bc92a6fba31faa9d662811c84a4b10ccc1d --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_pcierr.h @@ -0,0 +1,14 @@ +#ifndef _TXGBE_PCIERR_H_ +#define _TXGBE_PCIERR_H_ + +#include "txgbe.h" + +#define TXGBE_AER_UNCORRECTABLE 1 +#define TXGBE_AER_CORRECTABLE 2 + +void txgbe_pcie_do_recovery(struct pci_dev *dev); +void txgbe_aer_print_error(struct txgbe_adapter *adapter, u32 severity, u32 status); +bool txgbe_check_recovery_capability(struct pci_dev *dev); + +#endif + diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index 4159c84035fdceb318e0617d455bccb132f51ea5..54444bb43e9f6227dc341aa993009c1f3f9a0d53 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -1,835 +1,1604 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../libwx/wx_type.h" -#include "../libwx/wx_lib.h" -#include "../libwx/wx_hw.h" -#include "txgbe_type.h" +/* + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on txgbe_phy.c, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + #include "txgbe_phy.h" -#include "txgbe_hw.h" +#include "txgbe_mtd.h" +#include "txgbe.h" + +/** + * txgbe_check_reset_blocked - check status of MNG FW veto bit + * @hw: pointer to the hardware structure + * + * This function checks the MMNGC.MNG_VETO bit to see if there are + * any constraints on link from manageability. For MACs that don't + * have this bit, just return false since the link cannot be blocked + * via this method.
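+ *
+ * Returns true if the MNG_VETO bit is set and link changes are blocked,
+ * false otherwise.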
+ **/ +s32 txgbe_check_reset_blocked(struct txgbe_hw *hw) +{ + u32 mmngc; + + mmngc = rd32(hw, TXGBE_MIS_ST); + if (mmngc & TXGBE_MIS_ST_MNG_VETO) { + return true; + } + + return false; +} -static int txgbe_swnodes_register(struct txgbe *txgbe) + +/** + * txgbe_get_phy_id - Get the phy type + * @hw: pointer to hardware structure + * + **/ +s32 txgbe_get_phy_id(struct txgbe_hw *hw) { - struct txgbe_nodes *nodes = &txgbe->nodes; - struct pci_dev *pdev = txgbe->wx->pdev; - struct software_node *swnodes; - u32 id; + u32 status; + u16 phy_id_high = 0; + u16 phy_id_low = 0; + u8 numport, thisport; + u32 i = 0; + + if (hw->mac.type == txgbe_mac_aml) { + hw->phy.addr = 0; + + for (i = 0; i < 32; i++) { + hw->phy.addr = i; + status = txgbe_read_phy_reg_mdi(hw, TXGBE_MDIO_PHY_ID_HIGH, 0, &phy_id_high); + if (status) { + printk("txgbe_read_phy_reg_mdi failed 1\n"); + return status; + } + printk("%d: phy_id_high 0x%x\n", i, phy_id_high); + if ((phy_id_high & 0xFFFF) == 0x0141) { + break; + } + } - id = pci_dev_id(pdev); + if (i == 32) { + printk("txgbe_read_phy_reg_mdi failed\n"); + return TXGBE_ERR_PHY; + } - snprintf(nodes->gpio_name, sizeof(nodes->gpio_name), "txgbe_gpio-%x", id); - snprintf(nodes->i2c_name, sizeof(nodes->i2c_name), "txgbe_i2c-%x", id); - snprintf(nodes->sfp_name, sizeof(nodes->sfp_name), "txgbe_sfp-%x", id); - snprintf(nodes->phylink_name, sizeof(nodes->phylink_name), "txgbe_phylink-%x", id); + status = txgbe_read_phy_reg_mdi(hw, TXGBE_MDIO_PHY_ID_LOW, 0, &phy_id_low); + if (status) { + printk("txgbe_read_phy_reg_mdi failed 2\n"); + return status; + } + hw->phy.id = (u32)(phy_id_high & 0xFFFF) << 6; + hw->phy.id |= (u32)((phy_id_low & 0xFC00) >> 10); - swnodes = nodes->swnodes; + printk("txgbe_get_phy_id: phy_id 0x%x", hw->phy.id); - /* GPIO 0: tx fault - * GPIO 1: tx disable - * GPIO 2: sfp module absent - * GPIO 3: rx signal lost - * GPIO 4: rate select, 1G(0) 10G(1) - * GPIO 5: rate select, 1G(0) 10G(1) - */ - nodes->gpio_props[0] = PROPERTY_ENTRY_STRING("pinctrl-names", "default"); - swnodes[SWNODE_GPIO] = NODE_PROP(nodes->gpio_name, nodes->gpio_props); - nodes->gpio0_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_GPIO], 0, GPIO_ACTIVE_HIGH); - nodes->gpio1_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_GPIO], 1, GPIO_ACTIVE_HIGH); - nodes->gpio2_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_GPIO], 2, GPIO_ACTIVE_LOW); - nodes->gpio3_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_GPIO], 3, GPIO_ACTIVE_HIGH); - nodes->gpio4_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_GPIO], 4, GPIO_ACTIVE_HIGH); - nodes->gpio5_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_GPIO], 5, GPIO_ACTIVE_HIGH); + return status; + + } - nodes->i2c_props[0] = PROPERTY_ENTRY_STRING("compatible", "snps,designware-i2c"); - nodes->i2c_props[1] = PROPERTY_ENTRY_BOOL("wx,i2c-snps-model"); - nodes->i2c_props[2] = PROPERTY_ENTRY_U32("clock-frequency", I2C_MAX_STANDARD_MODE_FREQ); - swnodes[SWNODE_I2C] = NODE_PROP(nodes->i2c_name, nodes->i2c_props); - nodes->i2c_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_I2C]); + status = mtdHwXmdioRead(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_PMA_PMD_DEV_TYPE, + TXGBE_MDIO_PHY_ID_HIGH, &phy_id_high); - nodes->sfp_props[0] = PROPERTY_ENTRY_STRING("compatible", "sff,sfp"); - nodes->sfp_props[1] = PROPERTY_ENTRY_REF_ARRAY("i2c-bus", nodes->i2c_ref); - nodes->sfp_props[2] = PROPERTY_ENTRY_REF_ARRAY("tx-fault-gpios", nodes->gpio0_ref); - nodes->sfp_props[3] = PROPERTY_ENTRY_REF_ARRAY("tx-disable-gpios", nodes->gpio1_ref); - nodes->sfp_props[4] = 
PROPERTY_ENTRY_REF_ARRAY("mod-def0-gpios", nodes->gpio2_ref); - nodes->sfp_props[5] = PROPERTY_ENTRY_REF_ARRAY("los-gpios", nodes->gpio3_ref); - nodes->sfp_props[6] = PROPERTY_ENTRY_REF_ARRAY("rate-select1-gpios", nodes->gpio4_ref); - nodes->sfp_props[7] = PROPERTY_ENTRY_REF_ARRAY("rate-select0-gpios", nodes->gpio5_ref); - swnodes[SWNODE_SFP] = NODE_PROP(nodes->sfp_name, nodes->sfp_props); - nodes->sfp_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_SFP]); + if (status == 0) { + hw->phy.id = (u32)(phy_id_high << 16); + status = mtdHwXmdioRead(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_PMA_PMD_DEV_TYPE, + TXGBE_MDIO_PHY_ID_LOW, &phy_id_low); + hw->phy.id |= (u32)(phy_id_low & TXGBE_PHY_REVISION_MASK); + } - nodes->phylink_props[0] = PROPERTY_ENTRY_STRING("managed", "in-band-status"); - nodes->phylink_props[1] = PROPERTY_ENTRY_REF_ARRAY("sfp", nodes->sfp_ref); - swnodes[SWNODE_PHYLINK] = NODE_PROP(nodes->phylink_name, nodes->phylink_props); + if (status == 0) { + status = mtdGetPhyRevision(&hw->phy_dev, hw->phy.addr, + (MTD_DEVICE_ID *)&hw->phy.revision, &numport, &thisport); + if (status == MTD_FAIL) { + ERROR_REPORT1(TXGBE_ERROR_INVALID_STATE, + "Error in mtdGetPhyRevision()\n"); + } + } + return status; +} - nodes->group[SWNODE_GPIO] = &swnodes[SWNODE_GPIO]; - nodes->group[SWNODE_I2C] = &swnodes[SWNODE_I2C]; - nodes->group[SWNODE_SFP] = &swnodes[SWNODE_SFP]; - nodes->group[SWNODE_PHYLINK] = &swnodes[SWNODE_PHYLINK]; +/** + * txgbe_get_phy_type_from_id - Get the phy type + * @phy_id: PHY ID information + * + **/ +enum txgbe_phy_type txgbe_get_phy_type_from_id(struct txgbe_hw *hw) +{ + enum txgbe_phy_type phy_type; + u16 ext_ability = 0; - return software_node_register_node_group(nodes->group); + switch (hw->phy.id) { + case TN1010_PHY_ID: + phy_type = txgbe_phy_tn; + break; + case QT2022_PHY_ID: + phy_type = txgbe_phy_qt; + break; + case ATH_PHY_ID: + phy_type = txgbe_phy_nl; + break; + default: + phy_type = txgbe_phy_unknown; + break; + } + if (phy_type == txgbe_phy_unknown) { + mtdHwXmdioRead(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_PMA_PMD_DEV_TYPE, + TXGBE_MDIO_PHY_EXT_ABILITY, &ext_ability); + + if (ext_ability & (TXGBE_MDIO_PHY_10GBASET_ABILITY | + TXGBE_MDIO_PHY_1000BASET_ABILITY)) + phy_type = txgbe_phy_cu_unknown; + else + phy_type = txgbe_phy_generic; + } + return phy_type; } -static int txgbe_pcs_read(struct mii_bus *bus, int addr, int devnum, int regnum) +/** + * txgbe_reset_phy - Performs a PHY reset + * @hw: pointer to hardware structure + **/ +s32 txgbe_reset_phy(struct txgbe_hw *hw) { - struct wx *wx = bus->priv; - u32 offset, val; + s32 status = 0; - if (addr) - return -EOPNOTSUPP; + if (status != 0 || hw->phy.type == txgbe_phy_none) + goto out; - offset = devnum << 16 | regnum; + /* Don't reset PHY if it's shut down due to overtemp. 
*/ + if (!hw->phy.reset_if_overtemp && + (TXGBE_ERR_OVERTEMP == TCALL(hw, phy.ops.check_overtemp))) + goto out; - /* Set the LAN port indicator to IDA_ADDR */ - wr32(wx, TXGBE_XPCS_IDA_ADDR, offset); + /* Blocked by MNG FW so bail */ + txgbe_check_reset_blocked(hw); + if(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP)|| + ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP)) + goto out; - /* Read the data from IDA_DATA register */ - val = rd32(wx, TXGBE_XPCS_IDA_DATA); + status = mtdHardwareReset(&hw->phy_dev, hw->phy.addr, 1000); - return (u16)val; +out: + return status; } -static int txgbe_pcs_write(struct mii_bus *bus, int addr, int devnum, int regnum, u16 val) +/** + * txgbe_read_phy_mdi - Reads a value from a specified PHY register without + * the SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + **/ +s32 txgbe_read_phy_reg_mdi(struct txgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data) { - struct wx *wx = bus->priv; - u32 offset; + u32 command; + s32 status = 0; - if (addr) - return -EOPNOTSUPP; + /* setup and write the address cycle command */ + command = TXGBE_MSCA_RA(reg_addr) | + TXGBE_MSCA_PA(hw->phy.addr) | + TXGBE_MSCA_DA(device_type); + wr32(hw, TXGBE_MSCA, command); - offset = devnum << 16 | regnum; + command = TXGBE_MSCC_CMD(TXGBE_MSCA_CMD_READ) | TXGBE_MSCC_BUSY; + wr32(hw, TXGBE_MSCC, command); - /* Set the LAN port indicator to IDA_ADDR */ - wr32(wx, TXGBE_XPCS_IDA_ADDR, offset); + /* wait to complete */ + status = po32m(hw, TXGBE_MSCC, + TXGBE_MSCC_BUSY, ~TXGBE_MSCC_BUSY, + TXGBE_MDIO_TIMEOUT, 10); + if (status != 0) { + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "PHY address command did not complete.\n"); + return TXGBE_ERR_PHY; + } - /* Write the data to IDA_DATA register */ - wr32(wx, TXGBE_XPCS_IDA_DATA, val); + /* read data from MSCC */ + *phy_data = 0xFFFF & rd32(hw, TXGBE_MSCC); return 0; } -static int txgbe_mdio_pcs_init(struct txgbe *txgbe) +/** + * txgbe_read_phy_reg - Reads a value from a specified PHY register + * using the SWFW lock - this function is needed in most cases + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + **/ +s32 txgbe_read_phy_reg(struct txgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) { - struct mii_bus *mii_bus; - struct dw_xpcs *xpcs; - struct pci_dev *pdev; - struct wx *wx; - int ret = 0; + s32 status; + u32 gssr = hw->phy.phy_semaphore_mask; - wx = txgbe->wx; - pdev = wx->pdev; + if (0 == TCALL(hw, mac.ops.acquire_swfw_sync, gssr)) { + status = txgbe_read_phy_reg_mdi(hw, reg_addr, device_type, + phy_data); + TCALL(hw, mac.ops.release_swfw_sync, gssr); + } else { + status = TXGBE_ERR_SWFW_SYNC; + } - mii_bus = devm_mdiobus_alloc(&pdev->dev); - if (!mii_bus) - return -ENOMEM; + return status; +} - mii_bus->name = "txgbe_pcs_mdio_bus"; - mii_bus->read_c45 = &txgbe_pcs_read; - mii_bus->write_c45 = &txgbe_pcs_write; - mii_bus->parent = &pdev->dev; - mii_bus->phy_mask = ~0; - mii_bus->priv = wx; - snprintf(mii_bus->id, MII_BUS_ID_SIZE, "txgbe_pcs-%x", - pci_dev_id(pdev)); +/** + * txgbe_write_phy_reg_mdi - Writes a value to specified PHY register + * without SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +s32 txgbe_write_phy_reg_mdi(struct txgbe_hw *hw, u32 
reg_addr, + u32 device_type, u16 phy_data) +{ + u32 command; + s32 status = 0; - ret = devm_mdiobus_register(&pdev->dev, mii_bus); - if (ret) - return ret; + /* setup and write the address cycle command */ + command = TXGBE_MSCA_RA(reg_addr) | + TXGBE_MSCA_PA(hw->phy.addr) | + TXGBE_MSCA_DA(device_type); + wr32(hw, TXGBE_MSCA, command); - xpcs = xpcs_create_mdiodev(mii_bus, 0, PHY_INTERFACE_MODE_10GBASER); - if (IS_ERR(xpcs)) - return PTR_ERR(xpcs); + command = phy_data | TXGBE_MSCC_CMD(TXGBE_MSCA_CMD_WRITE) | + TXGBE_MSCC_BUSY; + wr32(hw, TXGBE_MSCC, command); - txgbe->xpcs = xpcs; + /* wait to complete */ + status = po32m(hw, TXGBE_MSCC, + TXGBE_MSCC_BUSY, ~TXGBE_MSCC_BUSY, + TXGBE_MDIO_TIMEOUT, 10); + if (status != 0) { + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "PHY address command did not complete.\n"); + return TXGBE_ERR_PHY; + } return 0; } -static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *config, - phy_interface_t interface) +/** + * txgbe_write_phy_reg - Writes a value to specified PHY register + * using SWFW lock- this function is needed in most cases + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +s32 txgbe_write_phy_reg(struct txgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) { - struct txgbe *txgbe = netdev_to_txgbe(to_net_dev(config->dev)); + s32 status; + u32 gssr = hw->phy.phy_semaphore_mask; - if (interface == PHY_INTERFACE_MODE_10GBASER) - return &txgbe->xpcs->pcs; + if (TCALL(hw, mac.ops.acquire_swfw_sync, gssr) == 0) { + status = txgbe_write_phy_reg_mdi(hw, reg_addr, device_type, + phy_data); + TCALL(hw, mac.ops.release_swfw_sync, gssr); + } else { + status = TXGBE_ERR_SWFW_SYNC; + } - return NULL; + return status; } -static void txgbe_mac_config(struct phylink_config *config, unsigned int mode, - const struct phylink_link_state *state) +MTD_STATUS txgbe_read_mdio( + MTD_DEV * dev, + MTD_U16 port, + MTD_U16 mmd, + MTD_U16 reg, + MTD_U16 *value) { + struct txgbe_hw *hw = (struct txgbe_hw *)(dev->appData); + + if (hw->phy.addr != port) + return MTD_FAIL; + return txgbe_read_phy_reg(hw, reg, mmd, value); } -static void txgbe_mac_link_down(struct phylink_config *config, - unsigned int mode, phy_interface_t interface) +MTD_STATUS txgbe_write_mdio( + MTD_DEV * dev, + MTD_U16 port, + MTD_U16 mmd, + MTD_U16 reg, + MTD_U16 value) { - struct wx *wx = netdev_priv(to_net_dev(config->dev)); + struct txgbe_hw *hw = (struct txgbe_hw *)(dev->appData); + + if (hw->phy.addr != port) + return MTD_FAIL; - wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); + return txgbe_write_phy_reg(hw, reg, mmd, value); } -static void txgbe_mac_link_up(struct phylink_config *config, - struct phy_device *phy, - unsigned int mode, phy_interface_t interface, - int speed, int duplex, - bool tx_pause, bool rx_pause) +/* +MTD_SEM txgbe_sem_create(MTD_SEM_BEGIN_STATE state) +{ + sema_init(struct semaphore * sem,int val) + mutex_init(mutex) +} +*/ + +/** + * txgbe_setup_phy_link - Set and restart auto-neg + * @hw: pointer to hardware structure + * + * Restart auto-negotiation and PHY and waits for completion. 
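+ *
+ * Returns the resolved link speed as a TXGBE_LINK_SPEED_* value, or
+ * TXGBE_LINK_SPEED_UNKNOWN if no speed was negotiated.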
+ **/
+u32 txgbe_setup_phy_link(struct txgbe_hw *hw, u32 speed_set,
+			 bool autoneg_wait_to_complete)
 {
-	struct wx *wx = netdev_priv(to_net_dev(config->dev));
-	u32 txcfg, wdg;
+	u16 speed = MTD_ADV_NONE;
+	MTD_DEV_PTR devptr = &hw->phy_dev;
+	u16 port = hw->phy.addr;
+	int i = 0;
+	MTD_BOOL linkUp = MTD_FALSE;
+	u16 linkSpeed = MTD_ADV_NONE;
+
+	if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_10GB_FULL)
+		speed |= MTD_SPEED_10GIG_FD;
+	if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_1GB_FULL)
+		speed |= MTD_SPEED_1GIG_FD;
+	if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_100_FULL)
+		speed |= MTD_SPEED_100M_FD;
+	if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_10_FULL)
+		speed |= MTD_SPEED_10M_FD;
+	if (!autoneg_wait_to_complete) {
+		mtdGetAutonegSpeedDuplexResolution(devptr, port, &linkSpeed);
+		if (linkSpeed & speed) {
+			speed = linkSpeed;
+			goto out;
+		}
+	}
 
-	txcfg = rd32(wx, WX_MAC_TX_CFG);
-	txcfg &= ~WX_MAC_TX_CFG_SPEED_MASK;
+	mtdEnableSpeeds(devptr, port, speed, MTD_TRUE);
+	msleep(10);
+	speed = MTD_ADV_NONE;
+	for (i = 0; i < 300; i++) {
+		mtdIsBaseTUp(devptr, port, &speed, &linkUp);
+		if (linkUp)
+			break;
+		msleep(10);
+	}
+out:
 	switch (speed) {
-	case SPEED_10000:
-		txcfg |= WX_MAC_TX_CFG_SPEED_10G;
-		break;
-	case SPEED_1000:
-	case SPEED_100:
-	case SPEED_10:
-		txcfg |= WX_MAC_TX_CFG_SPEED_1G;
-		break;
+	case MTD_SPEED_10GIG_FD:
+		return TXGBE_LINK_SPEED_10GB_FULL;
+	case MTD_SPEED_1GIG_FD:
+		return TXGBE_LINK_SPEED_1GB_FULL;
+	case MTD_SPEED_100M_FD:
+		return TXGBE_LINK_SPEED_100_FULL;
+	case MTD_SPEED_10M_FD:
+		return TXGBE_LINK_SPEED_10_FULL;
 	default:
-		break;
+		return TXGBE_LINK_SPEED_UNKNOWN;
 	}
-	wr32(wx, WX_MAC_TX_CFG, txcfg | WX_MAC_TX_CFG_TE);
-
-	/* Re configure MAC Rx */
-	wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE);
-	wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
-	wdg = rd32(wx, WX_MAC_WDG_TIMEOUT);
-	wr32(wx, WX_MAC_WDG_TIMEOUT, wdg);
 }
 
-static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode,
-			     phy_interface_t interface)
+/**
+ * txgbe_setup_phy_link_speed - Sets the auto advertised capabilities
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for link-up is required
+ **/
+u32 txgbe_setup_phy_link_speed(struct txgbe_hw *hw,
+			       u32 speed,
+			       bool autoneg_wait_to_complete)
 {
-	struct wx *wx = netdev_priv(to_net_dev(config->dev));
+	/*
+	 * Clear autoneg_advertised and set new values based on input link
+	 * speed.
+ */ + hw->phy.autoneg_advertised = 0; - wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); - wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0); + if (speed & TXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_10GB_FULL; - return txgbe_disable_sec_tx_path(wx); -} + if (speed & TXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_1GB_FULL; -static int txgbe_mac_finish(struct phylink_config *config, unsigned int mode, - phy_interface_t interface) -{ - struct wx *wx = netdev_priv(to_net_dev(config->dev)); + if (speed & TXGBE_LINK_SPEED_100_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_100_FULL; - txgbe_enable_sec_tx_path(wx); - wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE); + if (speed & TXGBE_LINK_SPEED_10_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_10_FULL; - return 0; + /* Setup link based on the new speed settings */ + return txgbe_setup_phy_link(hw, speed, autoneg_wait_to_complete); } -static const struct phylink_mac_ops txgbe_mac_ops = { - .mac_select_pcs = txgbe_phylink_mac_select, - .mac_prepare = txgbe_mac_prepare, - .mac_finish = txgbe_mac_finish, - .mac_config = txgbe_mac_config, - .mac_link_down = txgbe_mac_link_down, - .mac_link_up = txgbe_mac_link_up, -}; - -static int txgbe_phylink_init(struct txgbe *txgbe) +/** + * txgbe_get_copper_link_capabilities - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + * + * Determines the supported link capabilities by reading the PHY auto + * negotiation register. + **/ +s32 txgbe_get_copper_link_capabilities(struct txgbe_hw *hw, + u32 *speed, + bool *autoneg) { - struct fwnode_handle *fwnode = NULL; - struct phylink_config *config; - struct wx *wx = txgbe->wx; - phy_interface_t phy_mode; - struct phylink *phylink; - - config = devm_kzalloc(&wx->pdev->dev, sizeof(*config), GFP_KERNEL); - if (!config) - return -ENOMEM; - - config->dev = &wx->netdev->dev; - config->type = PHYLINK_NETDEV; - config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_100FD | - MAC_SYM_PAUSE | MAC_ASYM_PAUSE; - - if (wx->media_type == sp_media_copper) { - phy_mode = PHY_INTERFACE_MODE_XAUI; - __set_bit(PHY_INTERFACE_MODE_XAUI, config->supported_interfaces); - } else { - phy_mode = PHY_INTERFACE_MODE_10GBASER; - fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_PHYLINK]); - __set_bit(PHY_INTERFACE_MODE_10GBASER, config->supported_interfaces); - __set_bit(PHY_INTERFACE_MODE_1000BASEX, config->supported_interfaces); - __set_bit(PHY_INTERFACE_MODE_SGMII, config->supported_interfaces); - } - - phylink = phylink_create(config, fwnode, phy_mode, &txgbe_mac_ops); - if (IS_ERR(phylink)) - return PTR_ERR(phylink); - - if (wx->phydev) { - int ret; - - ret = phylink_connect_phy(phylink, wx->phydev); - if (ret) { - phylink_destroy(phylink); - return ret; - } + s32 status; + u16 speed_ability; + *speed = 0; + *autoneg = true; + + status = mtdHwXmdioRead(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_PMA_PMD_DEV_TYPE, + TXGBE_MDIO_PHY_SPEED_ABILITY, &speed_ability); + + if (status == 0) { + if (speed_ability & TXGBE_MDIO_PHY_SPEED_10G) + *speed |= TXGBE_LINK_SPEED_10GB_FULL; + if (speed_ability & TXGBE_MDIO_PHY_SPEED_1G) + *speed |= TXGBE_LINK_SPEED_1GB_FULL; + if (speed_ability & TXGBE_MDIO_PHY_SPEED_100M) + *speed |= TXGBE_LINK_SPEED_100_FULL; + if (speed_ability & TXGBE_MDIO_PHY_SPEED_10M) + *speed |= TXGBE_LINK_SPEED_10_FULL; } - txgbe->phylink = phylink; - - return 0; + return status; } -static int 
txgbe_gpio_get(struct gpio_chip *chip, unsigned int offset) -{ - struct wx *wx = gpiochip_get_data(chip); - int val; - - val = rd32m(wx, WX_GPIO_EXT, BIT(offset)); - return !!(val & BIT(offset)); +/** + * txgbe_get_phy_firmware_version - Gets the PHY Firmware Version + * @hw: pointer to hardware structure + * @firmware_version: pointer to the PHY Firmware Version + **/ +s32 txgbe_get_phy_firmware_version(struct txgbe_hw *hw, + u16 *firmware_version) +{ + s32 status; + u8 major, minor, inc, test; + + status = mtdGetFirmwareVersion(&hw->phy_dev, hw->phy.addr, + &major, &minor, &inc, &test); + if (status == 0) + *firmware_version = (major << 8) | minor; + return status; } -static int txgbe_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) +/** + * txgbe_identify_module - Identifies module type + * @hw: pointer to hardware structure + * + * Determines HW type and calls appropriate function. + **/ +s32 txgbe_identify_module(struct txgbe_hw *hw) { - struct wx *wx = gpiochip_get_data(chip); - u32 val; + s32 status = TXGBE_ERR_SFP_NOT_PRESENT; - val = rd32(wx, WX_GPIO_DDR); - if (BIT(offset) & val) - return GPIO_LINE_DIRECTION_OUT; + switch (TCALL(hw, mac.ops.get_media_type)) { + case txgbe_media_type_fiber_qsfp: + status = txgbe_identify_qsfp_module(hw); + break; + case txgbe_media_type_fiber: + status = txgbe_identify_sfp_module(hw); + break; + default: + hw->phy.sfp_type = txgbe_sfp_type_not_present; + status = TXGBE_ERR_SFP_NOT_PRESENT; + break; + } - return GPIO_LINE_DIRECTION_IN; + return status; } -static int txgbe_gpio_direction_in(struct gpio_chip *chip, unsigned int offset) +/** + * txgbe_identify_sfp_module - Identifies SFP modules + * @hw: pointer to hardware structure + * + * Searches for and identifies the SFP module and assigns appropriate PHY type. + **/ +s32 txgbe_identify_sfp_module(struct txgbe_hw *hw) { - struct wx *wx = gpiochip_get_data(chip); - unsigned long flags; - - raw_spin_lock_irqsave(&wx->gpio_lock, flags); - wr32m(wx, WX_GPIO_DDR, BIT(offset), 0); - raw_spin_unlock_irqrestore(&wx->gpio_lock, flags); + s32 status = TXGBE_ERR_PHY_ADDR_INVALID; + u32 vendor_oui = 0; + enum txgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; + u8 identifier = 0; + u8 comp_codes_1g = 0; + u8 comp_codes_10g = 0; + u8 comp_codes_25g = 0; + u8 comp_copper_len = 0; + u8 oui_bytes[3] = {0, 0, 0}; + u8 cable_tech = 0; + u8 cable_spec = 0; + u8 vendor_name[3] = {0, 0, 0}; + u16 phy_data = 0; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + u32 value; + u8 sff8472_rev, addr_mode, databyte; + bool page_swap = false; + struct txgbe_adapter *adapter = hw->back; + int i; + + if (hw->mac.type == txgbe_mac_aml) { + value = rd32(hw, TXGBE_GPIO_EXT); + if (value & TXGBE_SFP1_MOD_ABS_LS) { + hw->phy.sfp_type = txgbe_sfp_type_not_present; + return TXGBE_ERR_SFP_NOT_PRESENT; + } + } - return 0; -} + if (0 != TCALL(hw, mac.ops.acquire_swfw_sync, swfw_mask)) + return TXGBE_ERR_SWFW_SYNC; -static int txgbe_gpio_direction_out(struct gpio_chip *chip, unsigned int offset, - int val) -{ - struct wx *wx = gpiochip_get_data(chip); - unsigned long flags; - u32 set; + if (TCALL(hw, mac.ops.get_media_type) != txgbe_media_type_fiber) { + hw->phy.sfp_type = txgbe_sfp_type_not_present; + status = TXGBE_ERR_SFP_NOT_PRESENT; + goto out; + } - set = val ? 
BIT(offset) : 0; + /* LAN ID is needed for I2C access */ + txgbe_init_i2c(hw); + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_IDENTIFIER, + &identifier); - raw_spin_lock_irqsave(&wx->gpio_lock, flags); - wr32m(wx, WX_GPIO_DR, BIT(offset), set); - wr32m(wx, WX_GPIO_DDR, BIT(offset), BIT(offset)); - raw_spin_unlock_irqrestore(&wx->gpio_lock, flags); + if (status != 0) + goto err_read_i2c_eeprom; - return 0; -} + if (identifier != TXGBE_SFF_IDENTIFIER_SFP) { + hw->phy.type = txgbe_phy_sfp_unsupported; + status = TXGBE_ERR_SFP_NOT_SUPPORTED; + } else { + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_1GBE_COMP_CODES, + &comp_codes_1g); + if (status != 0) + goto err_read_i2c_eeprom; + + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_10GBE_COMP_CODES, + &comp_codes_10g); + if (status != 0) + goto err_read_i2c_eeprom; + + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_25GBE_COMP_CODES, + &comp_codes_25g); + if (status != 0) + goto err_read_i2c_eeprom; + + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_COPPER_LENGTH, + &comp_copper_len); + if (status != 0) + goto err_read_i2c_eeprom; + + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_CABLE_TECHNOLOGY, + &cable_tech); + if (status != 0) + goto err_read_i2c_eeprom; + + /* ID Module + * ========= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CORE0 + * 4 SFP_DA_CORE1 + * 5 SFP_SR/LR_CORE0 + * 6 SFP_SR/LR_CORE1 + * 7 SFP_act_lmt_DA_CORE0 + * 8 SFP_act_lmt_DA_CORE1 + * 9 SFP_1g_cu_CORE0 + * 10 SFP_1g_cu_CORE1 + * 11 SFP_1g_sx_CORE0 + * 12 SFP_1g_sx_CORE1 + */ + { + if (cable_tech & TXGBE_SFF_DA_PASSIVE_CABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_da_cu_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_da_cu_core1; + + if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) { + hw->dac_sfp = true; + } + + if (comp_copper_len == 0x1) + hw->bypassCtle = true; + else + hw->bypassCtle = false; + + if (comp_codes_25g == TXGBE_SFF_25GBASECR_91FEC || + comp_codes_25g == TXGBE_SFF_25GBASECR_74FEC || + comp_codes_25g == TXGBE_SFF_25GBASECR_NOFEC) { + hw->phy.fiber_suppport_speed = + TXGBE_LINK_SPEED_25GB_FULL | + TXGBE_LINK_SPEED_10GB_FULL; + } else { + hw->phy.fiber_suppport_speed |= + TXGBE_LINK_SPEED_10GB_FULL; + } + if (!AUTO) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = txgbe_sfp_type_25g_sr_core0; + else + hw->phy.sfp_type = txgbe_sfp_type_25g_sr_core1; + } + + } else if (cable_tech & TXGBE_SFF_DA_ACTIVE_CABLE) { + hw->dac_sfp = false; + TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_CABLE_SPEC_COMP, + &cable_spec); + if (cable_spec & + TXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_da_act_lmt_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_da_act_lmt_core1; + } else { + hw->phy.sfp_type = txgbe_sfp_type_unknown; + } + + if (comp_codes_25g == TXGBE_SFF_25GAUI_C2M_AOC_BER_5 || + comp_codes_25g == TXGBE_SFF_25GAUI_C2M_ACC_BER_5 || + comp_codes_25g == TXGBE_SFF_25GAUI_C2M_AOC_BER_12 || + comp_codes_25g == TXGBE_SFF_25GAUI_C2M_ACC_BER_12) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_25g_aoc_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_25g_aoc_core1; + } + } else if (comp_codes_25g == TXGBE_SFF_25GAUI_C2M_AOC_BER_5 || + comp_codes_25g == TXGBE_SFF_25GAUI_C2M_ACC_BER_5 || + comp_codes_25g == TXGBE_SFF_25GAUI_C2M_AOC_BER_12 || + comp_codes_25g == TXGBE_SFF_25GAUI_C2M_ACC_BER_12) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = 
txgbe_sfp_type_25g_aoc_core0; + else + hw->phy.sfp_type = txgbe_sfp_type_25g_aoc_core1; + } else if (comp_codes_25g == TXGBE_SFF_25GBASESR_CAPABLE || + comp_codes_25g == TXGBE_SFF_25GBASEER_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = txgbe_sfp_type_25g_sr_core0; + else + hw->phy.sfp_type = txgbe_sfp_type_25g_sr_core1; + } else if (comp_codes_25g == TXGBE_SFF_25GBASELR_CAPABLE ) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = txgbe_sfp_type_25g_lr_core0; + else + hw->phy.sfp_type = txgbe_sfp_type_25g_lr_core1; + } else if (comp_codes_10g & + (TXGBE_SFF_10GBASESR_CAPABLE | + TXGBE_SFF_10GBASELR_CAPABLE)) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_srlr_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_srlr_core1; + } else if (comp_codes_1g & TXGBE_SFF_1GBASET_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_1g_cu_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_1g_cu_core1; + } else if (comp_codes_1g & TXGBE_SFF_1GBASESX_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_1g_sx_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_1g_sx_core1; + } else if (comp_codes_1g & TXGBE_SFF_1GBASELX_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_1g_lx_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_1g_lx_core1; + } else { + hw->phy.sfp_type = txgbe_sfp_type_unknown; + } + } -static void txgbe_gpio_irq_ack(struct irq_data *d) -{ - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - irq_hw_number_t hwirq = irqd_to_hwirq(d); - struct wx *wx = gpiochip_get_data(gc); - unsigned long flags; + if (hw->phy.sfp_type != stored_sfp_type) + hw->phy.sfp_setup_needed = true; + + /* Determine if the SFP+ PHY is dual speed or not. */ + hw->phy.multispeed_fiber = false; + if (hw->mac.type == txgbe_mac_aml) { + if ((comp_codes_25g == TXGBE_SFF_25GBASESR_CAPABLE || + comp_codes_25g == TXGBE_SFF_25GBASELR_CAPABLE || + comp_codes_25g == TXGBE_SFF_25GBASEER_CAPABLE) && + ((comp_codes_10g & TXGBE_SFF_10GBASESR_CAPABLE) || + (comp_codes_10g & TXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = true; + } else { + if (((comp_codes_1g & TXGBE_SFF_1GBASESX_CAPABLE) && + (comp_codes_10g & TXGBE_SFF_10GBASESR_CAPABLE)) || + ((comp_codes_1g & TXGBE_SFF_1GBASELX_CAPABLE) && + (comp_codes_10g & TXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = true; + } + /* Determine PHY vendor */ + if (hw->phy.type != txgbe_phy_nl) { + hw->phy.id = identifier; + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_VENDOR_OUI_BYTE0, + &oui_bytes[0]); + + if (status != 0) + goto err_read_i2c_eeprom; + + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_VENDOR_OUI_BYTE1, + &oui_bytes[1]); + + if (status != 0) + goto err_read_i2c_eeprom; + + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_VENDOR_OUI_BYTE2, + &oui_bytes[2]); + + if (status != 0) + goto err_read_i2c_eeprom; + + vendor_oui = + ((oui_bytes[0] << TXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | + (oui_bytes[1] << TXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | + (oui_bytes[2] << TXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); + + switch (vendor_oui) { + case TXGBE_SFF_VENDOR_OUI_TYCO: + if (cable_tech & TXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.type = + txgbe_phy_sfp_passive_tyco; + break; + case TXGBE_SFF_VENDOR_OUI_FTL: + if (cable_tech & TXGBE_SFF_DA_ACTIVE_CABLE) + hw->phy.type = txgbe_phy_sfp_ftl_active; + else + hw->phy.type = txgbe_phy_sfp_ftl; + break; + case TXGBE_SFF_VENDOR_OUI_AVAGO: + hw->phy.type = txgbe_phy_sfp_avago; + break; + case 
TXGBE_SFF_VENDOR_OUI_INTEL: + hw->phy.type = txgbe_phy_sfp_intel; + break; + default: + if (cable_tech & TXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.type = + txgbe_phy_sfp_passive_unknown; + else if (cable_tech & TXGBE_SFF_DA_ACTIVE_CABLE) + hw->phy.type = + txgbe_phy_sfp_active_unknown; + else + hw->phy.type = txgbe_phy_sfp_unknown; + break; + } + } - raw_spin_lock_irqsave(&wx->gpio_lock, flags); - wr32(wx, WX_GPIO_EOI, BIT(hwirq)); - raw_spin_unlock_irqrestore(&wx->gpio_lock, flags); -} + /* vendor name match QAX and can access sfp internal phy */ + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_CABLE_VENDOR_NAME1, + &vendor_name[0]); + if (status != 0) + goto err_read_i2c_eeprom; + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_CABLE_VENDOR_NAME2, + &vendor_name[1]); + if (status != 0) + goto err_read_i2c_eeprom; + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_CABLE_VENDOR_NAME3, + &vendor_name[2]); + if (status != 0) + goto err_read_i2c_eeprom; + + if (vendor_name[0] == 0x51 && + vendor_name[1] == 0x41 && + vendor_name[2] == 0x58) { + status = TCALL(hw, phy.ops.read_i2c_sfp_phy, + 0x8008, + &phy_data); + if (status == 0 || phy_data != 0xffff) { + hw->phy.multispeed_fiber = false; + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_10g_cu_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_10g_cu_core1; + } + } -static void txgbe_gpio_irq_mask(struct irq_data *d) -{ - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - irq_hw_number_t hwirq = irqd_to_hwirq(d); - struct wx *wx = gpiochip_get_data(gc); - unsigned long flags; + /* Allow any DA cable vendor */ + if (cable_tech & (TXGBE_SFF_DA_PASSIVE_CABLE | + TXGBE_SFF_DA_ACTIVE_CABLE)) { + status = 0; + goto out; + } - gpiochip_disable_irq(gc, hwirq); + /* Verify supported 1G SFP modules */ + if (comp_codes_10g == 0 && comp_codes_25g == 0 && + !(hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core1 || + hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core1)) { + hw->phy.type = txgbe_phy_sfp_unsupported; + status = TXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + } + if (hw->mac.type == txgbe_mac_sp) { + /*record eeprom info*/ + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_SFF_8472_COMP, + &sff8472_rev); + if (status != 0) + goto err_read_i2c_eeprom; + + /* addressing mode is not supported */ + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_SFF_8472_SWAP, + &addr_mode); + if (status != 0) + goto err_read_i2c_eeprom; + + if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) { + e_err(drv, "Address change required to access page 0xA2, " + "but not supported. Please report the module type to the " + "driver maintainers.\n"); + page_swap = true; + } - raw_spin_lock_irqsave(&wx->gpio_lock, flags); - wr32m(wx, WX_GPIO_INTMASK, BIT(hwirq), BIT(hwirq)); - raw_spin_unlock_irqrestore(&wx->gpio_lock, flags); -} + if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap || + !(addr_mode & TXGBE_SFF_DDM_IMPLEMENTED)) { + /* We have a SFP, but it does not support SFF-8472 */ + adapter->eeprom_type = ETH_MODULE_SFF_8079; + adapter->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + /* We have a SFP which supports a revision of SFF-8472. 
*/ + adapter->eeprom_type = ETH_MODULE_SFF_8472; + adapter->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + for (i = 0; i < adapter->eeprom_len; i++) { + if (i < ETH_MODULE_SFF_8079_LEN) + status = TCALL(hw, phy.ops.read_i2c_eeprom, i, + &databyte); + else + status = TCALL(hw, phy.ops.read_i2c_sff8472, i, + &databyte); + + if (status != 0) + goto err_read_i2c_eeprom; + + adapter->i2c_eeprom[i] = databyte; + } + } +out: + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); -static void txgbe_gpio_irq_unmask(struct irq_data *d) -{ - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - irq_hw_number_t hwirq = irqd_to_hwirq(d); - struct wx *wx = gpiochip_get_data(gc); - unsigned long flags; + return status; - gpiochip_enable_irq(gc, hwirq); +err_read_i2c_eeprom: + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); - raw_spin_lock_irqsave(&wx->gpio_lock, flags); - wr32m(wx, WX_GPIO_INTMASK, BIT(hwirq), 0); - raw_spin_unlock_irqrestore(&wx->gpio_lock, flags); + hw->phy.sfp_type = txgbe_sfp_type_not_present; + if (hw->phy.type != txgbe_phy_nl) { + hw->phy.id = 0; + hw->phy.type = txgbe_phy_unknown; + } + return TXGBE_ERR_SFP_NOT_PRESENT; } -static void txgbe_toggle_trigger(struct gpio_chip *gc, unsigned int offset) +s32 txgbe_identify_qsfp_module(struct txgbe_hw *hw) { - struct wx *wx = gpiochip_get_data(gc); - u32 pol, val; + s32 status = TXGBE_ERR_PHY_ADDR_INVALID; + u8 identifier = 0, transceiver_type = 0; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + u32 value; + + if (hw->mac.type == txgbe_mac_aml40) { + value = rd32(hw, TXGBE_GPIO_EXT); + if (value & TXGBE_SFP1_MOD_PRST_LS) { + hw->phy.sfp_type = txgbe_sfp_type_not_present; + return TXGBE_ERR_SFP_NOT_PRESENT; + } + } - pol = rd32(wx, WX_GPIO_POLARITY); - val = rd32(wx, WX_GPIO_EXT); + if (0 != TCALL(hw, mac.ops.acquire_swfw_sync, swfw_mask)) + return TXGBE_ERR_SWFW_SYNC; - if (val & BIT(offset)) - pol &= ~BIT(offset); - else - pol |= BIT(offset); + if (TCALL(hw, mac.ops.get_media_type) != txgbe_media_type_fiber_qsfp) { + hw->phy.sfp_type = txgbe_sfp_type_not_present; + status = TXGBE_ERR_SFP_NOT_PRESENT; + goto out; + } - wr32(wx, WX_GPIO_POLARITY, pol); -} + /* LAN ID is needed for I2C access */ + txgbe_init_i2c(hw); + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_IDENTIFIER, + &identifier); + + if (status != 0) + goto err_read_i2c_eeprom; + + if (identifier == TXGBE_SFF_IDENTIFIER_QSFP || + identifier == TXGBE_SFF_IDENTIFIER_QSFP_PLUS) { + hw->phy.type = txgbe_phy_sfp_unknown; + + status = hw->phy.ops.read_i2c_eeprom(hw, + TXGBE_ETHERNET_COMP_OFFSET, + &transceiver_type); + if (status != 0) + goto err_read_i2c_eeprom; + + if (transceiver_type & TXGBE_SFF_ETHERNET_40G_CR4) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = txgbe_qsfp_type_40g_cu_core0; + else + hw->phy.sfp_type = txgbe_qsfp_type_40g_cu_core1; + hw->phy.fiber_suppport_speed = + TXGBE_LINK_SPEED_40GB_FULL | + TXGBE_LINK_SPEED_10GB_FULL; + + if (!AUTO) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = txgbe_qsfp_type_40g_sr_core0; + else + hw->phy.sfp_type = txgbe_qsfp_type_40g_sr_core1; + } + } -static int txgbe_gpio_set_type(struct irq_data *d, unsigned int type) -{ - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - irq_hw_number_t hwirq = irqd_to_hwirq(d); - struct wx *wx = gpiochip_get_data(gc); - u32 level, polarity, mask; - unsigned long flags; + if (transceiver_type & TXGBE_SFF_ETHERNET_40G_SR4) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = txgbe_qsfp_type_40g_sr_core0; + else + hw->phy.sfp_type = txgbe_qsfp_type_40g_sr_core1; + } - mask = BIT(hwirq); + 
if (transceiver_type & TXGBE_SFF_ETHERNET_40G_LR4) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = txgbe_qsfp_type_40g_lr_core0; + else + hw->phy.sfp_type = txgbe_qsfp_type_40g_lr_core1; + } + + if (transceiver_type & TXGBE_SFF_ETHERNET_40G_ACTIVE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = txgbe_qsfp_type_40g_active_core0; + else + hw->phy.sfp_type = txgbe_qsfp_type_40g_active_core1; + } - if (type & IRQ_TYPE_LEVEL_MASK) { - level = 0; - irq_set_handler_locked(d, handle_level_irq); } else { - level = mask; - irq_set_handler_locked(d, handle_edge_irq); + hw->phy.type = txgbe_phy_sfp_unsupported; + status = TXGBE_ERR_SFP_NOT_SUPPORTED; } +out: + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); - if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH) - polarity = mask; - else - polarity = 0; + return status; - raw_spin_lock_irqsave(&wx->gpio_lock, flags); +err_read_i2c_eeprom: + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); - wr32m(wx, WX_GPIO_INTEN, mask, mask); - wr32m(wx, WX_GPIO_INTTYPE_LEVEL, mask, level); - if (type == IRQ_TYPE_EDGE_BOTH) - txgbe_toggle_trigger(gc, hwirq); - else - wr32m(wx, WX_GPIO_POLARITY, mask, polarity); + hw->phy.sfp_type = txgbe_sfp_type_not_present; + hw->phy.id = 0; + hw->phy.type = txgbe_phy_unknown; - raw_spin_unlock_irqrestore(&wx->gpio_lock, flags); - - return 0; + return TXGBE_ERR_SFP_NOT_PRESENT; } -static const struct irq_chip txgbe_gpio_irq_chip = { - .name = "txgbe_gpio_irq", - .irq_ack = txgbe_gpio_irq_ack, - .irq_mask = txgbe_gpio_irq_mask, - .irq_unmask = txgbe_gpio_irq_unmask, - .irq_set_type = txgbe_gpio_set_type, - .flags = IRQCHIP_IMMUTABLE, - GPIOCHIP_IRQ_RESOURCE_HELPERS, -}; - -static void txgbe_irq_handler(struct irq_desc *desc) +s32 txgbe_init_i2c(struct txgbe_hw *hw) { - struct irq_chip *chip = irq_desc_get_chip(desc); - struct wx *wx = irq_desc_get_handler_data(desc); - struct txgbe *txgbe = wx->priv; - irq_hw_number_t hwirq; - unsigned long gpioirq; - struct gpio_chip *gc; - unsigned long flags; - u32 eicr; + wr32(hw, TXGBE_I2C_ENABLE, 0); + + wr32(hw, TXGBE_I2C_CON, + (TXGBE_I2C_CON_MASTER_MODE | + TXGBE_I2C_CON_SPEED(1) | + TXGBE_I2C_CON_RESTART_EN | + TXGBE_I2C_CON_SLAVE_DISABLE)); + /* Default addr is 0xA0 ,bit 0 is configure for read/write! 
*/ + wr32(hw, TXGBE_I2C_TAR, TXGBE_I2C_SLAVE_ADDR); + + /* ic_clk = 1/156.25MHz + * SCL_High_time = [(HCNT + IC_*_SPKLEN + 7) * ic_clk] + SCL_Fall_time + * SCL_Low_time = [(LCNT + 1) * ic_clk] - SCL_Fall_time + SCL_Rise_time + * set I2C Frequency to Standard Speed Mode 100KHz + */ + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + wr32(hw, TXGBE_I2C_SS_SCL_HCNT, 2000); + wr32(hw, TXGBE_I2C_SS_SCL_LCNT, 2000); + + wr32m(hw, TXGBE_I2C_SDA_HOLD, + TXGBE_I2C_SDA_RX_HOLD | TXGBE_I2C_SDA_TX_HOLD, 0x640064); + } else if (hw->mac.type == txgbe_mac_sp) { + wr32(hw, TXGBE_I2C_SS_SCL_HCNT, 780); + wr32(hw, TXGBE_I2C_SS_SCL_LCNT, 780); + } - eicr = wx_misc_isb(wx, WX_ISB_MISC); + wr32(hw, TXGBE_I2C_RX_TL, 0); /* 1byte for rx full signal */ + wr32(hw, TXGBE_I2C_TX_TL, 4); - chained_irq_enter(chip, desc); + wr32(hw, TXGBE_I2C_SCL_STUCK_TIMEOUT, 0xFFFFFF); + wr32(hw, TXGBE_I2C_SDA_STUCK_TIMEOUT, 0xFFFFFF); - gpioirq = rd32(wx, WX_GPIO_INTSTATUS); + wr32(hw, TXGBE_I2C_INTR_MASK, 0); + wr32(hw, TXGBE_I2C_ENABLE, 1); - gc = txgbe->gpio; - for_each_set_bit(hwirq, &gpioirq, gc->ngpio) { - int gpio = irq_find_mapping(gc->irq.domain, hwirq); - u32 irq_type = irq_get_trigger_type(gpio); + return 0; +} - generic_handle_domain_irq(gc->irq.domain, hwirq); +STATIC s32 txgbe_init_i2c_sfp_phy(struct txgbe_hw *hw) +{ + wr32(hw, TXGBE_I2C_ENABLE, 0); + + wr32(hw, TXGBE_I2C_CON, + (TXGBE_I2C_CON_MASTER_MODE | + TXGBE_I2C_CON_SPEED(1) | + TXGBE_I2C_CON_RESTART_EN | + TXGBE_I2C_CON_SLAVE_DISABLE)); + /* Default addr is 0xA0 ,bit 0 is configure for read/write! */ + wr32(hw, TXGBE_I2C_TAR, TXGBE_I2C_SLAVE_ADDR); + wr32(hw, TXGBE_I2C_SS_SCL_HCNT, 600); + wr32(hw, TXGBE_I2C_SS_SCL_LCNT, 600); + + wr32(hw, TXGBE_I2C_RX_TL, 1); /* 2bytes for rx full signal */ + wr32(hw, TXGBE_I2C_TX_TL, 4); + + wr32(hw, TXGBE_I2C_SCL_STUCK_TIMEOUT, 0xFFFFFF); + wr32(hw, TXGBE_I2C_SDA_STUCK_TIMEOUT, 0xFFFFFF); + + wr32(hw, TXGBE_I2C_INTR_MASK, 0); + wr32(hw, TXGBE_I2C_ENABLE, 1); - if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) { - raw_spin_lock_irqsave(&wx->gpio_lock, flags); - txgbe_toggle_trigger(gc, hwirq); - raw_spin_unlock_irqrestore(&wx->gpio_lock, flags); - } - } + return 0; +} - chained_irq_exit(chip, desc); - if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN | - TXGBE_PX_MISC_ETH_AN)) { - u32 reg = rd32(wx, TXGBE_CFG_PORT_ST); +s32 txgbe_clear_i2c(struct txgbe_hw *hw) +{ + s32 status = 0; - phylink_mac_change(txgbe->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP)); - } + /* wait for completion */ + status = po32m(hw, TXGBE_I2C_STATUS, + TXGBE_I2C_STATUS_MST_ACTIVITY, ~TXGBE_I2C_STATUS_MST_ACTIVITY, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; - /* unmask interrupt */ - wx_intr_enable(wx, TXGBE_INTR_MISC(wx)); -} - -static int txgbe_gpio_init(struct txgbe *txgbe) -{ - struct gpio_irq_chip *girq; - struct gpio_chip *gc; - struct device *dev; - struct wx *wx; - int ret; - - wx = txgbe->wx; - dev = &wx->pdev->dev; - - raw_spin_lock_init(&wx->gpio_lock); - - gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL); - if (!gc) - return -ENOMEM; - - gc->label = devm_kasprintf(dev, GFP_KERNEL, "txgbe_gpio-%x", - pci_dev_id(wx->pdev)); - if (!gc->label) - return -ENOMEM; - - gc->base = -1; - gc->ngpio = 6; - gc->owner = THIS_MODULE; - gc->parent = dev; - gc->fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_GPIO]); - gc->get = txgbe_gpio_get; - gc->get_direction = txgbe_gpio_get_direction; - gc->direction_input = txgbe_gpio_direction_in; - gc->direction_output = txgbe_gpio_direction_out; - - 
girq = &gc->irq; - gpio_irq_chip_set_chip(girq, &txgbe_gpio_irq_chip); - girq->parent_handler = txgbe_irq_handler; - girq->parent_handler_data = wx; - girq->num_parents = 1; - girq->parents = devm_kcalloc(dev, girq->num_parents, - sizeof(*girq->parents), GFP_KERNEL); - if (!girq->parents) - return -ENOMEM; - girq->parents[0] = wx->msix_entries[wx->num_q_vectors].vector; - girq->default_type = IRQ_TYPE_NONE; - girq->handler = handle_bad_irq; - - ret = devm_gpiochip_add_data(dev, gc, wx); - if (ret) - return ret; - - txgbe->gpio = gc; + wr32(hw, TXGBE_I2C_ENABLE, 0); - return 0; +out: + return status; } -static int txgbe_clock_register(struct txgbe *txgbe) +/** + * txgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. + **/ +s32 txgbe_read_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data) { - struct pci_dev *pdev = txgbe->wx->pdev; - struct clk_lookup *clock; - char clk_name[32]; - struct clk *clk; - - snprintf(clk_name, sizeof(clk_name), "i2c_designware.%d", - pci_dev_id(pdev)); + txgbe_init_i2c(hw); + return TCALL(hw, phy.ops.read_i2c_byte, byte_offset, + TXGBE_I2C_EEPROM_DEV_ADDR, + eeprom_data); +} - clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000); - if (IS_ERR(clk)) - return PTR_ERR(clk); +/** + * txgbe_read_i2c_sff8472 - Reads 8 bit word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: byte offset at address 0xA2 + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's SFF-8472 data over I2C + **/ +s32 txgbe_read_i2c_sff8472(struct txgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data) +{ + txgbe_init_i2c(hw); + return TCALL(hw, phy.ops.read_i2c_byte, byte_offset, + TXGBE_I2C_EEPROM_DEV_ADDR2, + sff8472_data); +} - clock = clkdev_create(clk, NULL, clk_name); - if (!clock) { - clk_unregister(clk); - return -ENOMEM; - } +/** + * txgbe_read_i2c_sff8636 - Reads 8 bit word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: byte offset at address 0xA2 + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's SFF-8472 data over I2C + **/ +s32 txgbe_read_i2c_sff8636(struct txgbe_hw *hw, u8 page ,u8 byte_offset, + u8 *sff8636_data) +{ + txgbe_init_i2c(hw); + TCALL(hw, phy.ops.write_i2c_byte, TXGBE_SFF_QSFP_PAGE_SELECT, + TXGBE_I2C_EEPROM_DEV_ADDR, + page); + + return TCALL(hw, phy.ops.read_i2c_byte, byte_offset, + TXGBE_I2C_EEPROM_DEV_ADDR, + sff8636_data); +} - txgbe->clk = clk; - txgbe->clock = clock; - return 0; +/** + * txgbe_read_i2c_sfp_phy - Reads 16 bit word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: byte offset at address 0xAC + * @eeprom_data: value read + * + * Performs byte read operation to Fiber to Copper SFP module + * internal phy over I2C + **/ +s32 txgbe_read_i2c_sfp_phy(struct txgbe_hw *hw, u16 byte_offset, + u16 *data) +{ + txgbe_init_i2c_sfp_phy(hw); + + return txgbe_read_i2c_word(hw, byte_offset, + TXGBE_I2C_EEPROM_DEV_ADDR3, + data); } -static int txgbe_i2c_read(void *context, unsigned int reg, unsigned int *val) +/** + * txgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to write + * @eeprom_data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface. 
+ **/ +s32 txgbe_write_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset, + u8 eeprom_data) { - struct wx *wx = context; - - *val = rd32(wx, reg + TXGBE_I2C_BASE); - - return 0; + txgbe_init_i2c(hw); + return TCALL(hw, phy.ops.write_i2c_byte, byte_offset, + TXGBE_I2C_EEPROM_DEV_ADDR, + eeprom_data); } -static int txgbe_i2c_write(void *context, unsigned int reg, unsigned int val) +/** + * txgbe_read_i2c_byte_int - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * @lock: true if to take and release semaphore + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +STATIC s32 txgbe_read_i2c_byte_int(struct txgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data, bool lock) { - struct wx *wx = context; + s32 status = 0; + + UNREFERENCED_PARAMETER(dev_addr); + UNREFERENCED_PARAMETER(lock); + + /* wait tx empty */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_TX_EMPTY, TXGBE_I2C_INTR_STAT_TX_EMPTY, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; + + /* read data */ + wr32(hw, TXGBE_I2C_DATA_CMD, + byte_offset | TXGBE_I2C_DATA_CMD_STOP); + wr32(hw, TXGBE_I2C_DATA_CMD, TXGBE_I2C_DATA_CMD_READ); + + /* wait for read complete */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_RX_FULL, TXGBE_I2C_INTR_STAT_RX_FULL, + TXGBE_I2C_TIMEOUT, 100); + if (status != 0) + goto out; + + *data = 0xFF & rd32(hw, TXGBE_I2C_DATA_CMD); + +out: + return status; +} - wr32(wx, reg + TXGBE_I2C_BASE, val); +/** +* txgbe_read_i2c_word_int - Reads 16 bit word over I2C +* @hw: pointer to hardware structure +* @byte_offset: byte offset to read +* @data: value read +* @lock: true if to take and release semaphore +* +* Performs byte read operation to SFP module's EEPROM over I2C interface at +* a specified device address. 
+**/ +STATIC s32 txgbe_read_i2c_word_int(struct txgbe_hw *hw, u16 byte_offset, + u8 dev_addr, u16 *data, bool lock) +{ + s32 status = 0; + + UNREFERENCED_PARAMETER(dev_addr); + UNREFERENCED_PARAMETER(lock); + + if ((hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0) || + (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1)) { + /* reg offset format 0x000yyyyy */ + byte_offset &= 0x1f; + + /* wait tx empty */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_TX_EMPTY, TXGBE_I2C_INTR_STAT_TX_EMPTY, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; + + /* write reg_offset */ + wr32(hw, TXGBE_I2C_DATA_CMD, (u8)byte_offset | TXGBE_I2C_DATA_CMD_STOP); + + usec_delay(TXGBE_I2C_TIMEOUT); + /* wait tx empty */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_TX_EMPTY, TXGBE_I2C_INTR_STAT_TX_EMPTY, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; + + /* read data */ + wr32(hw, TXGBE_I2C_DATA_CMD, TXGBE_I2C_DATA_CMD_READ); + wr32(hw, TXGBE_I2C_DATA_CMD, TXGBE_I2C_DATA_CMD_READ | TXGBE_I2C_DATA_CMD_STOP); + + /* wait for read complete */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_RX_FULL, TXGBE_I2C_INTR_STAT_RX_FULL, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; + + *data = 0xFF & rd32(hw, TXGBE_I2C_DATA_CMD); + *data <<= 8; + *data += 0xFF & rd32(hw, TXGBE_I2C_DATA_CMD); + } else if ((hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core0) || + (hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core1)) { + /* wait tx empty */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_TX_EMPTY, TXGBE_I2C_INTR_STAT_TX_EMPTY, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; + + /* write reg_offset */ + wr32(hw, TXGBE_I2C_DATA_CMD, 0x23); + wr32(hw, TXGBE_I2C_DATA_CMD, byte_offset >> 8); + wr32(hw, TXGBE_I2C_DATA_CMD, (u8)byte_offset | TXGBE_I2C_DATA_CMD_STOP); + + /* wait tx empty */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_TX_EMPTY, TXGBE_I2C_INTR_STAT_TX_EMPTY, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; + + /* delay for mcu access sfp internal phy through MDIO + * delay time need larger than 1ms + */ + mdelay(5); + + /* read data */ + wr32(hw, TXGBE_I2C_DATA_CMD, TXGBE_I2C_DATA_CMD_READ); + wr32(hw, TXGBE_I2C_DATA_CMD, TXGBE_I2C_DATA_CMD_READ | TXGBE_I2C_DATA_CMD_STOP); + + /* wait for read complete */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_RX_FULL, TXGBE_I2C_INTR_STAT_RX_FULL, + TXGBE_I2C_TIMEOUT, 100); + if (status != 0) + goto out; + + /* fixme LSB data is the data of the duplicate MSB */ + *data = 0xFF & rd32(hw, TXGBE_I2C_DATA_CMD); + *data <<= 8; + *data += 0xFF & rd32(hw, TXGBE_I2C_DATA_CMD); + } - return 0; +out: + return status; } -static const struct regmap_config i2c_regmap_config = { - .reg_bits = 32, - .val_bits = 32, - .reg_read = txgbe_i2c_read, - .reg_write = txgbe_i2c_write, - .fast_io = true, -}; - -static int txgbe_i2c_register(struct txgbe *txgbe) -{ - struct platform_device_info info = {}; - struct platform_device *i2c_dev; - struct regmap *i2c_regmap; - struct pci_dev *pdev; - struct wx *wx; - - wx = txgbe->wx; - pdev = wx->pdev; - i2c_regmap = devm_regmap_init(&pdev->dev, NULL, wx, &i2c_regmap_config); - if (IS_ERR(i2c_regmap)) { - wx_err(wx, "failed to init I2C regmap\n"); - return PTR_ERR(i2c_regmap); - } - info.parent = &pdev->dev; - info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_I2C]); - info.name = "i2c_designware"; - info.id = pci_dev_id(pdev); +/** + * txgbe_switch_i2c_slave_addr - Switch I2C slave address + 
* @hw: pointer to hardware structure + * @dev_addr: slave addr to switch + * + **/ +s32 txgbe_switch_i2c_slave_addr(struct txgbe_hw *hw, u8 dev_addr) +{ + wr32(hw, TXGBE_I2C_ENABLE, 0); + wr32(hw, TXGBE_I2C_TAR, dev_addr >> 1); + wr32(hw, TXGBE_I2C_ENABLE, 1); + return 0; +} - info.res = &DEFINE_RES_IRQ(pdev->irq); - info.num_res = 1; - i2c_dev = platform_device_register_full(&info); - if (IS_ERR(i2c_dev)) - return PTR_ERR(i2c_dev); - txgbe->i2c_dev = i2c_dev; +/** + * txgbe_read_i2c_byte - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +s32 txgbe_read_i2c_byte(struct txgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + txgbe_switch_i2c_slave_addr(hw, dev_addr); - return 0; + return txgbe_read_i2c_byte_int(hw, byte_offset, dev_addr, + data, true); } -static int txgbe_sfp_register(struct txgbe *txgbe) +/** + * txgbe_read_i2c_word - Reads 16 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +s32 txgbe_read_i2c_word(struct txgbe_hw *hw, u16 byte_offset, + u8 dev_addr, u16 *data) { - struct pci_dev *pdev = txgbe->wx->pdev; - struct platform_device_info info = {}; - struct platform_device *sfp_dev; + txgbe_switch_i2c_slave_addr(hw, dev_addr); - info.parent = &pdev->dev; - info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_SFP]); - info.name = "sfp"; - info.id = pci_dev_id(pdev); - sfp_dev = platform_device_register_full(&info); - if (IS_ERR(sfp_dev)) - return PTR_ERR(sfp_dev); + return txgbe_read_i2c_word_int(hw, byte_offset, dev_addr, + data, true); +} - txgbe->sfp_dev = sfp_dev; - return 0; +/** + * txgbe_write_i2c_byte_int - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * @lock: true if to take and release semaphore + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +STATIC s32 txgbe_write_i2c_byte_int(struct txgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + s32 status = 0; + + UNREFERENCED_PARAMETER(dev_addr); + + /* wait tx empty */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_TX_EMPTY, TXGBE_I2C_INTR_STAT_TX_EMPTY, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; + + wr32(hw, TXGBE_I2C_DATA_CMD, byte_offset); + wr32(hw, TXGBE_I2C_DATA_CMD, + data | TXGBE_I2C_DATA_CMD_WRITE); + + /* wait for write complete */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_RX_FULL, TXGBE_I2C_INTR_STAT_RX_FULL, + TXGBE_I2C_TIMEOUT, 10); +out: + return status; } -static int txgbe_phy_read(struct mii_bus *bus, int phy_addr, - int devnum, int regnum) +/** + * txgbe_write_i2c_byte - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. 
+ **/
+s32 txgbe_write_i2c_byte(struct txgbe_hw *hw, u8 byte_offset,
+			 u8 dev_addr, u8 data)
 {
-	struct wx *wx = bus->priv;
-	u32 val, command;
-	int ret;
+	return txgbe_write_i2c_byte_int(hw, byte_offset, dev_addr,
+					data);
+}
 
-	/* setup and write the address cycle command */
-	command = WX_MSCA_RA(regnum) |
-		  WX_MSCA_PA(phy_addr) |
-		  WX_MSCA_DA(devnum);
-	wr32(wx, WX_MSCA, command);
-
-	command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY;
-	wr32(wx, WX_MSCC, command);
-
-	/* wait to complete */
-	ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
-				100000, false, wx, WX_MSCC);
-	if (ret) {
-		wx_err(wx, "Mdio read c45 command did not complete.\n");
-		return ret;
-	}
+
+/**
+ * txgbe_tn_check_overtemp - Checks if an overtemp occurred.
+ * @hw: pointer to hardware structure
+ *
+ * Checks if the LASI temp alarm status was triggered due to overtemp
+ **/
+s32 txgbe_tn_check_overtemp(struct txgbe_hw *hw)
+{
+	s32 status = 0;
+	u32 ts_state;
+
+	if (hw->mac.type == txgbe_mac_aml ||
+	    hw->mac.type == txgbe_mac_aml40) {
+		ts_state = rd32(hw, TXGBE_AML_INTR_HIGH_STS);
+		if (ts_state) {
+			wr32(hw, TXGBE_AML_INTR_RAW_HI, TXGBE_AML_INTR_CL_HI);
+			wr32(hw, TXGBE_AML_INTR_RAW_LO, TXGBE_AML_INTR_CL_LO);
+			status = TXGBE_ERR_OVERTEMP;
+		} else {
+			ts_state = rd32(hw, TXGBE_AML_INTR_LOW_STS);
+			if (ts_state) {
+				status = TXGBE_ERR_UNDERTEMP;
+			}
+		}
+	} else {
+		/* Check that the LASI temp alarm status was triggered */
+		ts_state = rd32(hw, TXGBE_TS_ALARM_ST);
 
-	return (u16)rd32(wx, WX_MSCC);
+		if (ts_state & TXGBE_TS_ALARM_ST_DALARM)
+			status = TXGBE_ERR_UNDERTEMP;
+		else if (ts_state & TXGBE_TS_ALARM_ST_ALARM)
+			status = TXGBE_ERR_OVERTEMP;
+	}
+	return status;
 }
 
-static int txgbe_phy_write(struct mii_bus *bus, int phy_addr,
-			   int devnum, int regnum, u16 value)
+
+s32 txgbe_init_external_phy(struct txgbe_hw *hw)
 {
-	struct wx *wx = bus->priv;
-	int ret, command;
-	u16 val;
+	s32 status = 0;
+
+	MTD_DEV_PTR devptr = &(hw->phy_dev);
+
+	hw->phy.addr = 0;
+
+	devptr->appData = hw;
+	status = mtdLoadDriver(txgbe_read_mdio,
+			       txgbe_write_mdio,
+			       MTD_FALSE,
+			       NULL,
+			       NULL,
+			       NULL,
+			       NULL,
+			       hw->phy.addr,
+			       devptr);
+	if (status != 0) {
+		ERROR_REPORT1(TXGBE_ERROR_INVALID_STATE,
+			      "External PHY initialization failed.\n");
+		return TXGBE_ERR_PHY;
+	}
 
-	/* setup and write the address cycle command */
-	command = WX_MSCA_RA(regnum) |
-		  WX_MSCA_PA(phy_addr) |
-		  WX_MSCA_DA(devnum);
-	wr32(wx, WX_MSCA, command);
+	return status;
+}
 
-	command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY;
-	wr32(wx, WX_MSCC, command);
-
-	/* wait to complete */
-	ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
-				100000, false, wx, WX_MSCC);
-	if (ret)
-		wx_err(wx, "Mdio write c45 command did not complete.\n");
-
-	return ret;
-}
-
-static int txgbe_ext_phy_init(struct txgbe *txgbe)
-{
-	struct phy_device *phydev;
-	struct mii_bus *mii_bus;
-	struct pci_dev *pdev;
-	struct wx *wx;
-	int ret = 0;
-
-	wx = txgbe->wx;
-	pdev = wx->pdev;
-
-	mii_bus = devm_mdiobus_alloc(&pdev->dev);
-	if (!mii_bus)
-		return -ENOMEM;
-
-	mii_bus->name = "txgbe_mii_bus";
-	mii_bus->read_c45 = &txgbe_phy_read;
-	mii_bus->write_c45 = &txgbe_phy_write;
-	mii_bus->parent = &pdev->dev;
-	mii_bus->phy_mask = GENMASK(31, 1);
-	mii_bus->priv = wx;
-	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "txgbe-%x",
-		 (pdev->bus->number << 8) | pdev->devfn);
-
-	ret = devm_mdiobus_register(&pdev->dev, mii_bus);
-	if (ret) {
-		wx_err(wx, "failed to register MDIO bus: %d\n", ret);
-		return ret;
-	}
+s32 txgbe_uninit_external_phy(struct txgbe_hw *hw)
+{ + return mtdUnloadDriver(&hw->phy_dev); +} - phydev = phy_find_first(mii_bus); - if (!phydev) { - wx_err(wx, "no PHY found\n"); - return -ENODEV; - } +s32 txgbe_set_phy_pause_advertisement(struct txgbe_hw *hw, u32 pause_bit) +{ + return mtdSetPauseAdvertisement(&hw->phy_dev, hw->phy.addr, + (pause_bit>>10)&0x3, MTD_FALSE); +} - phy_attached_info(phydev); +s32 txgbe_get_phy_advertised_pause(struct txgbe_hw *hw, u8 *pause_bit) +{ + u16 value; + s32 status = 0; - wx->link = 0; - wx->speed = 0; - wx->duplex = 0; - wx->phydev = phydev; + status = mtdHwXmdioRead(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_AUTO_NEG_DEV_TYPE, + TXGBE_MDIO_AUTO_NEG_ADVT, &value); + *pause_bit = (u8)((value>>10)&0x3); + return status; - ret = txgbe_phylink_init(txgbe); - if (ret) { - wx_err(wx, "failed to init phylink: %d\n", ret); - return ret; - } +} - return 0; +s32 txgbe_get_lp_advertised_pause(struct txgbe_hw *hw, u8 *pause_bit) +{ + return mtdGetLPAdvertisedPause(&hw->phy_dev, hw->phy.addr, pause_bit); } -int txgbe_init_phy(struct txgbe *txgbe) +s32 txgbe_external_phy_suspend(struct txgbe_hw *hw) { - int ret; + s32 status = 0; + u16 value = 0; + + status = mtdHwXmdioRead(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_VENDOR_SPECIFIC_2_DEV_TYPE, + TXGBE_MDIO_VENDOR_SPECIFIC_2_PORT_CTRL, &value); - if (txgbe->wx->media_type == sp_media_copper) - return txgbe_ext_phy_init(txgbe); + if (status) + goto out; - ret = txgbe_swnodes_register(txgbe); - if (ret) { - wx_err(txgbe->wx, "failed to register software nodes\n"); - return ret; - } + value |= TXGBE_MDIO_VENDOR_SPECIFIC_2_POWER; - ret = txgbe_mdio_pcs_init(txgbe); - if (ret) { - wx_err(txgbe->wx, "failed to init mdio pcs: %d\n", ret); - goto err_unregister_swnode; - } + status = mtdHwXmdioWrite(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_VENDOR_SPECIFIC_2_DEV_TYPE, + TXGBE_MDIO_VENDOR_SPECIFIC_2_PORT_CTRL, value); - ret = txgbe_phylink_init(txgbe); - if (ret) { - wx_err(txgbe->wx, "failed to init phylink\n"); - goto err_destroy_xpcs; - } +out: + return status; +} - ret = txgbe_gpio_init(txgbe); - if (ret) { - wx_err(txgbe->wx, "failed to init gpio\n"); - goto err_destroy_phylink; - } +s32 txgbe_external_phy_resume(struct txgbe_hw *hw) +{ + s32 status = 0; + u16 value = 0; - ret = txgbe_clock_register(txgbe); - if (ret) { - wx_err(txgbe->wx, "failed to register clock: %d\n", ret); - goto err_destroy_phylink; - } + status = mtdHwXmdioRead(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_VENDOR_SPECIFIC_2_DEV_TYPE, + TXGBE_MDIO_VENDOR_SPECIFIC_2_PORT_CTRL, &value); - ret = txgbe_i2c_register(txgbe); - if (ret) { - wx_err(txgbe->wx, "failed to init i2c interface: %d\n", ret); - goto err_unregister_clk; - } + if (status) + goto out; - ret = txgbe_sfp_register(txgbe); - if (ret) { - wx_err(txgbe->wx, "failed to register sfp\n"); - goto err_unregister_i2c; - } + if (!(value & ~TXGBE_MDIO_VENDOR_SPECIFIC_2_POWER)) + goto out; - return 0; + value |= TXGBE_MDIO_VENDOR_SPECIFIC_2_SW_RST; + value &= ~TXGBE_MDIO_VENDOR_SPECIFIC_2_POWER; -err_unregister_i2c: - platform_device_unregister(txgbe->i2c_dev); -err_unregister_clk: - clkdev_drop(txgbe->clock); - clk_unregister(txgbe->clk); -err_destroy_phylink: - phylink_destroy(txgbe->phylink); -err_destroy_xpcs: - xpcs_destroy(txgbe->xpcs); -err_unregister_swnode: - software_node_unregister_node_group(txgbe->nodes.group); + status = mtdHwXmdioWrite(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_VENDOR_SPECIFIC_2_DEV_TYPE, + TXGBE_MDIO_VENDOR_SPECIFIC_2_PORT_CTRL, value); - return ret; +out: + return status; } -void txgbe_remove_phy(struct txgbe *txgbe) -{ 
- if (txgbe->wx->media_type == sp_media_copper) { - phylink_disconnect_phy(txgbe->phylink); - phylink_destroy(txgbe->phylink); - return; - } - - platform_device_unregister(txgbe->sfp_dev); - platform_device_unregister(txgbe->i2c_dev); - clkdev_drop(txgbe->clock); - clk_unregister(txgbe->clk); - phylink_destroy(txgbe->phylink); - xpcs_destroy(txgbe->xpcs); - software_node_unregister_node_group(txgbe->nodes.group); -} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h index 1ab592124986aea4254a3e334193a11382eeac4b..52dee34ad32f82aaf52e66f8e9d376167fc593eb 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h @@ -1,10 +1,252 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */ +/* + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on txgbe_phy.h, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + #ifndef _TXGBE_PHY_H_ #define _TXGBE_PHY_H_ -int txgbe_init_phy(struct txgbe *txgbe); -void txgbe_remove_phy(struct txgbe *txgbe); +#include "txgbe_type.h" +#define TXGBE_I2C_EEPROM_DEV_ADDR 0xA0 +#define TXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 +#define TXGBE_I2C_EEPROM_BANK_LEN 0xFF + +/*fiber to copper module inter reg i2c addr */ +#define TXGBE_I2C_EEPROM_DEV_ADDR3 0xAC +#define TXGBE_I2C_PHY_LOCAL_RX_STATUS BIT(12) +#define TXGBE_I2C_PHY_REMOTE_RX_STATUS BIT(13) +#define TXGBE_I2C_10G_SFP_LINK_STATUS BIT(10) + +/* EEPROM byte offsets */ +#define TXGBE_SFF_IDENTIFIER 0x0 +#define TXGBE_SFF_IDENTIFIER_SFP 0x3 +#define TXGBE_SFF_IDENTIFIER_QSFP 0xC +#define TXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD +#define TXGBE_SFF_VENDOR_OUI_BYTE0 0x25 +#define TXGBE_SFF_VENDOR_OUI_BYTE1 0x26 +#define TXGBE_SFF_VENDOR_OUI_BYTE2 0x27 +#define TXGBE_SFF_1GBE_COMP_CODES 0x6 +#define TXGBE_SFF_10GBE_COMP_CODES 0x3 +#define TXGBE_SFF_25GBE_COMP_CODES 0x24 +#define TXGBE_SFF_COPPER_LENGTH 0x12 +#define TXGBE_SFF_CABLE_TECHNOLOGY 0x8 +#define TXGBE_SFF_CABLE_SPEC_COMP 0x3C +#define TXGBE_SFF_DDM_IMPLEMENTED 0x40 +#define TXGBE_SFF_SFF_8472_SWAP 0x5C +#define TXGBE_SFF_SFF_8472_COMP 0x5E +#define TXGBE_SFF_SFF_8472_OSCB 0x6E +#define TXGBE_SFF_SFF_8472_ESCB 0x76 +#define TXGBE_SFF_SFF_REVISION_ADDR 0x01 +#define TXGBE_SFF_QSFP_PAGE_SELECT 0x7F + +#define TXGBE_MODULE_QSFP_MAX_LEN 640 + +#define TXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD +#define TXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5 +#define TXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6 +#define TXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7 +#define TXGBE_SFF_QSFP_CONNECTOR 0x82 +#define TXGBE_SFF_QSFP_10GBE_COMP 0x83 +#define TXGBE_SFF_QSFP_1GBE_COMP 0x86 +#define TXGBE_SFF_QSFP_CABLE_LENGTH 0x92 +#define TXGBE_SFF_QSFP_DEVICE_TECH 0x93 +#define 
TXGBE_SFF_CABLE_VENDOR_NAME1 0x14 +#define TXGBE_SFF_CABLE_VENDOR_NAME2 0x15 +#define TXGBE_SFF_CABLE_VENDOR_NAME3 0x16 + +/* Bitmasks */ +#define TXGBE_SFF_DA_PASSIVE_CABLE 0x4 +#define TXGBE_SFF_DA_ACTIVE_CABLE 0x8 +#define TXGBE_SFF_1GBASESX_CAPABLE 0x1 +#define TXGBE_SFF_1GBASELX_CAPABLE 0x2 +#define TXGBE_SFF_1GBASET_CAPABLE 0x8 +#define TXGBE_SFF_10GBASESR_CAPABLE 0x10 +#define TXGBE_SFF_10GBASELR_CAPABLE 0x20 +#define TXGBE_SFF_25GBASESR_CAPABLE 0x2 +#define TXGBE_SFF_25GBASELR_CAPABLE 0x3 +#define TXGBE_SFF_25GBASEER_CAPABLE 0x4 +#define TXGBE_SFF_25GBASECR_91FEC 0xB +#define TXGBE_SFF_25GBASECR_74FEC 0xC +#define TXGBE_SFF_25GBASECR_NOFEC 0xD +#define TXGBE_SFF_40GBASE_SR_CAPABLE 0x10 +#define TXGBE_SFF_4x10GBASESR_CAP 0x11 +#define TXGBE_SFF_40GBASEPSM4_Parallel 0x12 +#define TXGBE_SFF_40GBASE_SWMD4_CAP 0x1f +#define TXGBE_SFF_COPPER_5M 0x5 +#define TXGBE_SFF_COPPER_3M 0x3 +#define TXGBE_SFF_COPPER_1M 0x1 + +#define TXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 +#define TXGBE_SFF_25GAUI_C2M_AOC_BER_5 0x1 +#define TXGBE_SFF_25GAUI_C2M_ACC_BER_5 0x8 +#define TXGBE_SFF_25GAUI_C2M_AOC_BER_12 0x18 +#define TXGBE_SFF_25GAUI_C2M_ACC_BER_12 0x19 + +#define TXGBE_ETHERNET_COMP_OFFSET 0x83 +#define TXGBE_SFF_ETHERNET_40G_CR4 BIT(3) +#define TXGBE_SFF_ETHERNET_40G_SR4 BIT(2) +#define TXGBE_SFF_ETHERNET_40G_LR4 BIT(1) +#define TXGBE_SFF_ETHERNET_40G_ACTIVE BIT(0) + +#define TXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 +#define TXGBE_SFF_SOFT_RS_SELECT_10G 0x8 +#define TXGBE_SFF_SOFT_RS_SELECT_1G 0x0 +#define TXGBE_SFF_ADDRESSING_MODE 0x4 +#define TXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 +#define TXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 +#define TXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23 +#define TXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0 +#define TXGBE_I2C_EEPROM_READ_MASK 0x100 +#define TXGBE_I2C_EEPROM_STATUS_MASK 0x3 +#define TXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 +#define TXGBE_I2C_EEPROM_STATUS_PASS 0x1 +#define TXGBE_I2C_EEPROM_STATUS_FAIL 0x2 +#define TXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 + +#define TXGBE_CS4227 0xBE /* CS4227 address */ +#define TXGBE_CS4227_GLOBAL_ID_LSB 0 +#define TXGBE_CS4227_SCRATCH 2 +#define TXGBE_CS4227_GLOBAL_ID_VALUE 0x03E5 +#define TXGBE_CS4227_SCRATCH_VALUE 0x5aa5 +#define TXGBE_CS4227_RETRIES 5 +#define TXGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to program speed */ +#define TXGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to program EDC */ +#define TXGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to program speed */ +#define TXGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */ +#define TXGBE_CS4227_EDC_MODE_CX1 0x0002 +#define TXGBE_CS4227_EDC_MODE_SR 0x0004 +#define TXGBE_CS4227_RESET_HOLD 500 /* microseconds */ +#define TXGBE_CS4227_RESET_DELAY 500 /* milliseconds */ +#define TXGBE_CS4227_CHECK_DELAY 30 /* milliseconds */ +#define TXGBE_PE 0xE0 /* Port expander address */ +#define TXGBE_PE_OUTPUT 1 /* Output register offset */ +#define TXGBE_PE_CONFIG 3 /* Config register offset */ +#define TXGBE_PE_BIT1 (1 << 1) + +/* Flow control defines */ +#define TXGBE_TAF_SYM_PAUSE (0x1) +#define TXGBE_TAF_ASM_PAUSE (0x2) + +/* Bit-shift macros */ +#define TXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 +#define TXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 +#define TXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8 + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define TXGBE_SFF_VENDOR_OUI_TYCO 0x00407600 +#define TXGBE_SFF_VENDOR_OUI_FTL 0x00906500 +#define TXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define TXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100 + +/* I2C SDA and SCL timing parameters for standard mode */ 
+#define TXGBE_I2C_T_HD_STA 4 +#define TXGBE_I2C_T_LOW 5 +#define TXGBE_I2C_T_HIGH 4 +#define TXGBE_I2C_T_SU_STA 5 +#define TXGBE_I2C_T_HD_DATA 5 +#define TXGBE_I2C_T_SU_DATA 1 +#define TXGBE_I2C_T_RISE 1 +#define TXGBE_I2C_T_FALL 1 +#define TXGBE_I2C_T_SU_STO 4 +#define TXGBE_I2C_T_BUF 5 + +#ifndef TXGBE_SFP_DETECT_RETRIES +#define TXGBE_SFP_DETECT_RETRIES 10 +#endif /* TXGBE_SFP_DETECT_RETRIES */ + +/* SFP+ SFF-8472 Compliance */ +#define TXGBE_SFF_SFF_8472_UNSUP 0x00 + + +enum txgbe_phy_type txgbe_get_phy_type_from_id(struct txgbe_hw *hw); +s32 txgbe_get_phy_id(struct txgbe_hw *hw); +s32 txgbe_reset_phy(struct txgbe_hw *hw); +s32 txgbe_read_phy_reg_mdi(struct txgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data); +s32 txgbe_write_phy_reg_mdi(struct txgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 phy_data); +s32 txgbe_read_phy_reg(struct txgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); +s32 txgbe_write_phy_reg(struct txgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); +u32 txgbe_setup_phy_link(struct txgbe_hw *hw, u32 speed_set, bool autoneg_wait_to_complete); +u32 txgbe_setup_phy_link_speed(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete); +s32 txgbe_get_copper_link_capabilities(struct txgbe_hw *hw, + u32 *speed, + bool *autoneg); +s32 txgbe_check_reset_blocked(struct txgbe_hw *hw); + +s32 txgbe_get_phy_firmware_version(struct txgbe_hw *hw, + u16 *firmware_version); + +s32 txgbe_identify_module(struct txgbe_hw *hw); +s32 txgbe_identify_sfp_module(struct txgbe_hw *hw); +s32 txgbe_identify_qsfp_module(struct txgbe_hw *hw); +s32 txgbe_tn_check_overtemp(struct txgbe_hw *hw); +s32 txgbe_init_i2c(struct txgbe_hw *hw); +s32 txgbe_clear_i2c(struct txgbe_hw *hw); +s32 txgbe_switch_i2c_slave_addr(struct txgbe_hw *hw, u8 dev_addr); +s32 txgbe_read_i2c_byte(struct txgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 txgbe_read_i2c_word(struct txgbe_hw *hw, u16 byte_offset, + u8 dev_addr, u16 *data); + + +s32 txgbe_write_i2c_byte(struct txgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 txgbe_read_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data); +s32 txgbe_write_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset, + u8 eeprom_data); +s32 txgbe_read_i2c_sff8472(struct txgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data); +s32 txgbe_read_i2c_sff8636(struct txgbe_hw *hw, u8 page ,u8 byte_offset, + u8 *sff8636_data); +s32 txgbe_read_i2c_sfp_phy(struct txgbe_hw *hw, u16 byte_offset, + u16 *data); + +s32 txgbe_init_external_phy(struct txgbe_hw *hw); +s32 txgbe_uninit_external_phy(struct txgbe_hw *hw); +s32 txgbe_set_phy_pause_advertisement(struct txgbe_hw *hw, u32 pause_bit); +s32 txgbe_get_phy_advertised_pause(struct txgbe_hw *hw, u8 *pause_bit); +s32 txgbe_get_lp_advertised_pause(struct txgbe_hw *hw, u8 *pause_bit); +s32 txgbe_external_phy_suspend(struct txgbe_hw *hw); +s32 txgbe_external_phy_resume(struct txgbe_hw *hw); + +MTD_STATUS txgbe_read_mdio( + MTD_DEV * dev, + MTD_U16 port, + MTD_U16 mmd, + MTD_U16 reg, + MTD_U16 *value); + +MTD_STATUS txgbe_write_mdio( + MTD_DEV * dev, + MTD_U16 port, + MTD_U16 mmd, + MTD_U16 reg, + MTD_U16 value); + -#endif /* _TXGBE_NODE_H_ */ +#endif /* _TXGBE_PHY_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_procfs.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_procfs.c new file mode 100644 index 0000000000000000000000000000000000000000..bae9c0ec4c8a66055606895c8dd1153666cab480 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_procfs.c @@ -0,0 +1,929 @@ +/* + * WangXun 
RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on txgbe_procfs.h, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + + +#include "txgbe.h" +#include "txgbe_hw.h" +#include "txgbe_type.h" + +#ifdef TXGBE_PROCFS +#ifndef TXGBE_SYSFS + +#include +#include +#include +#include +#include + +static struct proc_dir_entry *txgbe_top_dir; + +static struct net_device_stats *procfs_get_stats(struct net_device *netdev) +{ +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + struct txgbe_adapter *adapter; +#endif + if (netdev == NULL) + return NULL; + +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + /* only return the current stats */ + return &netdev->stats; +#else + adapter = netdev_priv(netdev); + + /* only return the current stats */ + return &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ +} + +static int txgbe_fwbanner(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%s\n", adapter->eeprom_id); +} + +static int txgbe_porttype(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + return snprintf(page, count, "%d\n", + test_bit(__TXGBE_DOWN, &adapter->state)); +} + +static int txgbe_portspeed(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + int speed = 0; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + switch (adapter->link_speed) { + case TXGBE_LINK_SPEED_100_FULL: + speed = 1; + break; + case TXGBE_LINK_SPEED_1GB_FULL: + speed = 10; + break; + case TXGBE_LINK_SPEED_10GB_FULL: + speed = 100; + break; + default: + break; + } + return snprintf(page, count, "%d\n", speed); +} + +static int txgbe_wqlflag(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->wol); +} + +static int txgbe_xflowctl(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + struct txgbe_hw *hw; + + if (adapter == NULL) + return 
snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", hw->fc.current_mode); +} + +static int txgbe_rxdrops(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_dropped); +} + +static int txgbe_rxerrors(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", net_stats->rx_errors); +} + +static int txgbe_rxupacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_hw *hw; + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", rd32(hw, TXGBE_TPR)); +} + +static int txgbe_rxmpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_hw *hw; + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + int i, mprc = 0; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + for (i = 0; i < 128; i++) + mprc += rd32(hw, TXGBE_PX_MPRC(i)); + return snprintf(page, count, "%d\n", mprc); +} + +static int txgbe_rxbpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_hw *hw; + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + rd32(hw, TXGBE_RX_BC_FRAMES_GOOD_LOW)); +} + +static int txgbe_txupacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_hw *hw; + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + rd32(hw, TXGBE_TX_FRAME_CNT_GOOD_BAD_LOW)); +} + +static int txgbe_txmpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_hw *hw; + struct txgbe_adapter *adapter = (struct txgbe_adapter 
*)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + rd32(hw, TXGBE_TX_MC_FRAMES_GOOD_LOW)); +} + +static int txgbe_txbpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_hw *hw; + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + rd32(hw, TXGBE_TX_BC_FRAMES_GOOD_LOW)); +} + +static int txgbe_txerrors(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_errors); +} + +static int txgbe_txdrops(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_dropped); +} + +static int txgbe_rxframes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_packets); +} + +static int txgbe_rxbytes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_bytes); +} + +static int txgbe_txframes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_packets); +} + +static int txgbe_txbytes(char *page, char __always_unused **start, + off_t __always_unused off, 
int count,
+			  int __always_unused *eof, void *data)
+{
+	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
+	struct net_device_stats *net_stats;
+
+	if (adapter == NULL)
+		return snprintf(page, count, "error: no adapter\n");
+	net_stats = procfs_get_stats(adapter->netdev);
+	if (net_stats == NULL)
+		return snprintf(page, count, "error: no net stats\n");
+
+	return snprintf(page, count, "%lu\n",
+			net_stats->tx_bytes);
+}
+
+static int txgbe_linkstat(char *page, char __always_unused **start,
+			  off_t __always_unused off, int count,
+			  int __always_unused *eof, void *data)
+{
+	struct txgbe_hw *hw;
+	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
+	int bitmask = 0;
+	u32 link_speed;
+	bool link_up = false;
+
+	if (adapter == NULL)
+		return snprintf(page, count, "error: no adapter\n");
+
+	hw = &adapter->hw;
+	if (hw == NULL)
+		return snprintf(page, count, "error: no hw data\n");
+
+	if (!test_bit(__TXGBE_DOWN, &adapter->state))
+		bitmask |= 1;
+
+	if (TCALL(hw, mac.ops.check_link, &link_speed, &link_up, false) != 0)
+		/* always assume link is up, if no check link function */
+		link_up = true;
+	if (link_up)
+		bitmask |= 2;
+
+	if (adapter->old_lsc != adapter->lsc_int) {
+		bitmask |= 4;
+		adapter->old_lsc = adapter->lsc_int;
+	}
+
+	return snprintf(page, count, "0x%X\n", bitmask);
+}
+
+static int txgbe_funcid(char *page, char __always_unused **start,
+			off_t __always_unused off, int count,
+			int __always_unused *eof, void *data)
+{
+	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
+	struct txgbe_hw *hw;
+
+	if (adapter == NULL)
+		return snprintf(page, count, "error: no adapter\n");
+
+	hw = &adapter->hw;
+	if (hw == NULL)
+		return snprintf(page, count, "error: no hw data\n");
+
+	return snprintf(page, count, "0x%X\n", hw->bus.func);
+}
+
+static int txgbe_funcvers(char *page, char __always_unused **start,
+			  off_t __always_unused off, int count,
+			  int __always_unused *eof, void __always_unused *data)
+{
+	return snprintf(page, count, "%s\n", txgbe_driver_version);
+}
+
+static int txgbe_macburn(char *page, char __always_unused **start,
+			 off_t __always_unused off, int count,
+			 int __always_unused *eof, void *data)
+{
+	struct txgbe_hw *hw;
+	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
+	if (adapter == NULL)
+		return snprintf(page, count, "error: no adapter\n");
+
+	hw = &adapter->hw;
+	if (hw == NULL)
+		return snprintf(page, count, "error: no hw data\n");
+
+	return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n",
+			(unsigned int)hw->mac.perm_addr[0],
+			(unsigned int)hw->mac.perm_addr[1],
+			(unsigned int)hw->mac.perm_addr[2],
+			(unsigned int)hw->mac.perm_addr[3],
+			(unsigned int)hw->mac.perm_addr[4],
+			(unsigned int)hw->mac.perm_addr[5]);
+}
+
+static int txgbe_macadmn(char *page, char __always_unused **start,
+			 off_t __always_unused off, int count,
+			 int __always_unused *eof, void *data)
+{
+	struct txgbe_hw *hw;
+	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
+	if (adapter == NULL)
+		return snprintf(page, count, "error: no adapter\n");
+
+	hw = &adapter->hw;
+	if (hw == NULL)
+		return snprintf(page, count, "error: no hw data\n");
+
+	return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n",
+			(unsigned int)hw->mac.addr[0],
+			(unsigned int)hw->mac.addr[1],
+			(unsigned int)hw->mac.addr[2],
+			(unsigned int)hw->mac.addr[3],
+			(unsigned int)hw->mac.addr[4],
+			(unsigned int)hw->mac.addr[5]);
+}
+
+static int txgbe_maclla1(char *page, char __always_unused **start,
+			 off_t __always_unused off, int count,
+			 int __always_unused 
*eof, void *data) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + struct txgbe_hw *hw; + int rc; + u16 eeprom_buff[6]; + u16 first_word = 0x37; + const u16 word_count = ARRAY_SIZE(eeprom_buff); + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + rc = TCALL(hw, eeprom.ops.read_buffer, first_word, 1, &first_word); + if (rc != 0) + return snprintf(page, count, + "error: reading pointer to the EEPROM\n"); + + if (first_word != 0x0000 && first_word != 0xFFFF) { + rc = TCALL(hw, eeprom.ops.read_buffer, first_word, word_count, + eeprom_buff); + if (rc != 0) + return snprintf(page, count, "error: reading buffer\n"); + } else { + memset(eeprom_buff, 0, sizeof(eeprom_buff)); + } + + switch (hw->bus.func) { + case 0: + return snprintf(page, count, "0x%04X%04X%04X\n", + eeprom_buff[0], + eeprom_buff[1], + eeprom_buff[2]); + case 1: + return snprintf(page, count, "0x%04X%04X%04X\n", + eeprom_buff[3], + eeprom_buff[4], + eeprom_buff[5]); + } + return snprintf(page, count, "unexpected port %d\n", hw->bus.func); +} + +static int txgbe_mtusize(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + struct net_device *netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + + return snprintf(page, count, "%d\n", netdev->mtu); +} + +static int txgbe_featflag(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + int bitmask = 0; + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + struct net_device *netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + if (adapter->netdev->features & NETIF_F_RXCSUM) + bitmask |= 1; + return snprintf(page, count, "%d\n", bitmask); +} + +static int txgbe_lsominct(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void __always_unused *data) +{ + return snprintf(page, count, "%d\n", 1); +} + +static int txgbe_prommode(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + struct net_device *netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + + return snprintf(page, count, "%d\n", + netdev->flags & IFF_PROMISC); +} + +static int txgbe_txdscqsz(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->tx_ring[0]->count); +} + +static int txgbe_rxdscqsz(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + if 
(adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->rx_ring[0]->count); +} + +static int txgbe_rxqavg(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + int index; + int diff = 0; + u16 ntc; + u16 ntu; + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + for (index = 0; index < adapter->num_rx_queues; index++) { + ntc = adapter->rx_ring[index]->next_to_clean; + ntu = adapter->rx_ring[index]->next_to_use; + + if (ntc >= ntu) + diff += (ntc - ntu); + else + diff += (adapter->rx_ring[index]->count - ntu + ntc); + } + if (adapter->num_rx_queues <= 0) + return snprintf(page, count, + "can't calculate, number of queues %d\n", + adapter->num_rx_queues); + return snprintf(page, count, "%d\n", diff/adapter->num_rx_queues); +} + +static int txgbe_txqavg(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + int index; + int diff = 0; + u16 ntc; + u16 ntu; + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + for (index = 0; index < adapter->num_tx_queues; index++) { + ntc = adapter->tx_ring[index]->next_to_clean; + ntu = adapter->tx_ring[index]->next_to_use; + + if (ntc >= ntu) + diff += (ntc - ntu); + else + diff += (adapter->tx_ring[index]->count - ntu + ntc); + } + if (adapter->num_tx_queues <= 0) + return snprintf(page, count, + "can't calculate, number of queues %d\n", + adapter->num_tx_queues); + return snprintf(page, count, "%d\n", + diff/adapter->num_tx_queues); +} + +static int txgbe_iovotype(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void __always_unused *data) +{ + return snprintf(page, count, "2\n"); +} + +static int txgbe_funcnbr(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->num_vfs); +} + +static int txgbe_pciebnbr(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->pdev->bus->number); +} + +static int txgbe_therm_dealarmthresh(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_therm_proc_data *therm_data = + (struct txgbe_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + return snprintf(page, count, "%d\n", + therm_data->sensor_data->dalarm_thresh); +} + + +static int txgbe_therm_alarmthresh(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct txgbe_therm_proc_data *therm_data = + (struct txgbe_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + return snprintf(page, count, "%d\n", + 
therm_data->sensor_data->alarm_thresh); +} + +static int txgbe_therm_temp(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + s32 status; + struct txgbe_therm_proc_data *therm_data = + (struct txgbe_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + status = txgbe_get_thermal_sensor_data(therm_data->hw); + if (status != 0) + snprintf(page, count, "error: status %d returned\n", status); + + return snprintf(page, count, "%d\n", therm_data->sensor_data->temp); +} + + +struct txgbe_proc_type { + char name[32]; + int (*read)(char*, char**, off_t, int, int*, void*); +}; + +struct txgbe_proc_type txgbe_proc_entries[] = { + {"fwbanner", &txgbe_fwbanner}, + {"porttype", &txgbe_porttype}, + {"portspeed", &txgbe_portspeed}, + {"wqlflag", &txgbe_wqlflag}, + {"xflowctl", &txgbe_xflowctl}, + {"rxdrops", &txgbe_rxdrops}, + {"rxerrors", &txgbe_rxerrors}, + {"rxupacks", &txgbe_rxupacks}, + {"rxmpacks", &txgbe_rxmpacks}, + {"rxbpacks", &txgbe_rxbpacks}, + {"txdrops", &txgbe_txdrops}, + {"txerrors", &txgbe_txerrors}, + {"txupacks", &txgbe_txupacks}, + {"txmpacks", &txgbe_txmpacks}, + {"txbpacks", &txgbe_txbpacks}, + {"rxframes", &txgbe_rxframes}, + {"rxbytes", &txgbe_rxbytes}, + {"txframes", &txgbe_txframes}, + {"txbytes", &txgbe_txbytes}, + {"linkstat", &txgbe_linkstat}, + {"funcid", &txgbe_funcid}, + {"funcvers", &txgbe_funcvers}, + {"macburn", &txgbe_macburn}, + {"macadmn", &txgbe_macadmn}, + {"maclla1", &txgbe_maclla1}, + {"mtusize", &txgbe_mtusize}, + {"featflag", &txgbe_featflag}, + {"lsominct", &txgbe_lsominct}, + {"prommode", &txgbe_prommode}, + {"txdscqsz", &txgbe_txdscqsz}, + {"rxdscqsz", &txgbe_rxdscqsz}, + {"txqavg", &txgbe_txqavg}, + {"rxqavg", &txgbe_rxqavg}, + {"iovotype", &txgbe_iovotype}, + {"funcnbr", &txgbe_funcnbr}, + {"pciebnbr", &txgbe_pciebnbr}, + {"", NULL} +}; + +struct txgbe_proc_type txgbe_internal_entries[] = { + {"temp", &txgbe_therm_temp}, + {"alarmthresh", &txgbe_therm_alarmthresh}, + {"dealarmthresh", &txgbe_therm_dealarmthresh}, + {"", NULL} +}; + +void txgbe_del_proc_entries(struct txgbe_adapter *adapter) +{ + int index; + int i; + char buf[16]; /* much larger than the sensor number will ever be */ + + if (txgbe_top_dir == NULL) + return; + + for (i = 0; i < TXGBE_MAX_SENSORS; i++) { + if (adapter->therm_dir[i] == NULL) + continue; + + for (index = 0; ; index++) { + if (txgbe_internal_entries[index].read == NULL) + break; + + remove_proc_entry(txgbe_internal_entries[index].name, + adapter->therm_dir[i]); + } + snprintf(buf, sizeof(buf), "sensor_%d", i); + remove_proc_entry(buf, adapter->info_dir); + } + + if (adapter->info_dir != NULL) { + for (index = 0; ; index++) { + if (txgbe_proc_entries[index].read == NULL) + break; + remove_proc_entry(txgbe_proc_entries[index].name, + adapter->info_dir); + } + remove_proc_entry("info", adapter->eth_dir); + } + + if (adapter->eth_dir != NULL) + remove_proc_entry(pci_name(adapter->pdev), txgbe_top_dir); +} + +/* called from txgbe_main.c */ +void txgbe_procfs_exit(struct txgbe_adapter *adapter) +{ + txgbe_del_proc_entries(adapter); +} + +int txgbe_procfs_topdir_init(void) +{ + txgbe_top_dir = proc_mkdir("driver/txgbe", NULL); + if (txgbe_top_dir == NULL) + return -ENOMEM; + + return 0; +} + +void txgbe_procfs_topdir_exit(void) +{ + remove_proc_entry("driver/txgbe", NULL); +} + +/* called from txgbe_main.c */ +int txgbe_procfs_init(struct txgbe_adapter *adapter) +{ + int rc = 0; + int index; + int i; + 
char buf[16]; /* much larger than the sensor number will ever be */ + + adapter->eth_dir = NULL; + adapter->info_dir = NULL; + adapter->therm_dir = NULL; + + if (txgbe_top_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + + adapter->eth_dir = proc_mkdir(pci_name(adapter->pdev), txgbe_top_dir); + if (adapter->eth_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + + adapter->info_dir = proc_mkdir("info", adapter->eth_dir); + if (adapter->info_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + for (index = 0; ; index++) { + if (txgbe_proc_entries[index].read == NULL) + break; + if (!(create_proc_read_entry(txgbe_proc_entries[index].name, + 0444, + adapter->info_dir, + txgbe_proc_entries[index].read, + adapter))) { + + rc = -ENOMEM; + goto fail; + } + } + if (!TCALL(&(adapter->hw), ops.init_thermal_sensor_thresh)) + goto exit; + + + snprintf(buf, sizeof(buf), "sensor"); + adapter->therm_dir = proc_mkdir(buf, adapter->info_dir); + if (adapter->therm_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + for (index = 0; ; index++) { + if (txgbe_internal_entries[index].read == NULL) + break; + /* + * therm_data struct contains pointer the read func + * will be needing + */ + adapter->therm_data.hw = &adapter->hw; + adapter->therm_data.sensor_data = + &adapter->hw.mac.thermal_sensor_data.sensor; + + if (!(create_proc_read_entry( + txgbe_internal_entries[index].name, + 0444, + adapter->therm_dir, + txgbe_internal_entries[index].read, + &adapter->therm_data))) { + rc = -ENOMEM; + goto fail; + } + } + + goto exit; + +fail: + txgbe_del_proc_entries(adapter); +exit: + return rc; +} + +#endif /* !TXGBE_SYSFS */ +#endif /* TXGBE_PROCFS */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ptp.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ptp.c new file mode 100644 index 0000000000000000000000000000000000000000..24474c0cae2d2fe5c5855269a6460ba4ecaeb930 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ptp.c @@ -0,0 +1,1193 @@ +/* + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on txgbe_ptp.c, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + + +#include "txgbe.h" +#include "txgbe_hw.h" +#include + +/* + * SYSTIME is defined by a fixed point system which allows the user to + * define the scale counter increment value at every level change of + * the oscillator driving SYSTIME value. The time unit is determined by + * the clock frequency of the oscillator and TIMINCA register. + * The cyclecounter and timecounter structures are used to to convert + * the scale counter into nanoseconds. SYSTIME registers need to be converted + * to ns values by use of only a right shift. 
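+ * (As a concrete sketch of that conversion: the generic kernel timecounter
+ *  computes ns = (cycles * cc.mult) >> cc.shift; since this driver leaves
+ *  cc.mult at 1 and only programs cc.shift, the conversion in effect reduces
+ *  to the single right shift mentioned above.)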
+ * The following math determines the largest incvalue that will fit into + * the available bits in the TIMINCA register: + * Period * [ 2 ^ ( MaxWidth - PeriodWidth ) ] + * PeriodWidth: Number of bits to store the clock period + * MaxWidth: The maximum width value of the TIMINCA register + * Period: The clock period for the oscillator, which changes based on the link + * speed: + * At 10Gb link or no link, the period is 6.4 ns. + * At 1Gb link, the period is multiplied by 10. (64ns) + * At 100Mb link, the period is multiplied by 100. (640ns) + * round(): discard the fractional portion of the calculation + * + * The calculated value allows us to right shift the SYSTIME register + * value in order to quickly convert it into a nanosecond clock, + * while allowing for the maximum possible adjustment value. + * + * LinkSpeed ClockFreq ClockPeriod TIMINCA:IV + * 10000Mbps 156.25MHz 6.4*10^-9 0xCCCCCC(0xFFFFF/ns) + * 1000 Mbps 62.5 MHz 16 *10^-9 0x800000(0x7FFFF/ns) + * 100 Mbps 6.25 MHz 160*10^-9 0xA00000(0xFFFF/ns) + * 10 Mbps 0.625 MHz 1600*10^-9 0xC7F380(0xFFF/ns) + * FPGA 31.25 MHz 32 *10^-9 0x800000(0x3FFFF/ns) + * AMLITE 400MHZ 2.5*10^-9 0x0A0000 + * These diagrams are only for the 10Gb link period + * + * +--------------+ +--------------+ + * | 32 | | 8 | 3 | 20 | + * *--------------+ +--------------+ + * \________ 43 bits ______/ fract + * + * The 43 bit SYSTIME overflows every + * 2^43 * 10^-9 / 3600 = 2.4 hours + */ +#define TXGBE_INCVAL_10GB 0xCCCCCC +#define TXGBE_INCVAL_1GB 0x800000 +#define TXGBE_INCVAL_100 0xA00000 +#define TXGBE_INCVAL_10 0xC7F380 +#define TXGBE_INCVAL_FPGA 0x800000 +#define TXGBE_INCVAL_AML 0xA00000 + +#define TXGBE_INCVAL_SHIFT_10GB 20 +#define TXGBE_INCVAL_SHIFT_1GB 18 +#define TXGBE_INCVAL_SHIFT_100 15 +#define TXGBE_INCVAL_SHIFT_10 12 +#define TXGBE_INCVAL_SHIFT_FPGA 17 +#define TXGBE_INCVAL_SHIFT_AML 21 + +#define TXGBE_OVERFLOW_PERIOD (HZ * 30) +#define TXGBE_PTP_TX_TIMEOUT (HZ) + +#define NS_PER_SEC 1000000000ULL +#define NS_PER_MSEC 1000000ULL + +static void txgbe_ptp_setup_sdp(struct txgbe_adapter *adapter) +{ + struct cyclecounter *cc = &adapter->hw_cc; + struct txgbe_hw *hw = &adapter->hw; + u32 tsauxc, rem, tssdp, tssdp1; + u32 trgttiml0,trgttimh0, trgttiml1, trgttimh1; + u64 ns = 0; + unsigned long flags; + + if (hw->mac.type != txgbe_mac_aml && + hw->mac.type != txgbe_mac_aml40) + return; + + if (TXGBE_1588_PPS_WIDTH * NS_PER_MSEC >= NS_PER_SEC) { + e_dev_err("PTP pps width cannot be longer than 1s!\n"); + return; + } + + /* disable the pin first */ + wr32(hw, TXGBE_TSEC_1588_AUX_CTL, 0); + TXGBE_WRITE_FLUSH(hw); + + if (!(adapter->flags2 & TXGBE_FLAG2_PTP_PPS_ENABLED)) { + if (adapter->pps_enabled == 1) { + adapter->pps_enabled = 0; + if (TXGBE_1588_TOD_ENABLE) + txgbe_set_pps(hw, adapter->pps_enabled, 0, 0); + } + return; + } + + adapter->pps_enabled = 1; + + tssdp = TXGBE_TSEC_1588_SDP_FUN_SEL_TT0; + tssdp |= TXGBE_1588_PPS_LEVEL ? 
TXGBE_TSEC_1588_SDP_OUT_LEVEL_HIGH : TXGBE_TSEC_1588_SDP_OUT_LEVEL_LOW; + tsauxc = TXGBE_TSEC_1588_AUX_CTL_PLSG | TXGBE_TSEC_1588_AUX_CTL_EN_TT0 | + TXGBE_TSEC_1588_AUX_CTL_EN_TT1 | TXGBE_TSEC_1588_AUX_CTL_EN_TS0; + + tssdp1 = TXGBE_TSEC_1588_SDP_FUN_SEL_TS0; + + /* Read the current clock time, and save the cycle counter value */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->hw_tc); + adapter->pps_edge_start = adapter->hw_tc.cycle_last; + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + adapter->pps_edge_end = adapter->pps_edge_start; + + /* Figure out how far past the next second we are */ + div_u64_rem(ns, NS_PER_SEC, &rem); + + /* Figure out how many nanoseconds to add to round the clock edge up + * to the next full second + */ + rem = (NS_PER_SEC - rem); + + /* Adjust the clock edge to align with the next full second. */ + adapter->pps_edge_start += div_u64(((u64)rem << cc->shift), cc->mult); + trgttiml0 = (u32)adapter->pps_edge_start; + trgttimh0 = (u32)(adapter->pps_edge_start >> 32); + + if (TXGBE_1588_TOD_ENABLE) + txgbe_set_pps(hw, adapter->pps_enabled, ns + rem, adapter->pps_edge_start); + + rem += TXGBE_1588_PPS_WIDTH * NS_PER_MSEC; + adapter->pps_edge_end += div_u64(((u64)rem << cc->shift), cc->mult); + trgttiml1 = (u32)adapter->pps_edge_end; + trgttimh1 = (u32)(adapter->pps_edge_end >> 32); + + wr32(hw, TXGBE_TSEC_1588_TRGT_L(0), trgttiml0); + wr32(hw, TXGBE_TSEC_1588_TRGT_H(0), trgttimh0); + wr32(hw, TXGBE_TSEC_1588_TRGT_L(1), trgttiml1); + wr32(hw, TXGBE_TSEC_1588_TRGT_H(1), trgttimh1); + wr32(hw, TXGBE_TSEC_1588_SDP(0), tssdp); + wr32(hw, TXGBE_TSEC_1588_SDP(1), tssdp1); + wr32(hw, TXGBE_TSEC_1588_AUX_CTL, tsauxc); + wr32(hw, TXGBE_TSEC_1588_INT_EN, TXGBE_TSEC_1588_INT_EN_TT1); + TXGBE_WRITE_FLUSH(hw); + + rem = NS_PER_SEC; + /* Adjust the clock edge to align with the next full second. */ + adapter->sec_to_cc = div_u64(((u64)rem << cc->shift), cc->mult); +} + +/** + * txgbe_ptp_read - read raw cycle counter (to be used by time counter) + * @hw_cc: the cyclecounter structure + * + * this function reads the cyclecounter registers and is called by the + * cyclecounter structure used to construct a ns counter from the + * arbitrary fixed point registers + */ +static u64 txgbe_ptp_read(const struct cyclecounter *hw_cc) +{ + struct txgbe_adapter *adapter = + container_of(hw_cc, struct txgbe_adapter, hw_cc); + struct txgbe_hw *hw = &adapter->hw; + u64 stamp = 0; + + stamp |= (u64)rd32(hw, TXGBE_TSC_1588_SYSTIML); + stamp |= (u64)rd32(hw, TXGBE_TSC_1588_SYSTIMH) << 32; + + return stamp; +} + +/** + * txgbe_ptp_convert_to_hwtstamp - convert register value to hw timestamp + * @adapter: private adapter structure + * @hwtstamp: stack timestamp structure + * @systim: unsigned 64bit system time value + * + * We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value + * which can be used by the stack's ptp functions. + * + * The lock is used to protect consistency of the cyclecounter and the SYSTIME + * registers. However, it does not need to protect against the Rx or Tx + * timestamp registers, as there can't be a new timestamp until the old one is + * unlatched by reading. + * + * In addition to the timestamp in hardware, some controllers need a software + * overflow cyclecounter, and this function takes this into account as well. 
+ **/ +static void txgbe_ptp_convert_to_hwtstamp(struct txgbe_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamp, + u64 timestamp) +{ + unsigned long flags; + u64 ns; + + memset(hwtstamp, 0, sizeof(*hwtstamp)); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_cyc2time(&adapter->hw_tc, timestamp); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + hwtstamp->hwtstamp = ns_to_ktime(ns); +} + +#ifdef HAVE_PTP_CLOCK_INFO_ADJFINE +/** + * txgbe_ptp_adjfreq + * @ptp: the ptp clock structure + * @ppb: parts per billion adjustment from base + * + * adjust the frequency of the ptp cycle counter by the + * indicated ppb from the base frequency. + */ +static int txgbe_ptp_adjfine(struct ptp_clock_info *ptp, long ppb) +{ + struct txgbe_adapter *adapter = + container_of(ptp, struct txgbe_adapter, ptp_caps); + struct txgbe_hw *hw = &adapter->hw; + u64 incval; + + smp_mb(); + incval = READ_ONCE(adapter->base_incval); + incval = adjust_by_scaled_ppm(incval, ppb); + + if (incval > TXGBE_TSC_1588_INC_IV(~0)) + e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); + wr32(hw, TXGBE_TSC_1588_INC, + TXGBE_TSC_1588_INC_IVP(incval, 2)); + + return 0; +} + +#else +/** + * txgbe_ptp_adjfreq + * @ptp: the ptp clock structure + * @ppb: parts per billion adjustment from base + * + * adjust the frequency of the ptp cycle counter by the + * indicated ppb from the base frequency. + */ +static int txgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct txgbe_adapter *adapter = + container_of(ptp, struct txgbe_adapter, ptp_caps); + struct txgbe_hw *hw = &adapter->hw; + u64 freq, incval; + u32 diff; + int neg_adj = 0; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + + smp_mb(); + incval = READ_ONCE(adapter->base_incval); + + freq = incval; + freq *= ppb; + diff = div_u64(freq, 1000000000ULL); + + incval = neg_adj ? (incval - diff) : (incval + diff); + + if (incval > TXGBE_TSC_1588_INC_IV(~0)) + e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); + wr32(hw, TXGBE_TSC_1588_INC, + TXGBE_TSC_1588_INC_IVP(incval, 2)); + + return 0; +} +#endif + +/** + * txgbe_ptp_adjtime + * @ptp: the ptp clock structure + * @delta: offset to adjust the cycle counter by ns + * + * adjust the timer by resetting the timecounter structure. + */ +static int txgbe_ptp_adjtime(struct ptp_clock_info *ptp, + s64 delta) +{ + struct txgbe_adapter *adapter = + container_of(ptp, struct txgbe_adapter, ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + timecounter_adjtime(&adapter->hw_tc, delta); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); + + return 0; +} + +#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 +#ifdef HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +/** + * txgbe_ptp_gettimex + * @ptp: the ptp clock structure + * @ts: timespec to hold the PHC timestamp + * @sts: structure to hold the system time before and after reading the PHC + * + * read the timecounter and return the correct value on ns, + * after converting it into a struct timespec. 
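+ *
+ * (The ptp_read_system_prets()/ptp_read_system_postts() calls below record
+ *  the system clock immediately before and after the SYSTIML read; this is
+ *  roughly what the PTP_SYS_OFFSET_EXTENDED ioctl relies on to correlate the
+ *  PHC with system time.)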
+ */ +static int txgbe_ptp_gettimex(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct txgbe_adapter *adapter = + container_of(ptp, struct txgbe_adapter, ptp_caps); + struct txgbe_hw *hw = &adapter->hw; + unsigned long flags; + u64 ns, stamp; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + ptp_read_system_prets(sts); + stamp = rd32(hw, TXGBE_TSC_1588_SYSTIML); + ptp_read_system_postts(sts); + stamp |= (u64)rd32(hw, TXGBE_TSC_1588_SYSTIMH) << 32; + + ns = timecounter_cyc2time(&adapter->hw_tc, stamp); + + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} +#endif +#endif + +/** + * txgbe_ptp_gettime64 + * @ptp: the ptp clock structure + * @ts: timespec64 structure to hold the current time value + * + * read the timecounter and return the correct value on ns, + * after converting it into a struct timespec64. + */ +static int txgbe_ptp_gettime64(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct txgbe_adapter *adapter = + container_of(ptp, struct txgbe_adapter, ptp_caps); + unsigned long flags; + u64 ns; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->hw_tc); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +/** + * txgbe_ptp_settime64 + * @ptp: the ptp clock structure + * @ts: the timespec64 containing the new time for the cycle counter + * + * reset the timecounter to use a new base value instead of the kernel + * wall timer value. + */ +static int txgbe_ptp_settime64(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct txgbe_adapter *adapter = + container_of(ptp, struct txgbe_adapter, ptp_caps); + u64 ns; + unsigned long flags; + + ns = timespec64_to_ns(ts); + + /* reset the timecounter */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + timecounter_init(&adapter->hw_tc, &adapter->hw_cc, ns); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); + + return 0; +} + +#ifndef HAVE_PTP_CLOCK_INFO_GETTIME64 +static int txgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +{ + struct timespec64 ts64; + int err; + + err = txgbe_ptp_gettime64(ptp, &ts64); + if (err) + return err; + + *ts = timespec64_to_timespec(ts64); + + return 0; +} + +static int txgbe_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct timespec64 ts64; + + ts64 = timespec_to_timespec64(*ts); + return txgbe_ptp_settime64(ptp, &ts64); +} +#endif + +/** + * txgbe_ptp_feature_enable + * @ptp: the ptp clock structure + * @rq: the requested feature to change + * @on: whether to enable or disable the feature + * + * enable (or disable) ancillary features of the phc subsystem. + * our driver only supports the PPS feature on the X540 + */ +static int txgbe_ptp_feature_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct txgbe_adapter *adapter = + container_of(ptp, struct txgbe_adapter, ptp_caps); + struct txgbe_hw *hw = &adapter->hw; + /** + * When PPS is enabled, unmask the interrupt for the ClockOut + * feature, so that the interrupt handler can send the PPS + * event when the clock SDP triggers. 
Clear mask when PPS is + * disabled + */ + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + if (rq->type != PTP_CLK_REQ_PPS || !adapter->ptp_setup_sdp) + return -ENOTSUPP; + + if (on) + adapter->flags2 |= TXGBE_FLAG2_PTP_PPS_ENABLED; + else + adapter->flags2 &= ~TXGBE_FLAG2_PTP_PPS_ENABLED; + + adapter->ptp_setup_sdp(adapter); + return 0; + } + + return -ENOTSUPP; +} + +/** + * txgbe_ptp_check_pps_event + * @adapter: the private adapter structure + * @eicr: the interrupt cause register value + * + * This function is called by the interrupt routine when checking for + * interrupts. It will check and handle a pps event. + */ +void txgbe_ptp_check_pps_event(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct cyclecounter *cc = &adapter->hw_cc; + u32 tsauxc, rem, int_status; + u32 trgttiml0,trgttimh0, trgttiml1, trgttimh1; + u64 ns = 0; + unsigned long flags; + + /* this check is necessary in case the interrupt was enabled via some + * alternative means (ex. debug_fs). Better to check here than + * everywhere that calls this function. + */ + if (!adapter->ptp_clock) + return; + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + int_status = rd32(hw, TXGBE_TSEC_1588_INT_ST); + if (int_status & TXGBE_TSEC_1588_INT_ST_TT1) { + /* disable the pin first */ + wr32(hw, TXGBE_TSEC_1588_AUX_CTL, 0); + TXGBE_WRITE_FLUSH(hw); + + tsauxc = TXGBE_TSEC_1588_AUX_CTL_PLSG | TXGBE_TSEC_1588_AUX_CTL_EN_TT0 | + TXGBE_TSEC_1588_AUX_CTL_EN_TT1 | TXGBE_TSEC_1588_AUX_CTL_EN_TS0; + + /* Read the current clock time, and save the cycle counter value */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->hw_tc); + adapter->pps_edge_start = adapter->hw_tc.cycle_last; + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + adapter->pps_edge_end = adapter->pps_edge_start; + + /* Figure out how far past the next second we are */ + div_u64_rem(ns, NS_PER_SEC, &rem); + + /* Figure out how many nanoseconds to add to round the clock edge up + * to the next full second + */ + rem = (NS_PER_SEC - rem); + + /* Adjust the clock edge to align with the next full second. */ + adapter->pps_edge_start += div_u64(((u64)rem << cc->shift), cc->mult); + + /* Adjust the clock edge to align with the next full second. */ + trgttiml0 = (u32)adapter->pps_edge_start; + trgttimh0 = (u32)(adapter->pps_edge_start >> 32); + + rem += TXGBE_1588_PPS_WIDTH * NS_PER_MSEC; + adapter->pps_edge_end += div_u64(((u64)rem << cc->shift), cc->mult); + + trgttiml1 = (u32)adapter->pps_edge_end; + trgttimh1 = (u32)(adapter->pps_edge_end >> 32); + + wr32(hw, TXGBE_TSEC_1588_TRGT_L(0), trgttiml0); + wr32(hw, TXGBE_TSEC_1588_TRGT_H(0), trgttimh0); + wr32(hw, TXGBE_TSEC_1588_TRGT_L(1), trgttiml1); + wr32(hw, TXGBE_TSEC_1588_TRGT_H(1), trgttimh1); + + wr32(hw, TXGBE_TSEC_1588_AUX_CTL, tsauxc); + TXGBE_WRITE_FLUSH(hw); + } + } + /* we don't config PPS on SDP for txgbe_mac_sp yet, so just return. + * ptp_clock_event(adapter->ptp_clock, &event); + */ +} + +/** + * txgbe_ptp_overflow_check - watchdog task to detect SYSTIME overflow + * @adapter: private adapter struct + * + * this watchdog task periodically reads the timecounter + * in order to prevent missing when the system time registers wrap + * around. This needs to be run approximately twice a minute for the fastest + * overflowing hardware. We run it for all hardware since it shouldn't have a + * large impact. 
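+ *
+ * (The underlying constraint is that timecounter_read() only tracks the
+ *  delta since its previous reading, so it must run at least once per wrap
+ *  period of the raw counter; polling every TXGBE_OVERFLOW_PERIOD is assumed
+ *  to leave ample margin for that.)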
+ */ +void txgbe_ptp_overflow_check(struct txgbe_adapter *adapter) +{ + bool timeout = time_is_before_jiffies(adapter->last_overflow_check + + TXGBE_OVERFLOW_PERIOD); + struct timespec64 ts; + + if (timeout) { + txgbe_ptp_gettime64(&adapter->ptp_caps, &ts); + adapter->last_overflow_check = jiffies; + } +} + +/** + * txgbe_ptp_rx_hang - detect error case when Rx timestamp registers latched + * @adapter: private network adapter structure + * + * this watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. + */ +void txgbe_ptp_rx_hang(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_ring *rx_ring; + u32 tsyncrxctl = rd32(hw, TXGBE_PSR_1588_CTL); + unsigned long rx_event; + int n; + + /* if we don't have a valid timestamp in the registers, just update the + * timeout counter and exit + */ + if (!(tsyncrxctl & TXGBE_PSR_1588_CTL_VALID)) { + adapter->last_rx_ptp_check = jiffies; + return; + } + + /* determine the most recent watchdog or rx_timestamp event */ + rx_event = adapter->last_rx_ptp_check; + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + if (time_after(rx_ring->last_rx_timestamp, rx_event)) + rx_event = rx_ring->last_rx_timestamp; + } + + /* only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5*HZ)) { + rd32(hw, TXGBE_PSR_1588_STMPH); + adapter->last_rx_ptp_check = jiffies; + + adapter->rx_hwtstamp_cleared++; + e_warn(drv, "clearing RX Timestamp hang"); + } +} + +/** + * txgbe_ptp_clear_tx_timestamp - utility function to clear Tx timestamp state + * @adapter: the private adapter structure + * + * This function should be called whenever the state related to a Tx timestamp + * needs to be cleared. This helps ensure that all related bits are reset for + * the next Tx timestamp event. + */ +static void txgbe_ptp_clear_tx_timestamp(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + rd32(hw, TXGBE_TSC_1588_STMPH); + if (adapter->ptp_tx_skb) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + } + clear_bit_unlock(__TXGBE_PTP_TX_IN_PROGRESS, &adapter->state); +} + +/** + * txgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: the private adapter struct + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the shhwtstamps structure which + * is passed up the network stack + */ +static void txgbe_ptp_tx_hwtstamp(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct skb_shared_hwtstamps shhwtstamps; + u64 regval = 0; + + regval |= (u64)rd32(hw, TXGBE_TSC_1588_STMPL); + regval |= (u64)rd32(hw, TXGBE_TSC_1588_STMPH) << 32; + + txgbe_ptp_convert_to_hwtstamp(adapter, &shhwtstamps, regval); + skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); + + txgbe_ptp_clear_tx_timestamp(adapter); +} + +/** + * txgbe_ptp_tx_hwtstamp_work + * @work: pointer to the work struct + * + * This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware + * timestamp has been taken for the current skb. It is necesary, because the + * descriptor's "done" bit does not correlate with the timestamp event. 
+ */ +static void txgbe_ptp_tx_hwtstamp_work(struct work_struct *work) +{ + struct txgbe_adapter *adapter = container_of(work, struct txgbe_adapter, + ptp_tx_work); + struct txgbe_hw *hw = &adapter->hw; + bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + + TXGBE_PTP_TX_TIMEOUT); + u32 tsynctxctl; + + /* we have to have a valid skb to poll for a timestamp */ + if (!adapter->ptp_tx_skb) { + txgbe_ptp_clear_tx_timestamp(adapter); + return; + } + + /* stop polling once we have a valid timestamp */ + tsynctxctl = rd32(hw, TXGBE_TSC_1588_CTL); + if (tsynctxctl & TXGBE_TSC_1588_CTL_VALID) { + txgbe_ptp_tx_hwtstamp(adapter); + return; + } + + /* check timeout last in case timestamp event just occurred */ + if (timeout) { + txgbe_ptp_clear_tx_timestamp(adapter); + adapter->tx_hwtstamp_timeouts++; + e_warn(drv, "clearing Tx Timestamp hang"); + } else { + /* reschedule to keep checking until we timeout */ + schedule_work(&adapter->ptp_tx_work); + } +} + +/** + * txgbe_ptp_rx_rgtstamp - utility function which checks for RX time stamp + * @q_vector: structure containing interrupt and ring information + * @skb: particular skb to send timestamp with + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the shhwtstamps structure which + * is passed up the network stack + */ +void txgbe_ptp_rx_hwtstamp(struct txgbe_adapter *adapter, struct sk_buff *skb) +{ + struct txgbe_hw *hw = &adapter->hw; + u64 regval = 0; + u32 tsyncrxctl; + + /* + * Read the tsyncrxctl register afterwards in order to prevent taking an + * I/O hit on every packet. + */ + tsyncrxctl = rd32(hw, TXGBE_PSR_1588_CTL); + if (!(tsyncrxctl & TXGBE_PSR_1588_CTL_VALID)) + return; + + regval |= (u64)rd32(hw, TXGBE_PSR_1588_STMPL); + regval |= (u64)rd32(hw, TXGBE_PSR_1588_STMPH) << 32; + + txgbe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); +} + +/** + * txgbe_ptp_get_ts_config - get current hardware timestamping configuration + * @adapter: pointer to adapter structure + * @ifreq: ioctl data + * + * This function returns the current timestamping settings. Rather than + * attempt to deconstruct registers to fill in the values, simply keep a copy + * of the old settings around, and return a copy when requested. + */ +int txgbe_ptp_get_ts_config(struct txgbe_adapter *adapter, struct ifreq *ifr) +{ + struct hwtstamp_config *config = &adapter->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, + sizeof(*config)) ? -EFAULT : 0; +} + +/** + * txgbe_ptp_set_timestamp_mode - setup the hardware for the requested mode + * @adapter: the private txgbe adapter structure + * @config: the hwtstamp configuration requested + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't cause any overhead + * when no packet needs it. At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware + * filters. Not all combinations are supported, in particular event + * type has to be specified. Matching the kind of event packet is + * not supported, with the exception of "all V2 events regardless of + * level 2 or 4". + * + * Since hardware always timestamps Path delay packets when timestamping V2 + * packets, regardless of the type specified in the register, only use V2 + * Event mode. 
This more accurately tells the user what the hardware is going + * to do anyways. + * + * Note: this may modify the hwtstamp configuration towards a more general + * mode, if required to support the specifically requested mode. + */ +static int txgbe_ptp_set_timestamp_mode(struct txgbe_adapter *adapter, + struct hwtstamp_config *config) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 tsync_tx_ctl = TXGBE_TSC_1588_CTL_ENABLED; + u32 tsync_rx_ctl = TXGBE_PSR_1588_CTL_ENABLED; + u32 tsync_rx_mtrl = PTP_EV_PORT << 16; + bool is_l2 = false; + u32 regval; + + /* reserved for future extensions */ + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + tsync_rx_mtrl = 0; + adapter->flags &= ~(TXGBE_FLAG_RX_HWTSTAMP_ENABLED | + TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= TXGBE_PSR_1588_CTL_TYPE_L4_V1; + tsync_rx_mtrl |= TXGBE_PSR_1588_MSGTYPE_V1_SYNC_MSG; + adapter->flags |= (TXGBE_FLAG_RX_HWTSTAMP_ENABLED | + TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= TXGBE_PSR_1588_CTL_TYPE_L4_V1; + tsync_rx_mtrl |= TXGBE_PSR_1588_MSGTYPE_V1_DELAY_REQ_MSG; + adapter->flags |= (TXGBE_FLAG_RX_HWTSTAMP_ENABLED | + TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + tsync_rx_ctl |= TXGBE_PSR_1588_CTL_TYPE_EVENT_V2; + is_l2 = true; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + adapter->flags |= (TXGBE_FLAG_RX_HWTSTAMP_ENABLED | + TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_ALL: + default: + /* register RXMTRL must be set in order to do V1 packets, + * therefore it is not possible to time stamp both V1 Sync and + * Delay_Req messages unless hardware supports timestamping all + * packets => return error + */ + adapter->flags &= ~(TXGBE_FLAG_RX_HWTSTAMP_ENABLED | + TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + config->rx_filter = HWTSTAMP_FILTER_NONE; + return -ERANGE; + } + + /* define ethertype filter for timestamping L2 packets */ + if (is_l2) + wr32(hw, + TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_1588), + (TXGBE_PSR_ETYPE_SWC_FILTER_EN | /* enable filter */ + TXGBE_PSR_ETYPE_SWC_1588 | /* enable timestamping */ + ETH_P_1588)); /* 1588 eth protocol type */ + else + wr32(hw, + TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_1588), + 0); + + /* enable/disable TX */ + regval = rd32(hw, TXGBE_TSC_1588_CTL); + regval &= ~TXGBE_TSC_1588_CTL_ENABLED; + regval |= tsync_tx_ctl; + wr32(hw, TXGBE_TSC_1588_CTL, regval); + + /* enable/disable RX */ + regval = rd32(hw, TXGBE_PSR_1588_CTL); + regval &= ~(TXGBE_PSR_1588_CTL_ENABLED | TXGBE_PSR_1588_CTL_TYPE_MASK); + regval |= tsync_rx_ctl; + wr32(hw, TXGBE_PSR_1588_CTL, regval); + + /* define which PTP packets are time stamped */ + wr32(hw, TXGBE_PSR_1588_MSGTYPE, tsync_rx_mtrl); + + TXGBE_WRITE_FLUSH(hw); + + /* clear TX/RX timestamp state, just to be sure */ + txgbe_ptp_clear_tx_timestamp(adapter); + rd32(hw, TXGBE_PSR_1588_STMPH); + + 
return 0; +} + +/** + * txgbe_ptp_set_ts_config - user entry point for timestamp mode + * @adapter: pointer to adapter struct + * @ifreq: ioctl data + * + * Set hardware to requested mode. If unsupported, return an error with no + * changes. Otherwise, store the mode for future reference. + */ +int txgbe_ptp_set_ts_config(struct txgbe_adapter *adapter, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int err; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = txgbe_ptp_set_timestamp_mode(adapter, &config); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&adapter->tstamp_config, &config, + sizeof(adapter->tstamp_config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +static void txgbe_ptp_link_speed_adjust(struct txgbe_adapter *adapter, + u32 *shift, u32 *incval) +{ + struct txgbe_hw *hw = &adapter->hw; + /** + * Scale the NIC cycle counter by a large factor so that + * relatively small corrections to the frequency can be added + * or subtracted. The drawbacks of a large factor include + * (a) the clock register overflows more quickly, (b) the cycle + * counter structure must be able to convert the systime value + * to nanoseconds using only a multiplier and a right-shift, + * and (c) the value must fit within the timinca register space + * => math based on internal DMA clock rate and available bits + * + * Note that when there is no link, internal DMA clock is same as when + * link speed is 10Gb. Set the registers correctly even when link is + * down to preserve the clock setting + */ + + /*amlite TODO*/ + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + *shift = TXGBE_INCVAL_SHIFT_AML; + *incval = TXGBE_INCVAL_AML; + } else { + switch (adapter->link_speed) { + case TXGBE_LINK_SPEED_10_FULL: + *shift = TXGBE_INCVAL_SHIFT_10; + *incval = TXGBE_INCVAL_10; + break; + case TXGBE_LINK_SPEED_100_FULL: + *shift = TXGBE_INCVAL_SHIFT_100; + *incval = TXGBE_INCVAL_100; + break; + case TXGBE_LINK_SPEED_1GB_FULL: + *shift = TXGBE_INCVAL_SHIFT_1GB; + *incval = TXGBE_INCVAL_1GB; + break; + case TXGBE_LINK_SPEED_10GB_FULL: + default: /* TXGBE_LINK_SPEED_10GB_FULL */ + *shift = TXGBE_INCVAL_SHIFT_10GB; + *incval = TXGBE_INCVAL_10GB; + break; + } + } + return; +} + +/** + * txgbe_ptp_start_cyclecounter - create the cycle counter from hw + * @adapter: pointer to the adapter structure + * + * This function should be called to set the proper values for the TIMINCA + * register and tell the cyclecounter structure what the tick rate of SYSTIME + * is. It does not directly modify SYSTIME registers or the timecounter + * structure. It should be called whenever a new TIMINCA value is necessary, + * such as during initialization or when the link speed changes. + */ +void txgbe_ptp_start_cyclecounter(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + unsigned long flags; + struct cyclecounter cc; + u32 incval = 0; + + /* For some of the boards below this mask is technically incorrect. + * The timestamp mask overflows at approximately 61bits. However the + * particular hardware does not overflow on an even bitmask value. + * Instead, it overflows due to conversion of upper 32bits billions of + * cycles. Timecounters are not really intended for this purpose so + * they do not properly function if the overflow point isn't 2^N-1. + * However, the actual SYSTIME values in question take ~138 years to + * overflow. 
In practice this means they won't actually overflow. A + * proper fix to this problem would require modification of the + * timecounter delta calculations. + */ + cc.mask = CLOCKSOURCE_MASK(64); + cc.mult = 1; + cc.shift = 0; + + cc.read = txgbe_ptp_read; + txgbe_ptp_link_speed_adjust(adapter, &cc.shift, &incval); + wr32(hw, TXGBE_TSC_1588_INC, + TXGBE_TSC_1588_INC_IVP(incval, 2)); + + /* update the base incval used to calculate frequency adjustment */ + WRITE_ONCE(adapter->base_incval, incval); + smp_mb(); + + /* need lock to prevent incorrect read while modifying cyclecounter */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + memcpy(&adapter->hw_cc, &cc, sizeof(adapter->hw_cc)); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); +} + +static void txgbe_ptp_init_systime(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + wr32(hw, TXGBE_TSC_1588_SYSTIML, 0); + wr32(hw, TXGBE_TSC_1588_SYSTIMH, 0); + TXGBE_WRITE_FLUSH(hw); +} + +/** + * txgbe_ptp_reset + * @adapter: the txgbe private board structure + * + * When the MAC resets, all of the hardware configuration for timesync is + * reset. This function should be called to re-enable the device for PTP, + * using the last known settings. However, we do lose the current clock time, + * so we fall back to resetting it based on the kernel's realtime clock. + * + * This function will maintain the hwtstamp_config settings, and it retriggers + * the SDP output if it's enabled. + */ +void txgbe_ptp_reset(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + unsigned long flags; + + /* reset the hardware timestamping mode */ + txgbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + txgbe_ptp_start_cyclecounter(adapter); + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + txgbe_ptp_init_systime(adapter); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + timecounter_init(&adapter->hw_tc, &adapter->hw_cc, + ktime_to_ns(ktime_get_real())); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + adapter->last_overflow_check = jiffies; + + /* Now that the shift has been calculated and the systime + * registers reset, (re-)enable the Clock out feature + */ + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); +} + +/** + * txgbe_ptp_create_clock + * @adapter: the txgbe private adapter structure + * + * This function performs setup of the user entry point function table and + * initializes the PTP clock device used by userspace to access the clock-like + * features of the PTP core. It will be called by txgbe_ptp_init, and may + * re-use a previously initialized clock (such as during a suspend/resume + * cycle). 
+ */ + +static long txgbe_ptp_create_clock(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct txgbe_hw *hw = &adapter->hw; + long err; + + /* do nothing if we already have a clock device */ + if (!IS_ERR_OR_NULL(adapter->ptp_clock)) + return 0; + + snprintf(adapter->ptp_caps.name, sizeof(adapter->ptp_caps.name), + "%s", netdev->name); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 250000000; /* 10^-9s */ + adapter->ptp_caps.n_alarm = 0; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.n_per_out = 0; + + if (hw->mac.type == txgbe_mac_aml || + hw->mac.type == txgbe_mac_aml40) + adapter->ptp_caps.pps = 1; + else + adapter->ptp_caps.pps = 0; + +#ifdef HAVE_PTP_CLOCK_INFO_ADJFINE + adapter->ptp_caps.adjfine = txgbe_ptp_adjfine; +#else + adapter->ptp_caps.adjfreq = txgbe_ptp_adjfreq; +#endif + adapter->ptp_caps.adjtime = txgbe_ptp_adjtime; +#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 +#ifdef HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL + adapter->ptp_caps.gettimex64 = txgbe_ptp_gettimex; +#else + adapter->ptp_caps.gettime64 = txgbe_ptp_gettime64; +#endif /* HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL */ + adapter->ptp_caps.settime64 = txgbe_ptp_settime64; +#else + adapter->ptp_caps.gettime = txgbe_ptp_gettime; + adapter->ptp_caps.settime = txgbe_ptp_settime; +#endif + adapter->ptp_caps.enable = txgbe_ptp_feature_enable; + adapter->ptp_setup_sdp = txgbe_ptp_setup_sdp; + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + pci_dev_to_dev(adapter->pdev)); + if (IS_ERR(adapter->ptp_clock)) { + err = PTR_ERR(adapter->ptp_clock); + adapter->ptp_clock = NULL; + e_dev_err("ptp_clock_register failed\n"); + return err; + } else + e_dev_info("registered PHC device on %s\n", netdev->name); + + /* Set the default timestamp mode to disabled here. We do this in + * create_clock instead of initialization, because we don't want to + * override the previous settings during a suspend/resume cycle. + */ + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + return 0; +} + +/** + * txgbe_ptp_init + * @adapter: the txgbe private adapter structure + * + * This function performs the required steps for enabling ptp + * support. If ptp support has already been loaded it simply calls the + * cyclecounter init routine and exits. + */ +void txgbe_ptp_init(struct txgbe_adapter *adapter) +{ + /* initialize the spin lock first, since the user might call the clock + * functions any time after we've initialized the ptp clock device. + */ + spin_lock_init(&adapter->tmreg_lock); + + /* obtain a ptp clock device, or re-use an existing device */ + if (txgbe_ptp_create_clock(adapter)) + return; + + /* we have a clock, so we can intialize work for timestamps now */ + INIT_WORK(&adapter->ptp_tx_work, txgbe_ptp_tx_hwtstamp_work); + + /* reset the ptp related hardware bits */ + txgbe_ptp_reset(adapter); + + /* enter the TXGBE_PTP_RUNNING state */ + set_bit(__TXGBE_PTP_RUNNING, &adapter->state); + + return; +} + +/** + * txgbe_ptp_suspend - stop ptp work items + * @adapter: pointer to adapter struct + * + * This function suspends ptp activity, and prevents more work from being + * generated, but does not destroy the clock device. 
+ */ +void txgbe_ptp_suspend(struct txgbe_adapter *adapter) +{ + /* leave the TXGBE_PTP_RUNNING STATE */ + if (!test_and_clear_bit(__TXGBE_PTP_RUNNING, &adapter->state)) + return; + + adapter->flags2 &= ~TXGBE_FLAG2_PTP_PPS_ENABLED; + + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); + + cancel_work_sync(&adapter->ptp_tx_work); + txgbe_ptp_clear_tx_timestamp(adapter); +} + +/** + * txgbe_ptp_stop - destroy the ptp_clock device + * @adapter: pointer to adapter struct + * + * Completely destroy the ptp_clock device, and disable all PTP related + * features. Intended to be run when the device is being closed. + */ +void txgbe_ptp_stop(struct txgbe_adapter *adapter) +{ + /* first, suspend ptp activity */ + txgbe_ptp_suspend(adapter); + + /* now destroy the ptp clock device */ + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + adapter->ptp_clock = NULL; + e_dev_info("removed PHC on %s\n", + adapter->netdev->name); + } +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c new file mode 100644 index 0000000000000000000000000000000000000000..2c5e0f8542d29445023b6f7f5ec61f8ae4dc1e05 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c @@ -0,0 +1,2171 @@ +/* + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on txgbe_sriov.c, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "txgbe.h" +#include "txgbe_type.h" +#include "txgbe_sriov.h" + +static void txgbe_set_vf_rx_tx(struct txgbe_adapter *adapter, int vf); +static int txgbe_set_queue_rate_limit_vf(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf); + +#ifdef CONFIG_PCI_IOV +static int __txgbe_enable_sriov(struct txgbe_adapter *adapter, + unsigned int num_vfs) +{ + struct txgbe_hw *hw = &adapter->hw; + int num_vf_macvlans, i; + struct vf_macvlans *mv_list; + u32 value = 0; + + adapter->flags |= TXGBE_FLAG_SRIOV_ENABLED; + e_dev_info("SR-IOV enabled with %d VFs\n", num_vfs); + + if (num_vfs != 1) { + if (adapter->ring_feature[RING_F_RSS].indices == 4) + value = TXGBE_CFG_PORT_CTL_NUM_VT_32; + else /* adapter->ring_feature[RING_F_RSS].indices <= 2 */ + value = TXGBE_CFG_PORT_CTL_NUM_VT_64; + } + wr32m(hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_NUM_VT_MASK, + value); + + /* Enable VMDq flag so device will be set in VM mode */ + adapter->flags |= TXGBE_FLAG_VMDQ_ENABLED; + if (!adapter->ring_feature[RING_F_VMDQ].limit) + adapter->ring_feature[RING_F_VMDQ].limit = 1; + adapter->ring_feature[RING_F_VMDQ].offset = num_vfs; + + num_vf_macvlans = hw->mac.num_rar_entries - + (TXGBE_MAX_PF_MACVLANS + 1 + num_vfs); + + adapter->mv_list = mv_list = kcalloc(num_vf_macvlans, + sizeof(struct vf_macvlans), + GFP_KERNEL); + if (mv_list) { + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&adapter->vf_mvs.l); + for (i = 0; i < num_vf_macvlans; i++) { + mv_list->vf = -1; + mv_list->free = true; + list_add(&mv_list->l, &adapter->vf_mvs.l); + mv_list++; + } + } + + /* Initialize default switching mode VEB */ + wr32m(hw, TXGBE_PSR_CTL, + TXGBE_PSR_CTL_SW_EN, TXGBE_PSR_CTL_SW_EN); + + /* If call to enable VFs succeeded then allocate memory + * for per VF control structures. + */ + adapter->vfinfo = kcalloc(num_vfs, + sizeof(struct vf_data_storage), GFP_KERNEL); + if (!adapter->vfinfo) + return -ENOMEM; + + adapter->num_vfs = num_vfs; + + /* enable L2 switch and replication */ + adapter->flags |= TXGBE_FLAG_SRIOV_L2SWITCH_ENABLE | + TXGBE_FLAG_SRIOV_REPLICATION_ENABLE; + + /* limit traffic classes based on VFs enabled */ + if (adapter->num_vfs < 16) { + adapter->dcb_cfg.num_tcs.pg_tcs = + TXGBE_DCB_MAX_TRAFFIC_CLASS; + adapter->dcb_cfg.num_tcs.pfc_tcs = + TXGBE_DCB_MAX_TRAFFIC_CLASS; + } else if (adapter->num_vfs < 32) { + adapter->dcb_cfg.num_tcs.pg_tcs = 4; + adapter->dcb_cfg.num_tcs.pfc_tcs = 4; + } else { + adapter->dcb_cfg.num_tcs.pg_tcs = 1; + adapter->dcb_cfg.num_tcs.pfc_tcs = 1; + } + adapter->dcb_cfg.vt_mode = true; + +#ifdef TXGBE_DISABLE_VF_MQ + /* We do not support RSS w/ SR-IOV */ + adapter->ring_feature[RING_F_RSS].limit = 1; +#endif + + /* Disable RSC when in SR-IOV mode */ + adapter->flags2 &= ~(TXGBE_FLAG2_RSC_CAPABLE | + TXGBE_FLAG2_RSC_ENABLED); + + /* enable spoof checking for all VFs */ + for (i = 0; i < adapter->num_vfs; i++) { + /* enable spoof checking for all VFs */ + adapter->vfinfo[i].spoofchk_enabled = true; + adapter->vfinfo[i].link_enable = true; + adapter->vfinfo[i].link_state = TXGBE_VF_LINK_STATE_AUTO; + +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN + /* We support VF RSS querying only for 82599 and x540 + * devices at the moment. These devices share RSS + * indirection table and RSS hash key with PF therefore + * we want to disable the querying by default. 
+ */ + adapter->vfinfo[i].rss_query_enabled = 0; + +#endif + + /* Untrust all VFs */ + adapter->vfinfo[i].trusted = false; + + /* set the default xcast mode */ + adapter->vfinfo[i].xcast_mode = TXGBEVF_XCAST_MODE_NONE; + } + + return 0; +} + +#define TXGBE_BA4_ADDR(vfinfo, reg) \ + ((u8 __iomem *)((u8 *)(vfinfo)->b4_addr + (reg))) +static int txgbe_vf_backup(struct txgbe_adapter *adapter, u16 vf) +{ + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + + if (!vfinfo->b4_addr) + return -1; +#if 0 + for (i = 0; i < 16; i++) { + vfinfo->b4_buf[i] = + txgbe_rd32(TXGBE_BA4_ADDR(vfinfo, i * 4)); + } +#endif + return 0; +} + +static int txgbe_vf_restore(struct txgbe_adapter *adapter, u16 vf) +{ + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + + if (!vfinfo->b4_addr) + return -1; +#if 0 + for (i = 0; i < 16; i++) { + txgbe_wr32(TXGBE_BA4_ADDR(vfinfo, i * 4), + vfinfo->b4_buf[i]); + } +#endif + return 0; +} + +/** + * txgbe_get_vfs - Find and take references to all vf devices + * @adapter: Pointer to adapter struct + */ +static void txgbe_get_vfs(struct txgbe_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + u16 vendor = pdev->vendor; + struct pci_dev *vfdev; + int vf = 0; + u16 vf_id; + int pos; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return; + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); + + vfdev = pci_get_device(vendor, vf_id, NULL); + for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) { + struct vf_data_storage *vfinfo; + if (!vfdev->is_virtfn) + continue; + if (vfdev->physfn != pdev) + continue; + if (vf >= adapter->num_vfs) + continue; + + /*pci_dev_get(vfdev);*/ + vfinfo = &adapter->vfinfo[vf]; + vfinfo->vfdev = vfdev; + vfinfo->b4_addr = ioremap(pci_resource_start(vfdev, 4), 64); +#ifdef CONFIG_PCI_IOV + txgbe_vf_backup(adapter, vf); +#endif + ++vf; + } +} + +/** + * txgbe_put_vfs - Release references to all vf devices + * @adapter: Pointer to adapter struct + */ +static void txgbe_put_vfs(struct txgbe_adapter *adapter) +{ + unsigned int num_vfs = adapter->num_vfs, vf; + + /* put the reference to all of the vf devices */ + for (vf = 0; vf < num_vfs; ++vf) { + struct vf_data_storage *vfinfo; + struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; + + if (!vfdev) + continue; + +#ifdef CONFIG_PCI_IOV + txgbe_vf_restore(adapter, vf); +#endif + + vfinfo = &adapter->vfinfo[vf]; + iounmap(vfinfo->b4_addr); + vfinfo->b4_addr = NULL; + vfinfo->vfdev = NULL; + /*pci_dev_put(vfdev);*/ + } +} + +/* Note this function is called when the user wants to enable SR-IOV + * VFs using the now deprecated module parameter + */ +void txgbe_enable_sriov(struct txgbe_adapter *adapter) +{ + int pre_existing_vfs = 0; + unsigned int num_vfs; + + pre_existing_vfs = pci_num_vf(adapter->pdev); + if (!pre_existing_vfs && !adapter->max_vfs) + return; + + /* If there are pre-existing VFs then we have to force + * use of that many - override any module parameter value. + * This may result from the user unloading the PF driver + * while VFs were assigned to guest VMs or because the VFs + * have been created via the new PCI SR-IOV sysfs interface. 
+ */ + if (pre_existing_vfs) { + num_vfs = pre_existing_vfs; + dev_warn(&adapter->pdev->dev, + "Virtual Functions already enabled for this device -" + "Please reload all VF drivers to avoid spoofed packet " + "errors\n"); + } else { + int err; + /* + * The sapphire/amber-lite supports up to 64 VFs per physical + * function but this implementation limits allocation to 63 so + * that basic networking resources are still available to the + * physical function. If the user requests greater thn + * 63 VFs then it is an error - reset to default of zero. + */ + num_vfs = min_t(unsigned int, adapter->max_vfs, + TXGBE_MAX_VFS_DRV_LIMIT); + + err = pci_enable_sriov(adapter->pdev, num_vfs); + if (err) { + e_err(probe, "Failed to enable PCI sriov: %d\n", err); + adapter->num_vfs = 0; + return; + } + } + + if (!__txgbe_enable_sriov(adapter, num_vfs)) { + txgbe_get_vfs(adapter); + return; + } + + /* If we have gotten to this point then there is no memory available + * to manage the VF devices - print message and bail. + */ + e_err(probe, "Unable to allocate memory for VF Data Storage - " + "SRIOV disabled\n"); + txgbe_disable_sriov(adapter); +} +#endif /* CONFIG_PCI_IOV */ + +int txgbe_disable_sriov(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + +#ifdef CONFIG_PCI_IOV + /* + * If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (pci_vfs_assigned(adapter->pdev)) { + e_dev_warn("Unloading driver while VFs are assigned -" + "VFs will not be deallocated\n"); + return -EPERM; + } + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); +#endif + + /* set num VFs to 0 to prevent access to vfinfo */ + adapter->num_vfs = 0; + + /* put the reference to all of the vf devices */ +#ifdef CONFIG_PCI_IOV + txgbe_put_vfs(adapter); +#endif + + /* free VF control structures */ + kfree(adapter->vfinfo); + adapter->vfinfo = NULL; + + /* free macvlan list */ + kfree(adapter->mv_list); + adapter->mv_list = NULL; + + /* if SR-IOV is already disabled then there is nothing to do */ + if (!(adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)) + return 0; + +#if 0 +#ifdef CONFIG_PCI_IOV + /* + * If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (pci_vfs_assigned(adapter->pdev)) { + e_dev_warn("Unloading driver while VFs are assigned -" + "VFs will not be deallocated\n"); + return -EPERM; + } + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); +#endif +#endif + /* set default pool back to 0 */ + wr32m(hw, TXGBE_PSR_VM_CTL, + TXGBE_PSR_VM_CTL_POOL_MASK, 0); + TXGBE_WRITE_FLUSH(hw); + + adapter->ring_feature[RING_F_VMDQ].offset = 0; + + /* take a breather then clean up driver data */ + msleep(100); + + adapter->flags &= ~(TXGBE_FLAG_SRIOV_ENABLED | + TXGBE_FLAG_SRIOV_L2SWITCH_ENABLE | + TXGBE_FLAG_SRIOV_REPLICATION_ENABLE); + + /* Disable VMDq flag so device will be set in VM mode */ + if (adapter->ring_feature[RING_F_VMDQ].limit == 1) { + adapter->flags &= ~TXGBE_FLAG_VMDQ_ENABLED; + + } + + return 0; +} + +static int txgbe_set_vf_multicasts(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u16 entries = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK) + >> TXGBE_VT_MSGINFO_SHIFT; + u16 *hash_list = (u16 *)&msgbuf[1]; + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + struct txgbe_hw *hw = &adapter->hw; + int i; + u32 vector_bit; + u32 
vector_reg; + u32 mta_reg; + u32 vmolr = rd32(hw, TXGBE_PSR_VM_L2CTL(vf)); + + /* only so many hash values supported */ + entries = min(entries, (u16)TXGBE_MAX_VF_MC_ENTRIES); + + /* salt away the number of multi cast addresses assigned + * to this VF for later use to restore when the PF multi cast + * list changes + */ + vfinfo->num_vf_mc_hashes = entries; + + /* VFs are limited to using the MTA hash table for their multicast + * addresses */ + for (i = 0; i < entries; i++) + vfinfo->vf_mc_hashes[i] = hash_list[i]; + + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { + vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; + vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; + /* errata 5: maintain a copy of the register table conf */ + mta_reg = hw->mac.mta_shadow[vector_reg]; + mta_reg |= (1 << vector_bit); + hw->mac.mta_shadow[vector_reg] = mta_reg; + wr32(hw, TXGBE_PSR_MC_TBL(vector_reg), mta_reg); + } + vmolr |= TXGBE_PSR_VM_L2CTL_ROMPE; + wr32(hw, TXGBE_PSR_VM_L2CTL(vf), vmolr); + + return 0; +} + +void txgbe_restore_vf_multicasts(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct vf_data_storage *vfinfo; + u32 i, j; + u32 vector_bit; + u32 vector_reg; + + for (i = 0; i < adapter->num_vfs; i++) { + u32 vmolr = rd32(hw, TXGBE_PSR_VM_L2CTL(i)); + vfinfo = &adapter->vfinfo[i]; + for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { + hw->addr_ctrl.mta_in_use++; + vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; + vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; + wr32m(hw, TXGBE_PSR_MC_TBL(vector_reg), + 1 << vector_bit, 1 << vector_bit); + /* errata 5: maintain a copy of the reg table conf */ + hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); + } + if (vfinfo->num_vf_mc_hashes) + vmolr |= TXGBE_PSR_VM_L2CTL_ROMPE; + else + vmolr &= ~TXGBE_PSR_VM_L2CTL_ROMPE; + wr32(hw, TXGBE_PSR_VM_L2CTL(i), vmolr); + } + + /* Restore any VF macvlans */ + txgbe_full_sync_mac_table(adapter); +} + +int txgbe_set_vf_vlan(struct txgbe_adapter *adapter, int add, int vid, u16 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; + + return TCALL(hw, mac.ops.set_vfta, vid, vf, (bool)add); +} + +static int txgbe_set_vf_lpe(struct txgbe_adapter *adapter, u32 max_frame, + u32 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 max_frs, reg_val; + + /* + * For sapphire/amber-lite we have to keep all PFs and VFs operating + * with the same max_frame value in order to avoid sending an oversize + * frame to a VF. 
In order to guarantee this is handled correctly + * for all cases we have several special exceptions to take into + * account before we can enable the VF for receive + */ + u32 reg_offset, vf_shift, vfre; + s32 err = 0; + + /* determine VF receive enable location */ + vf_shift = vf % 32; + reg_offset = vf / 32; + + /* enable or disable receive depending on error */ + vfre = rd32(hw, TXGBE_RDM_VF_RE(reg_offset)); + if (err) + vfre &= ~(1 << vf_shift); + else + vfre |= 1 << vf_shift; + wr32(hw, TXGBE_RDM_VF_RE(reg_offset), vfre); + + /* pull current max frame size from hardware */ + max_frs = rd32(hw, TXGBE_PSR_MAX_SZ); + if (max_frs < max_frame) { + wr32(hw, TXGBE_PSR_MAX_SZ, max_frame); + } + + /* pull current max frame size from hardware */ + max_frs = DIV_ROUND_UP(max_frame, 1024); + reg_val = rd32(hw, TXGBE_MAC_WDG_TIMEOUT) & + TXGBE_MAC_WDG_TIMEOUT_WTO_MASK; + if (max_frs > (reg_val + TXGBE_MAC_WDG_TIMEOUT_WTO_DELTA)) { + wr32(hw, TXGBE_MAC_WDG_TIMEOUT, + max_frs - TXGBE_MAC_WDG_TIMEOUT_WTO_DELTA); + } + + e_info(hw, "VF requests change max MTU to %d\n", max_frame); + + return 0; +} + +void txgbe_set_vmolr(struct txgbe_hw *hw, u16 vf, bool aupe) +{ + u32 vmolr = rd32(hw, TXGBE_PSR_VM_L2CTL(vf)); + vmolr |= TXGBE_PSR_VM_L2CTL_BAM; + if (aupe) + vmolr |= TXGBE_PSR_VM_L2CTL_AUPE; + else + vmolr &= ~TXGBE_PSR_VM_L2CTL_AUPE; + wr32(hw, TXGBE_PSR_VM_L2CTL(vf), vmolr); +} + +static void txgbe_set_vmvir(struct txgbe_adapter *adapter, + u16 vid, u16 qos, u16 vf, __be16 vlan_proto) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | + TXGBE_TDM_VLAN_INS_VLANA_DEFAULT; + + if (vlan_proto == htons(ETH_P_8021AD)) + vmvir |= 1 << TXGBE_TDM_VLAN_INS_TPID_SEL_SHIFT; + wr32(hw, TXGBE_TDM_VLAN_INS(vf), vmvir); +} + +static void txgbe_clear_vmvir(struct txgbe_adapter *adapter, u32 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + + wr32(hw, TXGBE_TDM_VLAN_INS(vf), 0); +} + +static inline void txgbe_vf_reset_event(struct txgbe_adapter *adapter, u16 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + u8 num_tcs = netdev_get_num_tc(adapter->netdev); + + /* add PF assigned VLAN or VLAN 0 */ + txgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); + + /* reset offloads to defaults */ + txgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan); + + /* set outgoing tags for VFs */ + if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { + txgbe_clear_vmvir(adapter, vf); + } else { + if (vfinfo->pf_qos || !num_tcs) + txgbe_set_vmvir(adapter, vfinfo->pf_vlan, + vfinfo->pf_qos, vf, vfinfo->vlan_proto); + else + txgbe_set_vmvir(adapter, vfinfo->pf_vlan, + adapter->default_up, vf, vfinfo->vlan_proto); + + if (vfinfo->spoofchk_enabled) + TCALL(hw, mac.ops.set_vlan_anti_spoofing, true, vf); + } + + /* reset multicast table array for vf */ + adapter->vfinfo[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + txgbe_set_rx_mode(adapter->netdev); + + txgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + + /* reset VF api back to unknown */ + adapter->vfinfo[vf].vf_api = txgbe_mbox_api_10; +} + +int txgbe_set_vf_mac(struct txgbe_adapter *adapter, + u16 vf, unsigned char *mac_addr) +{ + s32 retval = 0; + txgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + retval = txgbe_add_mac_filter(adapter, mac_addr, vf); + if (retval >= 0) + memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, + ETH_ALEN); + else + memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN); + + return retval; +} 
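As an aside on the register indexing used in txgbe_set_vf_lpe() above (and again in txgbe_vf_reset_msg() further down): each 32-bit per-VF enable register covers 32 pools, so the pool number is split into a register offset (vf / 32) and a bit position within that register (vf % 32). Below is a minimal standalone sketch of that arithmetic in plain C, illustrative only and not part of this patch; the VF number is an assumed example value.

#include <stdio.h>

int main(void)
{
    /* Assumed example pool index, not a value taken from the patch. */
    unsigned int vf = 37;
    unsigned int reg_offset = vf / 32;  /* which 32-bit enable register: 1 */
    unsigned int vf_shift = vf % 32;    /* bit within that register: 5 */
    unsigned int vfre = 0;

    vfre |= 1u << vf_shift;             /* receive-enable bit for this VF */
    printf("VF %u -> register %u, bit %u, mask 0x%08x\n",
           vf, reg_offset, vf_shift, vfre);
    return 0;
}

txgbe_write_qde() and txgbe_write_hide_vlan() later in the file apply the same split per queue, walking queues-per-pool bits for each VF rather than a single bit.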
+ +static int txgbe_negotiate_vf_api(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + int api = msgbuf[1]; + + switch (api) { + case txgbe_mbox_api_10: + case txgbe_mbox_api_11: + case txgbe_mbox_api_12: + case txgbe_mbox_api_13: + case txgbe_mbox_api_21: + case txgbe_mbox_api_22: + adapter->vfinfo[vf].vf_api = api; + return 0; + default: + break; + } + + e_info(drv, "VF %d requested invalid api version %u\n", vf, api); + + return -1; +} + +static int txgbe_get_vf_queues(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct net_device *dev = adapter->netdev; + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + unsigned int default_tc = 0; + u8 num_tcs = netdev_get_num_tc(dev); + + /* verify the PF is supporting the correct APIs */ + switch (adapter->vfinfo[vf].vf_api) { + case txgbe_mbox_api_22: + case txgbe_mbox_api_21: + case txgbe_mbox_api_20: + case txgbe_mbox_api_13: + case txgbe_mbox_api_12: + case txgbe_mbox_api_11: + break; + default: + return -1; + } + + /* only allow 1 Tx queue for bandwidth limiting */ + msgbuf[TXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); + msgbuf[TXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); + + /* if TCs > 1 determine which TC belongs to default user priority */ + if (num_tcs > 1) + default_tc = netdev_get_prio_tc_map(dev, adapter->default_up); + + /* notify VF of need for VLAN tag stripping, and correct queue */ + if (num_tcs) + msgbuf[TXGBE_VF_TRANS_VLAN] = num_tcs; + else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos) + msgbuf[TXGBE_VF_TRANS_VLAN] = 1; + else + msgbuf[TXGBE_VF_TRANS_VLAN] = 0; + + /* notify VF of default queue */ + msgbuf[TXGBE_VF_DEF_QUEUE] = default_tc; + + return 0; +} + +static int txgbe_set_vf_macvlan(struct txgbe_adapter *adapter, + u16 vf, int index, unsigned char *mac_addr) +{ + struct list_head *pos; + struct vf_macvlans *entry; + s32 retval = 0; + + if (index <= 1) { + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->vf == vf) { + entry->vf = -1; + entry->free = true; + entry->is_macvlan = false; + txgbe_del_mac_filter(adapter, + entry->vf_macvlan, vf); + } + } + } + + /* + * If index was zero then we were asked to clear the uc list + * for the VF. We're done. + */ + if (!index) + return 0; + + entry = NULL; + + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->free) + break; + } + + /* + * If we traversed the entire list and didn't find a free entry + * then we're out of space on the RAR table. Also entry may + * be NULL because the original memory allocation for the list + * failed, which is not fatal but does mean we can't support + * VF requests for MACVLAN because we couldn't allocate + * memory for the list management required. 
+ */ + if (!entry || !entry->free) + return -ENOSPC; + + retval = txgbe_add_mac_filter(adapter, mac_addr, vf); + if (retval >= 0) { + entry->free = false; + entry->is_macvlan = true; + entry->vf = vf; + memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); + } + + return retval; +} + +#ifdef CONFIG_PCI_IOV +int txgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) +{ + unsigned char vf_mac_addr[6]; + struct txgbe_adapter *adapter = pci_get_drvdata(pdev); + unsigned int vfn = (event_mask & 0x3f); + bool enable = ((event_mask & 0x10000000U) != 0); + + if (enable) { + memset(vf_mac_addr, 0, ETH_ALEN); + memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); + } + + return 0; +} +#endif /* CONFIG_PCI_IOV */ + +static inline void txgbe_write_qde(struct txgbe_adapter *adapter, u32 vf, + u32 qde) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + u32 reg = 0; + u32 i = vf * q_per_pool; + u32 n = i / 32; + reg = rd32(hw, TXGBE_RDM_PF_QDE(n)); + for (i = (vf * q_per_pool - n * 32); + i < ((vf + 1) * q_per_pool - n * 32); + i++) { + if (qde == 1) + reg |= qde << i; + else + reg &= qde << i; + } + + wr32(hw, TXGBE_RDM_PF_QDE(n), reg); + +} + +static inline void txgbe_write_hide_vlan(struct txgbe_adapter *adapter, u32 vf, + u32 hide_vlan) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + u32 reg = 0; + u32 i = vf * q_per_pool; + u32 n = i / 32; + reg = rd32(hw, TXGBE_RDM_PF_HIDE(n)); + for (i = (vf * q_per_pool - n * 32); + i < ((vf + 1) * q_per_pool - n * 32); + i++) { + if (hide_vlan == 1) + reg |= hide_vlan << i; + else + reg &= hide_vlan << i; + } + + wr32(hw, TXGBE_RDM_PF_HIDE(n), reg); +} + +static int txgbe_vf_reset_msg(struct txgbe_adapter *adapter, u16 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; + u32 reg = 0; + u32 reg_offset, vf_shift; + u32 msgbuf[4] = {0, 0, 0, 0}; + u8 *addr = (u8 *)(&msgbuf[1]); + struct net_device *dev = adapter->netdev; + int pf_max_frame; + + e_info(probe, "VF Reset msg received from vf %d\n", vf); + +#ifdef CONFIG_PCI_IOV + txgbe_vf_restore(adapter, vf); +#endif + + /* reset the filters for the device */ + txgbe_vf_reset_event(adapter, vf); + + /* set vf mac address */ + if (!is_zero_ether_addr(vf_mac)) + txgbe_set_vf_mac(adapter, vf, vf_mac); + + vf_shift = vf % 32; + reg_offset = vf / 32; + + /* force drop enable for all VF Rx queues */ + txgbe_write_qde(adapter, vf, 1); + + /* set transmit and receive for vf */ + txgbe_set_vf_rx_tx(adapter, vf); + + pf_max_frame = dev->mtu + ETH_HLEN; + +#if IS_ENABLED(CONFIG_FCOE) + if (dev->features & NETIF_F_FCOE_MTU) + pf_max_frame = max_t(int, pf_max_frame, + TXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif /* CONFIG_FCOE */ + + if (pf_max_frame > ETH_FRAME_LEN) + reg = (1 << vf_shift); + wr32(hw, TXGBE_RDM_VFRE_CLR(reg_offset), reg); + + /* enable VF mailbox for further messages */ + adapter->vfinfo[vf].clear_to_send = true; + + /* reply to reset with ack and vf mac address */ + msgbuf[0] = TXGBE_VF_RESET; + if (!is_zero_ether_addr(vf_mac)) { + msgbuf[0] |= TXGBE_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] |= TXGBE_VT_MSGTYPE_NACK; + dev_warn(pci_dev_to_dev(adapter->pdev), + "VF %d has no MAC address assigned, you may have to " + "assign one manually\n", vf); + } + + /* + * Piggyback the multicast 
filter type so VF can compute the + * correct vectors + */ + msgbuf[3] = hw->mac.mc_filter_type; + txgbe_write_mbx(hw, msgbuf, TXGBE_VF_PERMADDR_MSG_LEN, vf); + + return 0; +} + +static int txgbe_set_vf_mac_addr(struct txgbe_adapter *adapter, + u32 *msgbuf, u16 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } + + if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && + memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac, + ETH_ALEN)) { + u8 *pm = adapter->vfinfo[vf].vf_mac_addresses; + e_warn(drv, + "VF %d attempted to set a new MAC address but it already " + "has an administratively set MAC address " + "%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", + vf, pm[0], pm[1], pm[2], pm[3], pm[4], pm[5]); + e_warn(drv, "Check the VF driver and if it is not using the " + "correct MAC address you may need to reload the VF " + "driver\n"); + return -1; + } + return txgbe_set_vf_mac(adapter, vf, new_mac) < 0; +} + +#ifdef CONFIG_PCI_IOV +static int txgbe_find_vlvf_entry(struct txgbe_hw *hw, u32 vlan) +{ + u32 vlvf; + s32 regindex; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the vlan id in the VLVF entries */ + for (regindex = 1; regindex < TXGBE_PSR_VLAN_SWC_ENTRIES; regindex++) { + wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, regindex); + vlvf = rd32(hw, TXGBE_PSR_VLAN_SWC); + if ((vlvf & VLAN_VID_MASK) == vlan) + break; + } + + /* Return a negative value if not found */ + if (regindex >= TXGBE_PSR_VLAN_SWC_ENTRIES) + regindex = -1; + + return regindex; +} +#endif /* CONFIG_PCI_IOV */ + +static int txgbe_set_vf_vlan_msg(struct txgbe_adapter *adapter, + u32 *msgbuf, u16 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + int add = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK) >> TXGBE_VT_MSGINFO_SHIFT; + int vid = (msgbuf[1] & TXGBE_PSR_VLAN_SWC_VLANID_MASK); + int vlan_offload = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK) >> TXGBE_VT_MSGINFO_VLAN_OFFLOAD_SHIFT; + int err; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (adapter->vfinfo[vf].pf_vlan || tcs) { + if (!vlan_offload) + return 0; + else { + e_warn(drv, + "VF %d attempted to override administratively set VLAN " + "configuration\n" + "Reload the VF driver to resume operations\n", + vf); + return -1; + } + } + + if (add) + adapter->vfinfo[vf].vlan_count++; + else if (adapter->vfinfo[vf].vlan_count) + adapter->vfinfo[vf].vlan_count--; + + /* in case of promiscuous mode any VLAN filter set for a VF must + * also have the PF pool added to it. + */ + if (add && adapter->netdev->flags & IFF_PROMISC) + err = txgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + + err = txgbe_set_vf_vlan(adapter, add, vid, vf); + if (!err && adapter->vfinfo[vf].spoofchk_enabled) + TCALL(hw, mac.ops.set_vlan_anti_spoofing, true, vf); + +#ifdef CONFIG_PCI_IOV + /* Go through all the checks to see if the VLAN filter should + * be wiped completely. + */ + if (!add && adapter->netdev->flags & IFF_PROMISC) { + u32 bits, vlvf; + s32 reg_ndx; + + reg_ndx = txgbe_find_vlvf_entry(hw, vid); + if (reg_ndx < 0) + goto out; + wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, reg_ndx); + vlvf = rd32(hw, TXGBE_PSR_VLAN_SWC); + /* See if any other pools are set for this VLAN filter + * entry other than the PF. 
+ */ + if (VMDQ_P(0) < 32) { + bits = rd32(hw, TXGBE_PSR_VLAN_SWC_VM_L); + bits &= ~(1 << VMDQ_P(0)); + bits |= rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + } else { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + bits &= ~(1 << (VMDQ_P(0) - 32)); + bits |= rd32(hw, TXGBE_PSR_VLAN_SWC_VM_L); + } + + /* If the filter was removed then ensure PF pool bit + * is cleared if the PF only added itself to the pool + * because the PF is in promiscuous mode. + */ + if ((vlvf & VLAN_VID_MASK) == vid && +#ifndef HAVE_VLAN_RX_REGISTER + !test_bit(vid, adapter->active_vlans) && +#endif + !bits) + txgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + } + +out: +#endif + return err; +} + +static int txgbe_set_vf_macvlan_msg(struct txgbe_adapter *adapter, + u32 *msgbuf, u16 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + int index = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK) >> + TXGBE_VT_MSGINFO_SHIFT; + int err; + + if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted + && index > 0) { + e_warn(drv, + "VF %d requested MACVLAN filter but is administratively denied\n", + vf); + return 0; + } + + /* A non-zero index indicates the VF is setting a filter */ + if (index) { + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } +#if defined(IFLA_VF_MAX) && defined(HAVE_VF_SPOOFCHK_CONFIGURE) + /* + * If the VF is allowed to set MAC filters then turn off + * anti-spoofing to avoid false positives. + */ + if (adapter->vfinfo[vf].spoofchk_enabled) + txgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false); +#endif /* defined(IFLA_VF_MAX) && defined(HAVE_VF_SPOOFCHK_CONFIGURE) */ + } + + err = txgbe_set_vf_macvlan(adapter, vf, index, new_mac); + if (err == -ENOSPC) + e_warn(drv, + "VF %d has requested a MACVLAN filter but there is no " + "space for it\n", + vf); + + return err < 0; +} + +static int txgbe_update_vf_xcast_mode(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + int xcast_mode = msgbuf[1]; + u32 vmolr, disable, enable; + + /* verify the PF is supporting the correct APIs */ + switch (adapter->vfinfo[vf].vf_api) { + case txgbe_mbox_api_12: + /* promisc introduced in 1.3 version */ + if (xcast_mode == TXGBEVF_XCAST_MODE_PROMISC) + return -EOPNOTSUPP; + /* fall through */ + case txgbe_mbox_api_13: + case txgbe_mbox_api_20: + case txgbe_mbox_api_21: + case txgbe_mbox_api_22: + break; + default: + return -EOPNOTSUPP; + } + + if (xcast_mode > TXGBEVF_XCAST_MODE_MULTI && + !adapter->vfinfo[vf].trusted) { + xcast_mode = TXGBEVF_XCAST_MODE_MULTI; + } + + if (adapter->vfinfo[vf].xcast_mode == xcast_mode) + goto out; + + switch (xcast_mode) { + case TXGBEVF_XCAST_MODE_NONE: + disable = TXGBE_PSR_VM_L2CTL_BAM | TXGBE_PSR_VM_L2CTL_ROMPE | + TXGBE_PSR_VM_L2CTL_MPE | TXGBE_PSR_VM_L2CTL_UPE | TXGBE_PSR_VM_L2CTL_VPE; + enable = 0; + break; + case TXGBEVF_XCAST_MODE_MULTI: + disable = TXGBE_PSR_VM_L2CTL_MPE | TXGBE_PSR_VM_L2CTL_UPE | TXGBE_PSR_VM_L2CTL_VPE; + enable = TXGBE_PSR_VM_L2CTL_BAM | TXGBE_PSR_VM_L2CTL_ROMPE; + break; + case TXGBEVF_XCAST_MODE_ALLMULTI: + disable = TXGBE_PSR_VM_L2CTL_UPE; + enable = TXGBE_PSR_VM_L2CTL_BAM | TXGBE_PSR_VM_L2CTL_ROMPE | + TXGBE_PSR_VM_L2CTL_MPE | TXGBE_PSR_VM_L2CTL_VPE; + break; + case TXGBEVF_XCAST_MODE_PROMISC: + disable = 0; + enable = TXGBE_PSR_VM_L2CTL_BAM | TXGBE_PSR_VM_L2CTL_ROMPE | + TXGBE_PSR_VM_L2CTL_MPE | TXGBE_PSR_VM_L2CTL_UPE | TXGBE_PSR_VM_L2CTL_VPE; + break; + default: + return -EOPNOTSUPP; + } + + vmolr = rd32(hw, TXGBE_PSR_VM_L2CTL(vf)); + vmolr &= ~disable; + vmolr |= 
enable; + wr32(hw, TXGBE_PSR_VM_L2CTL(vf), vmolr); + + adapter->vfinfo[vf].xcast_mode = xcast_mode; + +out: + msgbuf[1] = xcast_mode; + + return 0; +} + +static int txgbe_get_vf_link_state(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u32 *link_state = &msgbuf[1]; + + /* verify the PF is supporting the correct API */ + switch (adapter->vfinfo[vf].vf_api) { + case txgbe_mbox_api_12: + case txgbe_mbox_api_13: + case txgbe_mbox_api_21: + case txgbe_mbox_api_22: + break; + default: + return -EOPNOTSUPP; + } + *link_state = adapter->vfinfo[vf].link_state; + + return 0; +} + +static int txgbe_get_fw_version(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u32 *fw_version = &msgbuf[1]; + char *end = NULL; + + /* verify the PF is supporting the correct API */ + switch (adapter->vfinfo[vf].vf_api) { + case txgbe_mbox_api_12: + case txgbe_mbox_api_13: + case txgbe_mbox_api_21: + case txgbe_mbox_api_22: + break; + default: + return -EOPNOTSUPP; + } + + *fw_version = simple_strtoul(adapter->eeprom_id, &end, 16); + if (adapter->eeprom_id == end || strlen(end)) + return -EOPNOTSUPP; + + return 0; +} + +static int txgbe_add_5tuple_filter_vf(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct txgbe_5tuple_filter_info *filter = &adapter->ft_filter_info; + struct txgbe_hw *hw = &adapter->hw; + u16 index, sw_idx, i, j; + + /* + * look for an unused 5tuple filter index, + * and insert the filter to list. + */ + for (sw_idx = 0; sw_idx < TXGBE_MAX_RDB_5T_CTL0_FILTERS; sw_idx++) { + i = sw_idx / (sizeof(uint32_t) * 8); + j = sw_idx % (sizeof(uint32_t) * 8); + if (!(filter->fivetuple_mask[i] & (1 << j))) { + filter->fivetuple_mask[i] |= 1 << j; + break; + } + } + if (sw_idx >= TXGBE_MAX_RDB_5T_CTL0_FILTERS) { + e_err(drv, "5tuple filters are full.\n"); + return -ENOSYS; + } + + /* convert filter index on each vf to the global index */ + index = msgbuf[TXGBEVF_5T_CMD] & 0xFFFF; + adapter->vfinfo[vf].ft_filter_idx[index] = sw_idx; + + /* pool index */ + msgbuf[TXGBEVF_5T_CTRL0] |= vf << TXGBE_RDB_5T_CTL0_POOL_SHIFT; + /* compute absolute queue index */ + msgbuf[TXGBEVF_5T_CTRL1] += (vf * adapter->num_rx_queues_per_pool) << + TXGBE_RDB_5T_CTL1_RING_SHIFT; + + wr32(hw, TXGBE_RDB_5T_CTL0(sw_idx), msgbuf[TXGBEVF_5T_CTRL0]); + wr32(hw, TXGBE_RDB_5T_CTL1(sw_idx), msgbuf[TXGBEVF_5T_CTRL1]); + wr32(hw, TXGBE_RDB_5T_SDP(sw_idx), msgbuf[TXGBEVF_5T_PORT]); + wr32(hw, TXGBE_RDB_5T_DA(sw_idx), msgbuf[TXGBEVF_5T_DA]); + wr32(hw, TXGBE_RDB_5T_SA(sw_idx), msgbuf[TXGBEVF_5T_SA]); + + return 0; +} + +static void txgbe_del_5tuple_filter_vf(struct txgbe_adapter *adapter, + u32 cmd, u32 vf) +{ + struct txgbe_5tuple_filter_info *filter = &adapter->ft_filter_info; + struct txgbe_hw *hw = &adapter->hw; + u16 index, sw_idx; + + /* convert the global index to filter index on each vf */ + index = cmd & 0xFFFF; + sw_idx = adapter->vfinfo[vf].ft_filter_idx[index]; + + filter->fivetuple_mask[sw_idx / (sizeof(uint32_t) * 8)] &= + ~(1 << (sw_idx % (sizeof(uint32_t) * 8))); + + wr32(hw, TXGBE_RDB_5T_CTL0(sw_idx), 0); + wr32(hw, TXGBE_RDB_5T_CTL1(sw_idx), 0); + wr32(hw, TXGBE_RDB_5T_SDP(sw_idx), 0); + wr32(hw, TXGBE_RDB_5T_DA(sw_idx), 0); + wr32(hw, TXGBE_RDB_5T_SA(sw_idx), 0); +} + +static int txgbe_set_5tuple_filter_vf(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u32 cmd = msgbuf[TXGBEVF_5T_CMD]; + bool add; + + /* verify the PF is supporting the correct API */ + if (adapter->vfinfo[vf].vf_api < txgbe_mbox_api_21) + return -EOPNOTSUPP; + + add = !!(cmd & BIT(TXGBEVF_5T_ADD_SHIFT)); + if (add) + 
return txgbe_add_5tuple_filter_vf(adapter, msgbuf, vf); + + txgbe_del_5tuple_filter_vf(adapter, cmd, vf); + + return 0; +} + +static int txgbe_rcv_msg_from_vf(struct txgbe_adapter *adapter, u16 vf) +{ + u16 mbx_size = TXGBE_VXMAILBOX_SIZE; + u32 msgbuf[TXGBE_VXMAILBOX_SIZE]; + struct txgbe_hw *hw = &adapter->hw; + s32 retval; + + retval = txgbe_read_mbx(hw, msgbuf, mbx_size, vf); + + if (retval) { + pr_err("Error receiving message from VF\n"); + return retval; + } + + /* this is a message we already processed, do nothing */ + if (msgbuf[0] & (TXGBE_VT_MSGTYPE_ACK | TXGBE_VT_MSGTYPE_NACK)) + return retval; + + /* flush the ack before we write any messages back */ + TXGBE_WRITE_FLUSH(hw); + + if (msgbuf[0] == TXGBE_VF_RESET) + return txgbe_vf_reset_msg(adapter, vf); + + /* + * until the vf completes a virtual function reset it should not be + * allowed to start any configuration. + */ + + if (!adapter->vfinfo[vf].clear_to_send) { + msgbuf[0] |= TXGBE_VT_MSGTYPE_NACK; + txgbe_write_mbx(hw, msgbuf, 1, vf); + return retval; + } + + switch ((msgbuf[0] & 0xFFFF)) { + case TXGBE_VF_SET_MAC_ADDR: + retval = txgbe_set_vf_mac_addr(adapter, msgbuf, vf); + break; + case TXGBE_VF_SET_MULTICAST: + retval = txgbe_set_vf_multicasts(adapter, msgbuf, vf); + break; + case TXGBE_VF_SET_VLAN: + retval = txgbe_set_vf_vlan_msg(adapter, msgbuf, vf); + break; + case TXGBE_VF_SET_LPE: + if (msgbuf[1] > TXGBE_MAX_JUMBO_FRAME_SIZE) { + e_err(drv, "VF max_frame %d out of range\n", msgbuf[1]); + return -EINVAL; + } + retval = txgbe_set_vf_lpe(adapter, msgbuf[1], vf); + break; + case TXGBE_VF_SET_MACVLAN: + retval = txgbe_set_vf_macvlan_msg(adapter, msgbuf, vf); + break; + case TXGBE_VF_API_NEGOTIATE: + retval = txgbe_negotiate_vf_api(adapter, msgbuf, vf); + break; + case TXGBE_VF_GET_QUEUES: + retval = txgbe_get_vf_queues(adapter, msgbuf, vf); + break; + case TXGBE_VF_UPDATE_XCAST_MODE: + retval = txgbe_update_vf_xcast_mode(adapter, msgbuf, vf); + break; + case TXGBE_VF_GET_LINK_STATE: + retval = txgbe_get_vf_link_state(adapter, msgbuf, vf); + break; + case TXGBE_VF_GET_FW_VERSION: + retval = txgbe_get_fw_version(adapter, msgbuf, vf); + break; + case TXGBE_VF_SET_5TUPLE: + retval = txgbe_set_5tuple_filter_vf(adapter, msgbuf, vf); + break; + case TXGBE_VF_QUEUE_RATE_LIMIT: + retval = txgbe_set_queue_rate_limit_vf(adapter, msgbuf, vf); + break; + case TXGBE_VF_BACKUP: +#ifdef CONFIG_PCI_IOV + retval = txgbe_vf_backup(adapter, vf); +#endif + break; + default: + e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); + retval = TXGBE_ERR_MBX; + break; + } + + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= TXGBE_VT_MSGTYPE_NACK; + else + msgbuf[0] |= TXGBE_VT_MSGTYPE_ACK; + + msgbuf[0] |= TXGBE_VT_MSGTYPE_CTS; + + txgbe_write_mbx(hw, msgbuf, mbx_size, vf); + + return retval; +} + +static void txgbe_rcv_ack_from_vf(struct txgbe_adapter *adapter, u16 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 msg = TXGBE_VT_MSGTYPE_NACK; + + /* if device isn't clear to send it shouldn't be reading either */ + if (!adapter->vfinfo[vf].clear_to_send) + txgbe_write_mbx(hw, &msg, 1, vf); +} + +void txgbe_msg_task(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u16 vf; + + for (vf = 0; vf < adapter->num_vfs; vf++) { + /* process any reset requests */ + if (!txgbe_check_for_rst(hw, vf)) + txgbe_vf_reset_event(adapter, vf); + + /* process any messages pending */ + if (!txgbe_check_for_msg(hw, vf)) + txgbe_rcv_msg_from_vf(adapter, vf); + + /* process any acks */ + if 
(!txgbe_check_for_ack(hw, vf)) + txgbe_rcv_ack_from_vf(adapter, vf); + } +} + +void txgbe_disable_tx_rx(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + /* disable transmit and receive for all vfs */ + wr32(hw, TXGBE_TDM_VF_TE(0), 0); + wr32(hw, TXGBE_TDM_VF_TE(1), 0); + + wr32(hw, TXGBE_RDM_VF_RE(0), 0); + wr32(hw, TXGBE_RDM_VF_RE(1), 0); +} + +static inline void txgbe_ping_vf(struct txgbe_adapter *adapter, int vf) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 ping; + + ping = TXGBE_PF_CONTROL_MSG; + if (adapter->vfinfo[vf].clear_to_send) + ping |= TXGBE_VT_MSGTYPE_CTS; + txgbe_write_mbx(hw, &ping, 1, vf); +} + +void txgbe_ping_all_vfs(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 ping; + u16 i; + + for (i = 0 ; i < adapter->num_vfs; i++) { + ping = TXGBE_PF_CONTROL_MSG; + if (adapter->vfinfo[i].clear_to_send) + ping |= TXGBE_VT_MSGTYPE_CTS; + txgbe_write_mbx(hw, &ping, 1, i); + } +} + + +void txgbe_ping_vf_with_link_status(struct txgbe_adapter *adapter, bool link_up, u16 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 msgbuf[2] = {0, 0}; + + if (vf > adapter->num_vfs) + return; + + msgbuf[0] = TXGBE_PF_NOFITY_VF_LINK_STATUS | TXGBE_PF_CONTROL_MSG; + msgbuf[1] = (adapter->speed << 1) | link_up; + //if (adapter->notify_down) + // msgbuf[1] |= TXGBE_PF_NOFITY_VF_NET_NOT_RUNNING; + if (adapter->vfinfo[vf].clear_to_send) + msgbuf[0] |= TXGBE_VT_MSGTYPE_CTS; + txgbe_write_mbx(hw, msgbuf, 2, vf); +} + +void txgbe_ping_all_vfs_with_link_status(struct txgbe_adapter *adapter, bool link_up) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 msgbuf[2] = {0, 0}; + u16 i; + + if (!adapter->num_vfs) + return; + + msgbuf[0] = TXGBE_PF_NOFITY_VF_LINK_STATUS | TXGBE_PF_CONTROL_MSG; + if (link_up) + msgbuf[1] = (adapter->speed << 1) | link_up; + //if (adapter->notify_down) + // msgbuf[1] |= TXGBE_PF_NOFITY_VF_NET_NOT_RUNNING; + for (i = 0 ; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[i].clear_to_send) + msgbuf[0] |= TXGBE_VT_MSGTYPE_CTS; + txgbe_write_mbx(hw, msgbuf, 2, i); + } +} + +/** + * txgbe_set_all_vfs - update vfs queues + * @adapter: Pointer to adapter struct + * + * Update setting transmit and receive queues for all vfs + **/ +void txgbe_set_all_vfs(struct txgbe_adapter *adapter) +{ + int i; + + for (i = 0 ; i < adapter->num_vfs; i++) { + txgbe_set_vf_link_state(adapter, i, + adapter->vfinfo[i].link_state); + } +} + +#ifdef HAVE_NDO_SET_VF_TRUST +int txgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->num_vfs) + return -EINVAL; + + /* nothing to do */ + if (adapter->vfinfo[vf].trusted == setting) + return 0; + + adapter->vfinfo[vf].trusted = setting; + + /* reset VF to reconfigure features */ + adapter->vfinfo[vf].clear_to_send = false; + txgbe_ping_vf(adapter, vf); + + e_info(drv, "VF %u is %strusted\n", vf, setting ? 
"" : "not "); + + return 0; +} +#endif + +static int txgbe_pci_sriov_enable(struct pci_dev __maybe_unused *dev, + int __maybe_unused num_vfs) +{ + int err = 0; +#ifdef CONFIG_PCI_IOV + struct txgbe_adapter *adapter = pci_get_drvdata(dev); + int i; + int pre_existing_vfs = pci_num_vf(dev); + + if (!(adapter->flags & TXGBE_FLAG_SRIOV_CAPABLE)) { + e_dev_warn("SRIOV not supported on this device\n"); + return -EOPNOTSUPP; + } + + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + err = txgbe_disable_sriov(adapter); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + goto out; + + if (err) + goto err_out; + + /* While the SR-IOV capability structure reports total VFs to be + * 64 we limit the actual number that can be allocated to 63 so + * that some transmit/receive resources can be reserved to the + * PF. The PCI bus driver already checks for other values out of + * range. + */ + if ((num_vfs + adapter->num_vmdqs) > TXGBE_MAX_VF_FUNCTIONS) { + err = -EPERM; + goto err_out; + } + + err = __txgbe_enable_sriov(adapter, num_vfs); + if (err) + goto err_out; + + for (i = 0; i < adapter->num_vfs; i++) + txgbe_vf_configuration(dev, (i | 0x10000000)); + + /* reset before enabling SRIOV to avoid mailbox issues */ + txgbe_sriov_reinit(adapter); + + err = pci_enable_sriov(dev, num_vfs); + if (err) { + e_dev_warn("Failed to enable PCI sriov: %d\n", err); + goto err_out; + } + txgbe_get_vfs(adapter); + +out: + return num_vfs; + +err_out: +#endif + + return err; +} + +static int txgbe_pci_sriov_disable(struct pci_dev *dev) +{ + struct txgbe_adapter *adapter = pci_get_drvdata(dev); + int err; +#ifdef CONFIG_PCI_IOV + u32 current_flags = adapter->flags; +#endif + + err = txgbe_disable_sriov(adapter); + + /* Only reinit if no error and state changed */ +#ifdef CONFIG_PCI_IOV + if (!err && current_flags != adapter->flags) + txgbe_sriov_reinit(adapter); +#endif + + return err; +} + +int txgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + if (num_vfs == 0) + return txgbe_pci_sriov_disable(dev); + else + return txgbe_pci_sriov_enable(dev, num_vfs); +} + +#ifdef IFLA_VF_MAX +int txgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + s32 retval = 0; + struct txgbe_adapter *adapter = netdev_priv(netdev); + + if (vf < 0 || (vf >= adapter->num_vfs)) + return -EINVAL; + + if (is_valid_ether_addr(mac)) { + dev_info(pci_dev_to_dev(adapter->pdev), + "setting MAC %pM on VF %d\n", mac, vf); + dev_info(pci_dev_to_dev(adapter->pdev), + "Reload the VF driver to make this change effective.\n"); + retval = txgbe_set_vf_mac(adapter, vf, mac); + if (retval >= 0) { + adapter->vfinfo[vf].pf_set_mac = true; + if (test_bit(__TXGBE_DOWN, &adapter->state)) { + dev_warn(pci_dev_to_dev(adapter->pdev), + "The VF MAC address has been set, but the PF " + "device is not up.\n"); + dev_warn(pci_dev_to_dev(adapter->pdev), + "Bring the PF device up before attempting to " + "use the VF device.\n"); + } + } else { + dev_warn(pci_dev_to_dev(adapter->pdev), + "The VF MAC address was NOT set due to invalid or " + "duplicate MAC address.\n"); + } + } else if (is_zero_ether_addr(mac)) { + unsigned char *vf_mac_addr = + adapter->vfinfo[vf].vf_mac_addresses; + + /* nothing to do */ + if (is_zero_ether_addr(vf_mac_addr)) + return 0; + + dev_info(pci_dev_to_dev(adapter->pdev), "removing MAC on VF %d\n", + vf); + + retval = txgbe_del_mac_filter(adapter, vf_mac_addr, vf); + if (retval >= 0) { + adapter->vfinfo[vf].pf_set_mac = false; + memcpy(vf_mac_addr, mac, ETH_ALEN); + } else { + 
dev_warn(pci_dev_to_dev(adapter->pdev), "Could NOT remove the VF MAC address.\n"); + } + } else { + retval = -EINVAL; + } + return retval; +} + +static int txgbe_enable_port_vlan(struct txgbe_adapter *adapter, + int vf, u16 vlan, u8 qos, __be16 vlan_proto) +{ + struct txgbe_hw *hw = &adapter->hw; + int err; + + err = txgbe_set_vf_vlan(adapter, true, vlan, vf); + if (err) + goto out; + txgbe_set_vmvir(adapter, vlan, qos, vf, vlan_proto); + txgbe_set_vmolr(hw, vf, false); + if (adapter->vfinfo[vf].spoofchk_enabled) + TCALL(hw, mac.ops.set_vlan_anti_spoofing, true, vf); + adapter->vfinfo[vf].vlan_count++; + /* enable hide vlan */ + txgbe_write_qde(adapter, vf, 1); + txgbe_write_hide_vlan(adapter, vf, 1); + adapter->vfinfo[vf].pf_vlan = vlan; + adapter->vfinfo[vf].pf_qos = qos; + adapter->vfinfo[vf].vlan_proto = vlan_proto; + dev_info(pci_dev_to_dev(adapter->pdev), + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__TXGBE_DOWN, &adapter->state)) { + dev_warn(pci_dev_to_dev(adapter->pdev), + "The VF VLAN has been set, but the PF device is not " + "up.\n"); + dev_warn(pci_dev_to_dev(adapter->pdev), + "Bring the PF device up before attempting to use the VF " + "device.\n"); + } + +out: + return err; +} + +static int txgbe_disable_port_vlan(struct txgbe_adapter *adapter, int vf) +{ + struct txgbe_hw *hw = &adapter->hw; + int err; + + err = txgbe_set_vf_vlan(adapter, false, + adapter->vfinfo[vf].pf_vlan, vf); + txgbe_clear_vmvir(adapter, vf); + txgbe_set_vmolr(hw, vf, true); + TCALL(hw, mac.ops.set_vlan_anti_spoofing, false, vf); + if (adapter->vfinfo[vf].vlan_count) + adapter->vfinfo[vf].vlan_count--; + /* disable hide vlan */ + txgbe_write_hide_vlan(adapter, vf, 0); + adapter->vfinfo[vf].pf_vlan = 0; + adapter->vfinfo[vf].pf_qos = 0; + adapter->vfinfo[vf].vlan_proto = 0; + + return err; +} +#ifdef IFLA_VF_MAX +#ifdef IFLA_VF_VLAN_INFO_MAX +int txgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, + u8 qos, __be16 vlan_proto) +#else +int txgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) +#endif +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); +#ifndef IFLA_VF_VLAN_INFO_MAX + __be16 vlan_proto = htons(ETH_P_8021Q); +#endif + int err = 0; + + /* VLAN IDs accepted range 0-4094 */ + if ((vf >= adapter->num_vfs) || (vlan > VLAN_VID_MASK-1) || (qos > 7)) + return -EINVAL; +#ifdef IFLA_VF_VLAN_INFO_MAX + if (vlan_proto != htons(ETH_P_8021Q) && vlan_proto != htons(ETH_P_8021AD)) + return -EPROTONOSUPPORT; +#endif + + if (vlan || qos) { + /* + * Check if there is already a port VLAN set, if so + * we have to delete the old one first before we + * can set the new one. The usage model had + * previously assumed the user would delete the + * old port VLAN before setting a new one but this + * is not necessarily the case. 
+ */ + if (adapter->vfinfo[vf].pf_vlan) + err = txgbe_disable_port_vlan(adapter, vf); + if (err) + goto out; + err = txgbe_enable_port_vlan(adapter, vf, vlan, qos, vlan_proto); + } else { + err = txgbe_disable_port_vlan(adapter, vf); + } +out: + return err; +} +#endif /* IFLA_VF_MAX */ + +int txgbe_link_mbps(struct txgbe_adapter *adapter) +{ + switch (adapter->link_speed) { + case TXGBE_LINK_SPEED_40GB_FULL: + return 40000; + case TXGBE_LINK_SPEED_25GB_FULL: + return 25000; + case TXGBE_LINK_SPEED_10GB_FULL: + return 10000; + case TXGBE_LINK_SPEED_1GB_FULL: + return 1000; + default: + return 0; + } +} + +u16 txgbe_frac_to_bi(u16 frac, u16 denom, int max_bits) +{ + u16 value = 0; + + while (frac > 0 && max_bits > 0) { + max_bits -= 1; + frac *= 2; + if (frac >= denom) { + value |= BIT(max_bits); + frac -= denom; + } + } + + return value; +} + +static void txgbe_set_vf_rate_limit(struct txgbe_adapter *adapter, int vf) +{ + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct txgbe_hw *hw = &adapter->hw; + u32 bcnrc_val; + int factor_int; + int factor_fra; + int link_speed; + u16 queue, queues_per_pool; + u16 max_tx_rate = adapter->vfinfo[vf].max_tx_rate; +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + u16 min_tx_rate = adapter->vfinfo[vf].min_tx_rate; +#endif + + /* determine how many queues per pool based on VMDq mask */ + queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + + /* + * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported + * and 0x004 otherwise. + */ + wr32(hw, TXGBE_TDM_MMW, 0x14); + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + if (max_tx_rate) { + u16 frac; + + link_speed = adapter->vf_rate_link_speed / 1000 * 1024; + + /* Calculate the rate factor values to set */ + factor_int = link_speed / max_tx_rate; + frac = (link_speed % max_tx_rate) * 10000 / max_tx_rate; + factor_fra = txgbe_frac_to_bi(frac, 10000, 14); + + wr32(hw, TXGBE_TDM_RL_VM_IDX, vf); + wr32m(hw, TXGBE_TDM_RL_VM_CFG, + TXGBE_TDM_FACTOR_INT_MASK, factor_int << TXGBE_TDM_FACTOR_INT_SHIFT); + wr32m(hw, TXGBE_TDM_RL_VM_CFG, + TXGBE_TDM_FACTOR_FRA_MASK, factor_fra << TXGBE_TDM_FACTOR_FRA_SHIFT); + wr32m(hw, TXGBE_TDM_RL_VM_CFG, + TXGBE_TDM_RL_EN, TXGBE_TDM_RL_EN); + } else { + wr32(hw, TXGBE_TDM_RL_VM_IDX, vf); + wr32m(hw, TXGBE_TDM_RL_VM_CFG, + TXGBE_TDM_RL_EN, 0); + } + } else { + max_tx_rate /= queues_per_pool; + bcnrc_val = TXGBE_TDM_RP_RATE_MAX(max_tx_rate); +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + min_tx_rate /= queues_per_pool; + bcnrc_val |= TXGBE_TDM_RP_RATE_MIN(min_tx_rate); +#endif + /* write value for all Tx queues belonging to VF */ + for (queue = 0; queue < queues_per_pool; queue++) { + unsigned int reg_idx = (vf * queues_per_pool) + queue; + + wr32(hw, TXGBE_TDM_RP_IDX, reg_idx); + wr32(hw, TXGBE_TDM_RP_RATE, bcnrc_val); + if (max_tx_rate) + wr32m(hw, TXGBE_TDM_RP_CTL, + TXGBE_TDM_RP_CTL_RLEN, TXGBE_TDM_RP_CTL_RLEN); + else + wr32m(hw, TXGBE_TDM_RP_CTL, + TXGBE_TDM_RP_CTL_RLEN, 0); + } + } +} + +void txgbe_check_vf_rate_limit(struct txgbe_adapter *adapter) +{ + int i; + + /* VF Tx rate limit was not set */ + if (!adapter->vf_rate_link_speed) + return; + + if (txgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) { + adapter->vf_rate_link_speed = 0; + dev_info(pci_dev_to_dev(adapter->pdev), + "Link speed has been changed. 
VF Transmit rate is disabled\n"); + } + + for (i = 0; i < adapter->num_vfs; i++) { + if (!adapter->vf_rate_link_speed) + adapter->vfinfo[i].max_tx_rate = 0; + + txgbe_set_vf_rate_limit(adapter, i); + } +} + +static int +txgbe_set_queue_rate_limit_vf(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct txgbe_hw *hw = &adapter->hw; + u16 queue, queues_per_pool, max_tx_rate; + int factor_int, factor_fra, link_speed; + u32 reg_idx; + + if (hw->mac.type != txgbe_mac_aml) + return -EOPNOTSUPP; + + /* verify the PF is supporting the correct API */ + if (adapter->vfinfo[vf].vf_api < txgbe_mbox_api_22) + return -EOPNOTSUPP; + + /* determine how many queues per pool based on VMDq mask */ + queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + + queue = msgbuf[TXGBEVF_Q_RATE_INDEX]; + max_tx_rate = msgbuf[TXGBEVF_Q_RATE_LIMIT]; + + /* convert queue index on each vf to the global index */ + reg_idx = (vf * queues_per_pool) + queue; + + /* + * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported + * and 0x004 otherwise. + */ + wr32(hw, TXGBE_TDM_MMW, 0x14); + + if (max_tx_rate) { + u16 frac; + + link_speed = txgbe_link_mbps(adapter) / 1000 * 1024; + + /* Calculate the rate factor values to set */ + factor_int = link_speed / max_tx_rate; + frac = (link_speed % max_tx_rate) * 10000 / max_tx_rate; + factor_fra = txgbe_frac_to_bi(frac, 10000, 14); + + wr32(hw, TXGBE_TDM_RL_QUEUE_IDX, reg_idx); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_FACTOR_INT_MASK, factor_int << TXGBE_TDM_FACTOR_INT_SHIFT); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_FACTOR_FRA_MASK, factor_fra << TXGBE_TDM_FACTOR_FRA_SHIFT); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_RL_EN, TXGBE_TDM_RL_EN); + } else { + wr32(hw, TXGBE_TDM_RL_QUEUE_IDX, reg_idx); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_RL_EN, 0); + } + + adapter->vfinfo[vf].queue_max_tx_rate[queue] = max_tx_rate; + e_info(drv, "set vf %d queue %d max_tx_rate to %d Mbps", + vf, queue, max_tx_rate); + + return 0; +} + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +int txgbe_ndo_set_vf_bw(struct net_device *netdev, + int vf, + int min_tx_rate, + int max_tx_rate) +#else +int txgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int max_tx_rate) +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + int link_speed; + + /* verify VF is active */ + if (vf >= adapter->num_vfs) + return -EINVAL; + + /* verify link is up */ + if (!adapter->link_up) + return -EINVAL; + + /* verify we are linked at 1 or 10 Gbps */ + if (adapter->link_speed < TXGBE_LINK_SPEED_1GB_FULL) + return -EINVAL; + + link_speed = txgbe_link_mbps(adapter); + /* rate limit cannot be less than 10Mbs or greater than link speed */ + if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed))) + return -EINVAL; + + /* store values */ +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + adapter->vfinfo[vf].min_tx_rate = min_tx_rate; +#endif + adapter->vf_rate_link_speed = link_speed; + adapter->vfinfo[vf].max_tx_rate = max_tx_rate; + + /* update hardware configuration */ + txgbe_set_vf_rate_limit(adapter, vf); + + return 0; +} + +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE +int txgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u32 regval; + + if (vf >= adapter->num_vfs) + return -EINVAL; + 
+ adapter->vfinfo[vf].spoofchk_enabled = setting; + + if (vf < 32) { + regval = (setting << vf); + wr32m(hw, TXGBE_TDM_MAC_AS_L, + regval | (1 << vf), regval); + + if (adapter->vfinfo[vf].vlan_count) { + wr32m(hw, TXGBE_TDM_VLAN_AS_L, + regval | (1 << vf), regval); + } + } else { + regval = (setting << (vf - 32)); + wr32m(hw, TXGBE_TDM_MAC_AS_H, + regval | (1 << (vf - 32)), regval); + + if (adapter->vfinfo[vf].vlan_count) { + wr32m(hw, TXGBE_TDM_VLAN_AS_H, + regval | (1 << (vf - 32)), regval); + } + } + return 0; +} +#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */ + +/** + * txgbe_set_vf_rx_tx - Set VF rx tx + * @adapter: Pointer to adapter struct + * @vf: VF identifier + * + * Set or reset correct transmit and receive for vf + **/ +static void txgbe_set_vf_rx_tx(struct txgbe_adapter *adapter, int vf) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx; + u32 reg_offset, vf_shift; + + vf_shift = vf % 32; + reg_offset = vf / 32; + + reg_cur_tx = rd32(hw, TXGBE_TDM_VF_TE(reg_offset)); + reg_cur_rx = rd32(hw, TXGBE_RDM_VF_RE(reg_offset)); + + if (adapter->vfinfo[vf].link_enable) { + reg_req_tx = reg_cur_tx | 1 << vf_shift; + reg_req_rx = reg_cur_rx | 1 << vf_shift; + /* Enable particular VF */ + if (reg_cur_tx != reg_req_tx) + wr32(hw, TXGBE_TDM_VF_TE(reg_offset), reg_req_tx); + if (reg_cur_rx != reg_req_rx) + wr32(hw, TXGBE_RDM_VF_RE(reg_offset), reg_req_rx); + } else { + reg_req_tx = 1 << vf_shift; + reg_req_rx = 1 << vf_shift; + /* Disable particular VF */ + if (reg_cur_tx & reg_req_tx) + wr32(hw, TXGBE_TDM_VFTE_CLR(reg_offset), reg_req_tx); + if (reg_cur_rx & reg_req_rx) + wr32(hw, TXGBE_RDM_VFRE_CLR(reg_offset), reg_req_rx); + } + if(adapter->vfinfo[vf].link_state == IFLA_VF_LINK_STATE_ENABLE && + !(rd32(hw,TXGBE_MAC_TX_CFG) & TXGBE_MAC_TX_CFG_TE)) { + wr32m(hw,TXGBE_MAC_TX_CFG,TXGBE_MAC_TX_CFG_TE, + TXGBE_MAC_TX_CFG_TE); + TXGBE_WRITE_FLUSH(hw); + wr32m(hw,TXGBE_MAC_TX_CFG,TXGBE_MAC_TX_CFG_TE, + TXGBE_MAC_TX_CFG_TE); + } + + +} + +/** + * txgbe_set_vf_link_state - Set link state + * @adapter: Pointer to adapter struct + * @vf: VF identifier + * @state: required link state + * + * Set a link force state on/off a single vf + **/ +void txgbe_set_vf_link_state(struct txgbe_adapter *adapter, int vf, int state) +{ + bool link_up = adapter->link_up; + adapter->vfinfo[vf].link_state = state; + + switch (state) { + case TXGBE_VF_LINK_STATE_AUTO: + if (test_bit(__TXGBE_DOWN, &adapter->state)) { + adapter->vfinfo[vf].link_enable = false; + } else { + link_up = adapter->link_up; + adapter->vfinfo[vf].link_enable = true; + } + break; + case TXGBE_VF_LINK_STATE_ENABLE: + adapter->vfinfo[vf].link_enable = true; + link_up = true; + break; + case TXGBE_VF_LINK_STATE_DISABLE: + adapter->vfinfo[vf].link_enable = false; + link_up = false; + break; + } + + /* restart the VF */ + adapter->vfinfo[vf].clear_to_send = false; + txgbe_ping_vf(adapter, vf); + + txgbe_ping_vf_with_link_status(adapter, link_up, vf); + + txgbe_set_vf_rx_tx(adapter, vf); +} + +#ifdef HAVE_NDO_SET_VF_LINK_STATE + +/** + * txgbe_ndo_set_vf_link_state - Set link state + * @netdev: network interface device structure + * @vf: VF identifier + * @state: required link state + * + * Set the link state of a specified VF, regardless of physical link state + **/ +int txgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + if (vf < 0 || vf >= adapter->num_vfs) { + dev_err(pci_dev_to_dev(adapter->pdev), + "NDO set VF link 
- invalid VF identifier %d\n", vf); + ret = -EINVAL; + goto out; + } + + switch (state) { + case IFLA_VF_LINK_STATE_ENABLE: + dev_info(pci_dev_to_dev(adapter->pdev), + "NDO set VF %d link state enable\n", vf); + txgbe_set_vf_link_state(adapter, vf, TXGBE_VF_LINK_STATE_ENABLE); + break; + case IFLA_VF_LINK_STATE_DISABLE: + dev_info(pci_dev_to_dev(adapter->pdev), + "NDO set VF %d link state disable\n", vf); + txgbe_set_vf_link_state(adapter, vf, TXGBE_VF_LINK_STATE_DISABLE); + break; + case IFLA_VF_LINK_STATE_AUTO: + dev_info(pci_dev_to_dev(adapter->pdev), + "NDO set VF %d link state auto\n", vf); + txgbe_set_vf_link_state(adapter, vf, TXGBE_VF_LINK_STATE_AUTO); + break; + default: + dev_err(pci_dev_to_dev(adapter->pdev), + "NDO set VF %d - invalid link state %d\n", vf, state); + ret = -EINVAL; + } +out: + return ret; +} +#endif /* HAVE_NDO_SET_VF_LINK_STATE */ + +int txgbe_trans_vf_link_state(int state) +{ + switch(state){ + case TXGBE_VF_LINK_STATE_ENABLE: + return IFLA_VF_LINK_STATE_ENABLE; + case TXGBE_VF_LINK_STATE_DISABLE: + return IFLA_VF_LINK_STATE_DISABLE; + case TXGBE_VF_LINK_STATE_AUTO: + return IFLA_VF_LINK_STATE_AUTO; + } + return IFLA_VF_LINK_STATE_AUTO; +} + +int txgbe_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + if (vf >= adapter->num_vfs) + return -EINVAL; + ivi->vf = vf; + memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + ivi->max_tx_rate = adapter->vfinfo[vf].max_tx_rate; + ivi->min_tx_rate = adapter->vfinfo[vf].min_tx_rate; +#else + ivi->tx_rate = adapter->vfinfo[vf].max_tx_rate; +#endif + + ivi->vlan = adapter->vfinfo[vf].pf_vlan; + ivi->qos = adapter->vfinfo[vf].pf_qos; +#ifdef IFLA_VF_VLAN_INFO_MAX + ivi->vlan_proto = adapter->vfinfo[vf].vlan_proto; +#endif +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; +#endif +#ifdef HAVE_NDO_SET_VF_TRUST + ivi->trusted = adapter->vfinfo[vf].trusted; +#endif +#ifdef HAVE_NDO_SET_VF_LINK_STATE + ivi->linkstate = txgbe_trans_vf_link_state(adapter->vfinfo[vf].link_state); +#endif + + return 0; +} +#endif /* IFLA_VF_MAX */ + diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.h new file mode 100644 index 0000000000000000000000000000000000000000..1e119cc5c0afb633d12f1d8994b0ddf46dd38fcd --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.h @@ -0,0 +1,85 @@ +/* + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ */
+
+
+#ifndef _TXGBE_SRIOV_H_
+#define _TXGBE_SRIOV_H_
+
+/* The txgbe driver limits the max number of VFs that can be enabled to
+ * 63 (TXGBE_MAX_VF_FUNCTIONS - 1)
+ */
+#define TXGBE_MAX_VFS_DRV_LIMIT (TXGBE_MAX_VF_FUNCTIONS - 1)
+
+#define TXGBE_VF_LINK_STATE_DISABLE 0
+#define TXGBE_VF_LINK_STATE_AUTO 1
+#define TXGBE_VF_LINK_STATE_ENABLE 2
+
+void txgbe_restore_vf_multicasts(struct txgbe_adapter *adapter);
+int txgbe_set_vf_vlan(struct txgbe_adapter *adapter, int add, int vid, u16 vf);
+void txgbe_set_vmolr(struct txgbe_hw *hw, u16 vf, bool aupe);
+void txgbe_msg_task(struct txgbe_adapter *adapter);
+int txgbe_set_vf_mac(struct txgbe_adapter *adapter,
+		     u16 vf, unsigned char *mac_addr);
+void txgbe_disable_tx_rx(struct txgbe_adapter *adapter);
+void txgbe_ping_all_vfs(struct txgbe_adapter *adapter);
+void txgbe_ping_all_vfs_with_link_status(struct txgbe_adapter *adapter, bool link_up);
+void txgbe_ping_vf_with_link_status(struct txgbe_adapter *adapter, bool link_up, u16 vf);
+int txgbe_trans_vf_link_state(int state);
+void txgbe_set_all_vfs(struct txgbe_adapter *adapter);
+#ifdef IFLA_VF_MAX
+int txgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
+#ifdef IFLA_VF_VLAN_INFO_MAX
+int txgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
+			  u8 qos, __be16 vlan_proto);
+#else
+int txgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
+			  u8 qos);
+#endif
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+int txgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
+			int max_tx_rate);
+#else
+int txgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+int txgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
+#endif
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+int txgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state);
+#endif
+#ifdef HAVE_NDO_SET_VF_TRUST
+int txgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
+#endif
+int txgbe_ndo_get_vf_config(struct net_device *netdev,
+			    int vf, struct ifla_vf_info *ivi);
+#endif /* IFLA_VF_MAX */
+int txgbe_disable_sriov(struct txgbe_adapter *adapter);
+#ifdef CONFIG_PCI_IOV
+int txgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
+void txgbe_enable_sriov(struct txgbe_adapter *adapter);
+#endif
+int txgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+void txgbe_check_vf_rate_limit(struct txgbe_adapter *adapter);
+void txgbe_set_vf_link_state(struct txgbe_adapter *adapter, int vf, int state);
+
+/*
+ * These are defined in txgbe_type.h on behalf of the VF driver
+ * but we need them here unwrapped for the PF driver.
+ */
+#define TXGBE_DEV_ID_SP_VF 0x1000
+#endif /* _TXGBE_SRIOV_H_ */
+
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_sysfs.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_sysfs.c
new file mode 100644
index 0000000000000000000000000000000000000000..04a00fed59c27a149a0b2e288898d7dacaef2b59
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_sysfs.c
@@ -0,0 +1,229 @@
+/*
+ * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver
+ * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on txgbe_sysfs.c, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + + +#include "txgbe.h" +#include "txgbe_hw.h" +#include "txgbe_type.h" + +#ifdef TXGBE_SYSFS + +#include +#include +#include +#include +#include +#include +#include +#ifdef TXGBE_HWMON +#include +#endif + +#ifdef TXGBE_HWMON +/* hwmon callback functions */ +static ssize_t txgbe_hwmon_show_temp(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *txgbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value; + + /* reset the temp field */ + TCALL(txgbe_attr->hw, mac.ops.get_thermal_sensor_data); + + value = txgbe_attr->sensor->temp; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t txgbe_hwmon_show_alarmthresh(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *txgbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = txgbe_attr->sensor->alarm_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t txgbe_hwmon_show_dalarmthresh(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *txgbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = txgbe_attr->sensor->dalarm_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +/** + * txgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. + * @adapter: pointer to the adapter structure + * @type: type of sensor data to display + * + * For each file we want in hwmon's sysfs interface we need a device_attribute + * This is included in our hwmon_attr struct that contains the references to + * the data structures we need to get the data to display. 
+ */
+static int txgbe_add_hwmon_attr(struct txgbe_adapter *adapter, int type)
+{
+	int rc;
+	unsigned int n_attr;
+	struct hwmon_attr *txgbe_attr;
+
+	n_attr = adapter->txgbe_hwmon_buff.n_hwmon;
+	txgbe_attr = &adapter->txgbe_hwmon_buff.hwmon_list[n_attr];
+
+	switch (type) {
+	case TXGBE_HWMON_TYPE_TEMP:
+		txgbe_attr->dev_attr.show = txgbe_hwmon_show_temp;
+		snprintf(txgbe_attr->name, sizeof(txgbe_attr->name),
+			 "temp%u_input", 0);
+		break;
+	case TXGBE_HWMON_TYPE_ALARMTHRESH:
+		txgbe_attr->dev_attr.show = txgbe_hwmon_show_alarmthresh;
+		snprintf(txgbe_attr->name, sizeof(txgbe_attr->name),
+			 "temp%u_alarmthresh", 0);
+		break;
+	case TXGBE_HWMON_TYPE_DALARMTHRESH:
+		txgbe_attr->dev_attr.show = txgbe_hwmon_show_dalarmthresh;
+		snprintf(txgbe_attr->name, sizeof(txgbe_attr->name),
+			 "temp%u_dalarmthresh", 0);
+		break;
+	default:
+		rc = -EPERM;
+		return rc;
+	}
+
+	/* These are always the same regardless of type */
+	txgbe_attr->sensor =
+		&adapter->hw.mac.thermal_sensor_data.sensor;
+	txgbe_attr->hw = &adapter->hw;
+	txgbe_attr->dev_attr.store = NULL;
+	txgbe_attr->dev_attr.attr.mode = S_IRUGO;
+	txgbe_attr->dev_attr.attr.name = txgbe_attr->name;
+
+	rc = device_create_file(pci_dev_to_dev(adapter->pdev),
+				&txgbe_attr->dev_attr);
+
+	if (rc == 0)
+		++adapter->txgbe_hwmon_buff.n_hwmon;
+
+	return rc;
+}
+#endif /* TXGBE_HWMON */
+
+static void txgbe_sysfs_del_adapter(
+				struct txgbe_adapter __maybe_unused *adapter)
+{
+#ifdef TXGBE_HWMON
+	int i;
+
+	if (adapter == NULL)
+		return;
+
+	for (i = 0; i < adapter->txgbe_hwmon_buff.n_hwmon; i++) {
+		device_remove_file(pci_dev_to_dev(adapter->pdev),
+			&adapter->txgbe_hwmon_buff.hwmon_list[i].dev_attr);
+	}
+
+	kfree(adapter->txgbe_hwmon_buff.hwmon_list);
+
+	if (adapter->txgbe_hwmon_buff.device)
+		hwmon_device_unregister(adapter->txgbe_hwmon_buff.device);
+#endif /* TXGBE_HWMON */
+}
+
+/* called from txgbe_main.c */
+void txgbe_sysfs_exit(struct txgbe_adapter *adapter)
+{
+	txgbe_sysfs_del_adapter(adapter);
+}
+
+/* called from txgbe_main.c */
+int txgbe_sysfs_init(struct txgbe_adapter *adapter)
+{
+	int rc = 0;
+#ifdef TXGBE_HWMON
+	struct hwmon_buff *txgbe_hwmon = &adapter->txgbe_hwmon_buff;
+	int n_attrs;
+
+#endif /* TXGBE_HWMON */
+	if (adapter == NULL)
+		goto err;
+
+#ifdef TXGBE_HWMON
+
+	/* Don't create thermal hwmon interface if no sensors present */
+	if (TCALL(&adapter->hw, mac.ops.init_thermal_sensor_thresh))
+		goto no_thermal;
+
+	/*
+	 * Allocate space for the max number of attributes:
+	 * max num sensors * values (temp, alarmthresh, dalarmthresh)
+	 */
+	n_attrs = 3;
+	txgbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
+					  GFP_KERNEL);
+	if (!txgbe_hwmon->hwmon_list) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	txgbe_hwmon->device =
+		hwmon_device_register(pci_dev_to_dev(adapter->pdev));
+	if (IS_ERR(txgbe_hwmon->device)) {
+		rc = PTR_ERR(txgbe_hwmon->device);
+		goto err;
+	}
+
+
+	/* Bail if any hwmon attr struct fails to initialize */
+	rc = txgbe_add_hwmon_attr(adapter, TXGBE_HWMON_TYPE_TEMP);
+	rc |= txgbe_add_hwmon_attr(adapter, TXGBE_HWMON_TYPE_ALARMTHRESH);
+	rc |= txgbe_add_hwmon_attr(adapter, TXGBE_HWMON_TYPE_DALARMTHRESH);
+	if (rc)
+		goto err;
+
+no_thermal:
+#endif /* TXGBE_HWMON */
+	goto exit;
+
+err:
+	txgbe_sysfs_del_adapter(adapter);
+exit:
+	return rc;
+}
+#endif /* TXGBE_SYSFS */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 51199c355f95ce4f21e2fbcb745474316f282e72..fa2ebe2e907863fd283d202f8d9698333de042a0 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -1,14 +1,109 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+/*
+ * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver
+ * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * based on txgbe_type.h, Copyright(c) 1999 - 2017 Intel Corporation.
+ * Contact Information:
+ * Linux NICS
+ * e1000-devel Mailing List
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
 #ifndef _TXGBE_TYPE_H_
 #define _TXGBE_TYPE_H_
 
-#include
+/*
+ * The following is a brief description of the error categories used by the
+ * ERROR_REPORT* macros.
+ *
+ * - TXGBE_ERROR_INVALID_STATE
+ * This category is for errors which represent a serious failure state that is
+ * unexpected, and could be potentially harmful to device operation. It should
+ * not be used for errors relating to issues that can be worked around or
+ * ignored.
+ *
+ * - TXGBE_ERROR_POLLING
+ * This category is for errors related to polling/timeout issues and should be
+ * used in any case where a timeout occurred, a lock could not be obtained, or
+ * data was not received within the time limit.
+ *
+ * - TXGBE_ERROR_CAUTION
+ * This category should be used for reporting issues that may be the cause of
+ * other errors, such as temperature warnings. It should indicate an event which
+ * could be serious, but hasn't necessarily caused problems yet.
+ *
+ * - TXGBE_ERROR_SOFTWARE
+ * This category is intended for errors due to software state preventing
+ * something. The category is not intended for errors due to bad arguments, or
+ * due to unsupported features. It should be used when a state occurs which
+ * prevents action but is not a serious issue.
+ *
+ * - TXGBE_ERROR_ARGUMENT
+ * This category is for when a bad or invalid argument is passed. It should be
+ * used whenever a function is called and error checking has detected the
+ * argument is wrong or incorrect.
+ *
+ * - TXGBE_ERROR_UNSUPPORTED
+ * This category is for errors which are due to unsupported circumstances or
+ * configuration issues. It should not be used when the issue is due to an
+ * invalid argument, but for when something has occurred that is unsupported
+ * (Ex: Flow control autonegotiation or an unsupported SFP+ module.)
+ */ + +#include "txgbe_osdep.h" +#include "txgbe_mtd.h" + +/* Override this by setting IOMEM in your txgbe_osdep.h header */ +#ifndef IOMEM +#define IOMEM +#endif + +/* Little Endian defines */ +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 + +#endif +#ifndef __be16 +/* Big Endian defines */ +#define __be16 u16 +#define __be32 u32 +#define __be64 u64 + +#endif + +/************ txgbe_register.h ************/ +/* Vendor ID */ +#ifndef PCI_VENDOR_ID_TRUSTNETIC +#define PCI_VENDOR_ID_TRUSTNETIC 0x8088 +#endif /* Device IDs */ #define TXGBE_DEV_ID_SP1000 0x1001 #define TXGBE_DEV_ID_WX1820 0x2001 +#define TXGBE_DEV_ID_AML 0x5000 +#define TXGBE_DEV_ID_AML5025 0x5025 +#define TXGBE_DEV_ID_AML5125 0x5125 +#define TXGBE_DEV_ID_AML5040 0x5040 +#define TXGBE_DEV_ID_AML5140 0x5140 /* Subsystem IDs */ /* SFP */ @@ -35,158 +130,3584 @@ #define TXGBE_ID_WX1820_MAC_SGMII 0x2060 #define TXGBE_ID_MAC_SGMII 0x60 +#define TXGBE_NCSI_SUP 0x8000 +#define TXGBE_NCSI_MASK 0x8000 +#define TXGBE_WOL_SUP 0x4000 +#define TXGBE_WOL_MASK 0x4000 +#define TXGBE_DEV_MASK 0xf0 + +#define TXGBE_FLASH_HEADER_FLAG 0x5aa5 + + /* Combined interface*/ -#define TXGBE_ID_SFI_XAUI 0x50 +#define TXGBE_ID_SFI_XAUI 0x50 /* Revision ID */ -#define TXGBE_SP_MPW 1 +#define TXGBE_SP_MPW 0xfe + +/* MDIO Manageable Devices (MMDs). */ +#define TXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 /* PMA and PMD */ +#define TXGBE_MDIO_PCS_DEV_TYPE 0x3 /* Physical Coding Sublayer*/ +#define TXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 /* PHY Extender Sublayer */ +#define TXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 /* Auto-Negotiation */ +#define TXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Vendor specific 1 */ +#define TXGBE_MDIO_VENDOR_SPECIFIC_2_DEV_TYPE 0x1F /* Vendor specific 2 */ + +/* phy register definitions */ +/* VENDOR_SPECIFIC_1_DEV regs */ +#define TXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ +#define TXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ +#define TXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0-10G, 1-1G */ + +/* VENDOR_SPECIFIC_2_DEV regs */ +#define TXGBE_MDIO_VENDOR_SPECIFIC_2_PORT_CTRL 0xF001 +#define TXGBE_MDIO_VENDOR_SPECIFIC_2_SW_RST BIT(15) +#define TXGBE_MDIO_VENDOR_SPECIFIC_2_POWER BIT(11) + +/* AUTO_NEG_DEV regs */ +#define TXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ +#define TXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ +#define TXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Reg */ +#define TXGBE_MDIO_AUTO_NEG_LP_STATUS 0xE820 /* AUTO NEG RX LP Status + * Reg */ +#define TXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */ +#define TXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ +#define TXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ +#define TXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */ + + +#define TXGBE_MDIO_AUTO_NEG_10GBASE_EEE_ADVT 0x8 +#define TXGBE_MDIO_AUTO_NEG_1000BASE_EEE_ADVT 0x4 +#define TXGBE_MDIO_AUTO_NEG_100BASE_EEE_ADVT 0x2 +#define TXGBE_MDIO_AUTO_NEG_LP_1000BASE_CAP 0x8000 +#define TXGBE_MDIO_AUTO_NEG_LP_10GBASE_CAP 0x0800 +#define TXGBE_MDIO_AUTO_NEG_10GBASET_STAT 0x0021 + +#define TXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/ +#define TXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ +#define TXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ +#define TXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */ +#define TXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */ +#define 
TXGBE_MII_RESTART 0x200 +#define TXGBE_MII_AUTONEG_COMPLETE 0x20 +#define TXGBE_MII_AUTONEG_LINK_UP 0x04 +#define TXGBE_MII_AUTONEG_REG 0x0 + +/* PHY_XS_DEV regs */ +#define TXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */ +#define TXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ + +/* Media-dependent registers. */ +#define TXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ +#define TXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ +#define TXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */ +#define TXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */ + +#define TXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */ +#define TXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ +#define TXGBE_MDIO_PHY_SPEED_100M 0x0020 /* 100M capable */ +#define TXGBE_MDIO_PHY_SPEED_10M 0x0040 /* 10M capable */ + +#define TXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */ +#define TXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */ +#define TXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */ + +#define TXGBE_PHY_REVISION_MASK 0xFFFFFFF0U +#define TXGBE_MAX_PHY_ADDR 32 -/**************** SP Registers ****************************/ +/* PHY IDs*/ +#define TN1010_PHY_ID 0x00A19410U +#define QT2022_PHY_ID 0x0043A400U +#define ATH_PHY_ID 0x03429050U +/* PHY FW revision */ +#define TNX_FW_REV 0xB +#define AQ_FW_REV 0x20 + +/* AMLITE ETH PHY Registers */ +#define SR_AN_CTRL 0x70000 +#define VR_PCS_DIG_CTRL1 0x38000 +#define SR_PCS_CTRL1 0x30000 +#define SR_PCS_CTRL2 0x30007 +#define SR_PMA_CTRL2 0x10007 +#define VR_PCS_DIG_CTRL3 0x38003 +#define VR_PMA_CTRL3 0x180a8 +#define VR_PMA_CTRL4 0x180a9 +#define SR_PMA_RS_FEC_CTRL 0x100c8 +#define ANA_OVRDEN0 0xca4 +#define ANA_OVRDEN1 0xca8 +#define ANA_OVRDVAL0 0xcb0 +#define ANA_OVRDVAL5 0xcc4 +#define OSC_CAL_N_CDR4 0x14 +#define PLL0_CFG0 0xc10 +#define PLL0_CFG2 0xc18 +#define PLL0_DIV_CFG0 0xc1c +#define PLL1_CFG0 0xc48 +#define PLL1_CFG2 0xc50 +#define PIN_OVRDEN0 0xc8c +#define PIN_OVRDVAL0 0xc94 +#define DATAPATH_CFG0 0x142c +#define DATAPATH_CFG1 0x1430 +#define AN_CFG1 0x1438 +#define SPARE52 0x16fc +#define RXS_CFG0 0x000 +#define PMD_CFG0 0x1400 +#define SR_PCS_STS1 0x30001 + +/* ETH PHY Registers */ +#define TXGBE_SR_XS_PCS_MMD_STATUS1 0x30001 +#define TXGBE_SR_PCS_CTL2 0x30007 +#define TXGBE_SR_PMA_MMD_CTL1 0x10000 +#define TXGBE_SR_MII_MMD_CTL 0x1F0000 +#define TXGBE_SR_MII_MMD_DIGI_CTL 0x1F8000 +#define TXGBE_SR_MII_MMD_AN_CTL 0x1F8001 +#define TXGBE_SR_MII_MMD_AN_ADV 0x1F0004 +#define TXGBE_SR_MII_MMD_AN_ADV_PAUSE(_v) ((0x3 & (_v)) << 7) +#define TXGBE_SR_MII_MMD_AN_ADV_PAUSE_ASM 0x80 +#define TXGBE_SR_MII_MMD_AN_ADV_PAUSE_SYM 0x100 +#define TXGBE_SR_MII_MMD_LP_BABL 0x1F0005 +#define TXGBE_SR_AN_MMD_CTL 0x70000 +#define TXGBE_SR_AN_MMD_ADV_REG1 0x70010 +#define TXGBE_SR_AN_MMD_ADV_REG1_PAUSE(_v) ((0x3 & (_v)) << 10) +#define TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM 0x400 +#define TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM 0x800 +#define TXGBE_SR_AN_MMD_ADV_REG2 0x70011 +#define TXGBE_SR_AN_MMD_ADV_REG3 0x70012 +#define TXGBE_SR_AN_MMD_LP_ABL1 0x70013 +#define TXGBE_SR_AN_MMD_LP_ABL2 0x70014 +#define TXGBE_SR_AN_MMD_LP_ABL3 0x70015 +#define TXGBE_VR_AN_KR_MODE_CL 0x78003 +#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1 0x38000 +#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS 0x38010 +#define TXGBE_PHY_MPLLA_CTL0 0x18071 +#define TXGBE_PHY_MPLLA_CTL3 0x18077 +#define TXGBE_PHY_MISC_CTL0 0x18090 +#define TXGBE_PHY_VCO_CAL_LD0 0x18092 +#define TXGBE_PHY_VCO_CAL_LD1 0x18093 +#define TXGBE_PHY_VCO_CAL_LD2 0x18094 +#define TXGBE_PHY_VCO_CAL_LD3 0x18095 
+#define TXGBE_PHY_VCO_CAL_REF0 0x18096 +#define TXGBE_PHY_VCO_CAL_REF1 0x18097 +#define TXGBE_PHY_RX_AD_ACK 0x18098 +#define TXGBE_PHY_AFE_DFE_ENABLE 0x1805D +#define TXGBE_PHY_DFE_TAP_CTL0 0x1805E +#define TXGBE_PHY_RX_EQ_ATT_LVL0 0x18057 +#define TXGBE_PHY_RX_EQ_CTL0 0x18058 +#define TXGBE_PHY_RX_EQ_CTL 0x1805C +#define TXGBE_PHY_TX_EQ_CTL0 0x18036 +#define TXGBE_PHY_TX_EQ_CTL1 0x18037 +#define TXGBE_PHY_TX_RATE_CTL 0x18034 +#define TXGBE_PHY_RX_RATE_CTL 0x18054 +#define TXGBE_PHY_TX_GEN_CTL2 0x18032 +#define TXGBE_PHY_RX_GEN_CTL2 0x18052 +#define TXGBE_PHY_RX_GEN_CTL3 0x18053 +#define TXGBE_PHY_MPLLA_CTL2 0x18073 +#define TXGBE_PHY_RX_POWER_ST_CTL 0x18055 +#define TXGBE_PHY_TX_POWER_ST_CTL 0x18035 +#define TXGBE_PHY_TX_GENCTRL1 0x18031 + +#define TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_R 0x0 +#define TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X 0x1 +#define TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_MASK 0x3 +#define TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_1G 0x0 +#define TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_10G 0x2000 +#define TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_MASK 0x2000 +#define TXGBE_SR_PMA_MMD_CTL1_LB_EN 0x1 +#define TXGBE_SR_MII_MMD_CTL_AN_EN 0x1000 +#define TXGBE_SR_MII_MMD_CTL_RESTART_AN 0x0200 +#define TXGBE_SR_AN_MMD_CTL_RESTART_AN 0x0200 +#define TXGBE_SR_AN_MMD_CTL_ENABLE 0x1000 +#define TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KX4 0x40 +#define TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KX 0x20 +#define TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KR 0x80 +#define TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_MASK 0xFFFF +#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_ENABLE 0x1000 +#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST 0x8000 +#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK 0x1C +#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD 0x10 + +#define TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_1GBASEX_KX 32 +#define TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_10GBASER_KR 33 +#define TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_OTHER 40 +#define TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_MASK 0xFF +#define TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_1GBASEX_KX 0x56 +#define TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_10GBASER_KR 0x7B +#define TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_OTHER 0x56 +#define TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_MASK 0x7FF +#define TXGBE_PHY_MISC_CTL0_TX2RX_LB_EN_0 0x1 +#define TXGBE_PHY_MISC_CTL0_TX2RX_LB_EN_3_1 0xE +#define TXGBE_PHY_MISC_CTL0_RX_VREF_CTRL 0x1F00 +#define TXGBE_PHY_VCO_CAL_LD0_1GBASEX_KX 1344 +#define TXGBE_PHY_VCO_CAL_LD0_10GBASER_KR 1353 +#define TXGBE_PHY_VCO_CAL_LD0_OTHER 1360 +#define TXGBE_PHY_VCO_CAL_LD0_MASK 0x1000 +#define TXGBE_PHY_VCO_CAL_REF0_LD0_1GBASEX_KX 42 +#define TXGBE_PHY_VCO_CAL_REF0_LD0_10GBASER_KR 41 +#define TXGBE_PHY_VCO_CAL_REF0_LD0_OTHER 34 +#define TXGBE_PHY_VCO_CAL_REF0_LD0_MASK 0x3F +#define TXGBE_PHY_AFE_DFE_ENABLE_DFE_EN0 0x10 +#define TXGBE_PHY_AFE_DFE_ENABLE_AFE_EN0 0x1 +#define TXGBE_PHY_AFE_DFE_ENABLE_MASK 0xFF +#define TXGBE_PHY_RX_EQ_CTL_CONT_ADAPT0 0x1 +#define TXGBE_PHY_RX_EQ_CTL_CONT_ADAPT_MASK 0xF +#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_10GBASER_KR 0x0 +#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_RXAUI 0x1 +#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_1GBASEX_KX 0x3 +#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_OTHER 0x2 +#define TXGBE_PHY_TX_RATE_CTL_TX1_RATE_OTHER 0x20 +#define TXGBE_PHY_TX_RATE_CTL_TX2_RATE_OTHER 0x200 +#define TXGBE_PHY_TX_RATE_CTL_TX3_RATE_OTHER 0x2000 +#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_MASK 0x7 +#define TXGBE_PHY_TX_RATE_CTL_TX1_RATE_MASK 0x70 +#define TXGBE_PHY_TX_RATE_CTL_TX2_RATE_MASK 0x700 +#define TXGBE_PHY_TX_RATE_CTL_TX3_RATE_MASK 0x7000 +#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_10GBASER_KR 0x0 +#define 
TXGBE_PHY_RX_RATE_CTL_RX0_RATE_RXAUI 0x1 +#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_1GBASEX_KX 0x3 +#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_OTHER 0x2 +#define TXGBE_PHY_RX_RATE_CTL_RX1_RATE_OTHER 0x20 +#define TXGBE_PHY_RX_RATE_CTL_RX2_RATE_OTHER 0x200 +#define TXGBE_PHY_RX_RATE_CTL_RX3_RATE_OTHER 0x2000 +#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_MASK 0x7 +#define TXGBE_PHY_RX_RATE_CTL_RX1_RATE_MASK 0x70 +#define TXGBE_PHY_RX_RATE_CTL_RX2_RATE_MASK 0x700 +#define TXGBE_PHY_RX_RATE_CTL_RX3_RATE_MASK 0x7000 +#define TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_10GBASER_KR 0x200 +#define TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_10GBASER_KR_RXAUI 0x300 +#define TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_OTHER 0x100 +#define TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_MASK 0x300 +#define TXGBE_PHY_TX_GEN_CTL2_TX1_WIDTH_OTHER 0x400 +#define TXGBE_PHY_TX_GEN_CTL2_TX1_WIDTH_MASK 0xC00 +#define TXGBE_PHY_TX_GEN_CTL2_TX2_WIDTH_OTHER 0x1000 +#define TXGBE_PHY_TX_GEN_CTL2_TX2_WIDTH_MASK 0x3000 +#define TXGBE_PHY_TX_GEN_CTL2_TX3_WIDTH_OTHER 0x4000 +#define TXGBE_PHY_TX_GEN_CTL2_TX3_WIDTH_MASK 0xC000 +#define TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_10GBASER_KR 0x200 +#define TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_10GBASER_KR_RXAUI 0x300 +#define TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_OTHER 0x100 +#define TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_MASK 0x300 +#define TXGBE_PHY_RX_GEN_CTL2_RX1_WIDTH_OTHER 0x400 +#define TXGBE_PHY_RX_GEN_CTL2_RX1_WIDTH_MASK 0xC00 +#define TXGBE_PHY_RX_GEN_CTL2_RX2_WIDTH_OTHER 0x1000 +#define TXGBE_PHY_RX_GEN_CTL2_RX2_WIDTH_MASK 0x3000 +#define TXGBE_PHY_RX_GEN_CTL2_RX3_WIDTH_OTHER 0x4000 +#define TXGBE_PHY_RX_GEN_CTL2_RX3_WIDTH_MASK 0xC000 + +#define TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_8 0x100 +#define TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_10 0x200 +#define TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_16P5 0x400 +#define TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_MASK 0x700 + +#define TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME 100 +#define TXGBE_PHY_INIT_DONE_POLLING_TIME 100 + +/* amlite: FPGA */ +/* PHY MDI STANDARD CONFIG */ +#define TXGBE_MDI_PHY_ID1_OFFSET 2 +#define TXGBE_MDI_PHY_ID2_OFFSET 3 +#define TXGBE_MDI_PHY_ID_MASK 0xFFFFFC00U +#define TXGBE_MDI_PHY_SPEED_SELECT1 0x0040 +#define TXGBE_MDI_PHY_DUPLEX 0x0100 +#define TXGBE_MDI_PHY_RESTART_AN 0x0200 +#define TXGBE_MDI_PHY_ANE 0x1000 +#define TXGBE_MDI_PHY_SPEED_SELECT0 0x2000 +#define TXGBE_MDI_PHY_RESET 0x8000 + +#define TXGBE_PHY_RST_WAIT_PERIOD 50 + +#define TXGBE_MDI_PHY_INT_LSC 0x0400 +#define TXGBE_MDI_PHY_INT_ANC 0x0800 + +#define MV1119_CTRL 0 /* Page Any, Control reg */ +#define MV1119_STUS 1 /* Page Any, Status reg */ +#define MV1119_PHY_ID_1 2 /* Page Any, Phy Identifier 1 */ +#define MV1119_PHY_ID_2 3 /* Page Any, Phy Identifier 2 */ +#define MV1119_AUTO_NEGO_ADVER 4 /* Page Any, Auto-Negotiation Advertisement reg */ +#define MV1119_LK_PARTNER_ABILITY 5 /* Page Any, Link Partner Ability reg */ +#define MV1119_AUTO_NEGO_EX 6 /* Page Any, Auto-Negotiation Expansion reg */ +#define MV1119_NEXT_PAGE_TRANS 7 /* Page Any, Next Page Transmit reg */ +#define MV1119_LK_PARTNER_NEXT_PAGE 8 /* Page Any, Link Partner Next Page reg */ +#define MV1119_1000BASE_T_CTRL 9 /* Page Any, 1000BASE-T Control reg */ +#define MV1119_1000BASE_T_STUS 10 /* Page Any, 1000BASE-T Status reg */ +#define MV1119_EX_STUS 15 /* Page Any, Extended Status reg */ +#define MV1119_CO_SPEC_CTRL_1 16 /* Page 0, Copper Specific Control reg 1 */ +#define MV1119_CO_SPEC_STUS_1 17 /* Page 0, Copper Specific Status reg 1 */ +#define MV1119_CO_SPEC_INT_EN 18 /* Page 0, Copper Specific Interrupt Enable reg */ +#define MV1119_CO_SPEC_STUS_2 19 /* Page 0, Copper 
Specific Status reg 2 */ +#define MV1119_CO_SPEC_CTRL_3 20 /* Page 0, Copper Specific Control reg 3 */ +#define MV1119_RECE_ERR_COUT 21 /* Page 0, Receive Error Counter reg */ +#define MV1119_PAGE_ADD 22 /* Page Any, Page Address */ +#define MV1119_GLO_INT_STUS 23 /* Page 0,2, Global Interrupt Status */ +#define MV1119_CO_SPEC_CTRL_2 26 /* Page 0, Copper Specific Control reg 2 */ +#define MV1119_MAC_SPEC_CTRL_1 16 /* Page 2, MAC Specific Control reg 1 */ +#define MV1119_MAC_SPEC_INT_EN 18 /* Page 2, MAC Specific Interrupt Enable reg */ +#define MV1119_MAC_SPEC_STUS_2 19 /* Page 2, MAC Specific Status reg 2 */ +#define MV1119_MAC_SPEC_CTRL 21 /* Page 2, MAC Specific Control reg */ +#define MV1119_LED_FUN_CTRL 16 /* Page 3, LED Function Control reg */ +#define MV1119_LED_POLAR_CTRL 17 /* Page 3, LED Polarity Control reg */ +#define MV1119_LED_TIME_CTRL 18 /* Page 3, LED Timer Control reg */ + +#define CBIT(_x) (1 << (_x)) + +#define MV1119_C_RESET CBIT(15) +#define MV1119_C_LOOPBACK CBIT(14) +#define MV1119_C_AUTO_NE_EN CBIT(12) +#define MV1119_C_POWER_DOWN CBIT(11) +#define MV1119_C_RE_CO_AUTO_NE CBIT(9) +#define MV1119_C_CO_DUPLEX_MODE CBIT(8) +#define MV1119_C_SPEED_SELECT1 CBIT(6) +#define MV1119_C_10M 0x00 +#define MV1119_C_100M CBIT(13) +#define MV1119_C_1000M CBIT(6) +#define MV1119_C_FULL_DUP CBIT(8) +#define MV1119_C_HALF_DUP 0x00 +#define MV1119_ANA_ASYM_PAUSE CBIT(11) +#define MV1119_ANA_PAUSE CBIT(10) +#define MV1119_ANA_100FULL CBIT(8) +#define MV1119_ANA_100HALF CBIT(7) +#define MV1119_ANA_10FULL CBIT(6) +#define MV1119_ANA_10HALF CBIT(5) +#define MV1119_1000BC_1000FULL CBIT(9) +#define MV1119_1000BC_1000HALF CBIT(8) +#define MV1119_CSS1_SPEED (CBIT(14) | CBIT(15)) +#define MV1119_CSS1_DUPLEX CBIT(13) +#define MV1119_CSS1_LINK CBIT(10) +#define MV1119_CSS2_AUTO_NE_ERR CBIT(15) +#define MV1119_CSS2_SPEED_CH CBIT(14) +#define MV1119_CSS2_DUPLEX_CH CBIT(13) +#define MV1119_CSS2_AUTO_NE_COMPLETE CBIT(11) +#define MV1119_CSS2_CO_LINK_STATUS_CH CBIT(10) +#define MV1119_CSC_DOWNSHIFT_COUNT (CBIT(12) | CBIT(13) | CBIT(14)) +#define MV1119_CSC_DOWNSHIFT_EN CBIT(11) +#define MV1119_CSC_POWER_DOWN CBIT(2) + + +#define MV1119_ANA_100 (MV1119_ANA_100FULL | MV1119_ANA_100HALF) +#define MV1119_ANA_10 (MV1119_ANA_10FULL | MV1119_ANA_10HALF) +#define MV1119_ANA_100_AND_10 (MV1119_ANA_100 | MV1119_ANA_10) +#define MV1119_1000BC_1000 (MV1119_1000BC_1000FULL | MV1119_1000BC_1000HALF) + +/**************** Global Registers ****************************/ /* chip control Registers */ -#define TXGBE_MIS_PRB_CTL 0x10010 -#define TXGBE_MIS_PRB_CTL_LAN_UP(_i) BIT(1 - (_i)) -/* FMGR Registers */ -#define TXGBE_SPI_ILDR_STATUS 0x10120 -#define TXGBE_SPI_ILDR_STATUS_PERST BIT(0) /* PCIE_PERST is done */ -#define TXGBE_SPI_ILDR_STATUS_PWRRST BIT(1) /* Power on reset is done */ -#define TXGBE_SPI_ILDR_STATUS_LAN_SW_RST(_i) BIT((_i) + 9) /* lan soft reset done */ +#define TXGBE_MIS_RST 0x1000C +#define TXGBE_MIS_PWR 0x10000 +#define TXGBE_MIS_CTL 0x10004 +#define TXGBE_MIS_PF_SM 0x10008 +#define TXGBE_MIS_PRB_CTL 0x10010 /* for PCIE recovery only */ +#define TXGBE_MIS_ST 0x10028 +#define TXGBE_MIS_SWSM 0x1002C +#define TXGBE_MIS_RST_ST 0x10030 + +#define PX_PF_PEND 0x4C0 +#define PX_VF_PEND(i) (0x4D0 + 4 * (i)) /* i = [0,3]*/ +#define PX_PF_BME 0x4B8 + +#define TXGBE_MIS_RST_SW_RST 0x00000001U +#define TXGBE_MIS_RST_LAN0_RST 0x00000002U +#define TXGBE_MIS_RST_LAN1_RST 0x00000004U +#define TXGBE_MIS_RST_LAN0_EPHY_RST 0x00080000U +#define TXGBE_MIS_RST_LAN1_EPHY_RST 0x00010000U +#define TXGBE_MIS_RST_LAN0_MAC_RST 
0x00100000U +#define TXGBE_MIS_RST_LAN1_MAC_RST 0x00020000U +#define TXGBE_MIS_RST_LAN0_CHG_ETH_MODE 0x20000000U +#define TXGBE_MIS_RST_LAN1_CHG_ETH_MODE 0x40000000U +#define TXGBE_MIS_RST_GLOBAL_RST 0x80000000U +#define TXGBE_MIS_RST_MASK (TXGBE_MIS_RST_SW_RST | \ + TXGBE_MIS_RST_LAN0_RST | \ + TXGBE_MIS_RST_LAN1_RST) +#define TXGBE_MIS_PWR_LAN_ID(_r) ((0xC0000000U & (_r)) >> 30) +#define TXGBE_MIS_PWR_LAN_ID_0 (1) +#define TXGBE_MIS_PWR_LAN_ID_1 (2) +#define TXGBE_MIS_PWR_LAN_ID_A (3) +#define TXGBE_MIS_ST_MNG_INIT_DN 0x00000001U +#define TXGBE_MIS_ST_MNG_VETO 0x00000100U +#define TXGBE_MIS_ST_LAN0_ECC 0x00010000U +#define TXGBE_MIS_ST_LAN1_ECC 0x00020000U +#define TXGBE_MIS_ST_MNG_ECC 0x00040000U +#define TXGBE_MIS_ST_PCORE_ECC 0x00080000U +#define TXGBE_MIS_ST_PCIWRP_ECC 0x00100000U +#define TXGBE_MIS_SWSM_SMBI 1 +#define TXGBE_MIS_RST_ST_DEV_RST_ST_DONE 0x00000000U +#define TXGBE_MIS_RST_ST_DEV_RST_ST_REQ 0x00080000U +#define TXGBE_MIS_RST_ST_DEV_RST_ST_INPROGRESS 0x00100000U +#define TXGBE_MIS_RST_ST_DEV_RST_ST_MASK 0x00180000U +#define TXGBE_MIS_RST_ST_DEV_RST_TYPE_MASK 0x00070000U +#define TXGBE_MIS_RST_ST_DEV_RST_TYPE_SHIFT 16 +#define TXGBE_MIS_RST_ST_DEV_RST_TYPE_SW_RST 0x3 +#define TXGBE_MIS_RST_ST_DEV_RST_TYPE_GLOBAL_RST 0x5 +#define TXGBE_MIS_RST_ST_RST_INIT 0x0000FF00U +#define TXGBE_MIS_RST_ST_RST_INI_SHIFT 8 +#define TXGBE_MIS_RST_ST_RST_TIM 0x000000FFU +#define TXGBE_MIS_PF_SM_SM 1 +#define TXGBE_MIS_PRB_CTL_LAN0_UP 0x2 +#define TXGBE_MIS_PRB_CTL_LAN1_UP 0x1 /* Sensors for PVT(Process Voltage Temperature) */ -#define TXGBE_TS_CTL 0x10300 -#define TXGBE_TS_CTL_EVAL_MD BIT(31) +#define TXGBE_TS_CTL 0x10300 +#define TXGBE_TS_EN 0x10304 +#define TXGBE_TS_ST 0x10308 +#define TXGBE_TS_ALARM_THRE 0x1030C +#define TXGBE_TS_DALARM_THRE 0x10310 +#define TXGBE_TS_INT_EN 0x10314 +#define TXGBE_TS_ALARM_ST 0x10318 +#define TXGBE_TS_ALARM_ST_DALARM 0x00000002U +#define TXGBE_TS_ALARM_ST_ALARM 0x00000001U -/* GPIO register bit */ -#define TXGBE_GPIOBIT_0 BIT(0) /* I:tx fault */ -#define TXGBE_GPIOBIT_1 BIT(1) /* O:tx disabled */ -#define TXGBE_GPIOBIT_2 BIT(2) /* I:sfp module absent */ -#define TXGBE_GPIOBIT_3 BIT(3) /* I:rx signal lost */ -#define TXGBE_GPIOBIT_4 BIT(4) /* O:rate select, 1G(0) 10G(1) */ -#define TXGBE_GPIOBIT_5 BIT(5) /* O:rate select, 1G(0) 10G(1) */ +#define TXGBE_TS_CTL_EVAL_MD 0x80000000U +#define TXGBE_TS_EN_ENA 0x00000001U +#define TXGBE_TS_ST_DATA_OUT_MASK 0x000003FFU +#define TXGBE_TS_ALARM_THRE_MASK 0x000003FFU +#define TXGBE_TS_DALARM_THRE_MASK 0x000003FFU +#define TXGBE_TS_INT_EN_DALARM_INT_EN 0x00000002U +#define TXGBE_TS_INT_EN_ALARM_INT_EN 0x00000001U -/* Extended Interrupt Enable Set */ -#define TXGBE_PX_MISC_ETH_LKDN BIT(8) -#define TXGBE_PX_MISC_DEV_RST BIT(10) -#define TXGBE_PX_MISC_ETH_EVENT BIT(17) -#define TXGBE_PX_MISC_ETH_LK BIT(18) -#define TXGBE_PX_MISC_ETH_AN BIT(19) -#define TXGBE_PX_MISC_INT_ERR BIT(20) -#define TXGBE_PX_MISC_GPIO BIT(26) -#define TXGBE_PX_MISC_IEN_MASK \ - (TXGBE_PX_MISC_ETH_LKDN | TXGBE_PX_MISC_DEV_RST | \ - TXGBE_PX_MISC_ETH_EVENT | TXGBE_PX_MISC_ETH_LK | \ - TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR | \ - TXGBE_PX_MISC_GPIO) - -/* Port cfg registers */ -#define TXGBE_CFG_PORT_ST 0x14404 -#define TXGBE_CFG_PORT_ST_LINK_UP BIT(0) +/* Sensors for AMLITE PVT(Process Voltage Temperature) */ +#define TXGBE_AML_INTR_RAW_HI 0x10300 +#define TXGBE_AML_INTR_RAW_ME 0x10304 +#define TXGBE_AML_INTR_RAW_LO 0x10308 +#define TXGBE_AML_TS_CTL1 0x10330 +#define TXGBE_AML_TS_CTL2 0x10334 +#define TXGBE_AML_TS_ENA 0x10338 +#define 
TXGBE_AML_TS_STS 0x1033C +#define TXGBE_AML_INTR_HIGH_EN 0x10318 +#define TXGBE_AML_INTR_MED_EN 0x1031C +#define TXGBE_AML_INTR_LOW_EN 0x10320 +#define TXGBE_AML_INTR_HIGH_STS 0x1030C +#define TXGBE_AML_INTR_MED_STS 0x10310 +#define TXGBE_AML_INTR_LOW_STS 0x10314 + +#define TXGBE_AML_TS_STS_VLD 0x1000 +#define TXGBE_AML_INTR_EN_HI 0x00000002U +#define TXGBE_AML_INTR_EN_ME 0x00000001U +#define TXGBE_AML_INTR_EN_LO 0x00000001U +#define TXGBE_AML_INTR_CL_HI 0x00000002U +#define TXGBE_AML_INTR_CL_ME 0x00000001U +#define TXGBE_AML_INTR_CL_LO 0x00000001U +#define TXGBE_AML_EVAL_MODE_MASK 0x010U +#define TXGBE_AML_CAL_MODE_MASK 0x08U +#define TXGBE_AML_ALARM_THRE_MASK 0x1FFE0000U +#define TXGBE_AML_DALARM_THRE_MASK 0x0001FFE0U + +struct txgbe_thermal_diode_data { + s16 temp; + s16 alarm_thresh; + s16 dalarm_thresh; +}; + +struct txgbe_thermal_sensor_data { + struct txgbe_thermal_diode_data sensor; +}; + + +/* FMGR Registers */ +#define TXGBE_SPI_ILDR_STATUS 0x10120 +#define TXGBE_SPI_ILDR_STATUS_PERST 0x00000001U /* PCIE_PERST is done */ +#define TXGBE_SPI_ILDR_STATUS_PWRRST 0x00000002U /* Power on reset is done */ +#define TXGBE_SPI_ILDR_STATUS_SW_RESET 0x00000080U /* software reset is done */ +#define TXGBE_SPI_ILDR_STATUS_LAN0_SW_RST 0x00000200U /* lan0 soft reset done */ +#define TXGBE_SPI_ILDR_STATUS_LAN1_SW_RST 0x00000400U /* lan1 soft reset done */ + +#define TXGBE_MAX_FLASH_LOAD_POLL_TIME 10 +#define TXGBE_SPI_CMD 0x10104 +#define TXGBE_SPI_CMD_CMD(_v) (((_v) & 0x7) << 28) +#define TXGBE_SPI_CMD_CLK(_v) (((_v) & 0x7) << 25) +#define TXGBE_SPI_CMD_ADDR(_v) (((_v) & 0xFFFFFF)) +#define TXGBE_SPI_DATA 0x10108 +#define TXGBE_SPI_DATA_BYPASS ((0x1) << 31) +#define TXGBE_SPI_DATA_STATUS(_v) (((_v) & 0xFF) << 16) +#define TXGBE_SPI_DATA_OP_DONE ((0x1)) + +#define TXGBE_SPI_STATUS 0x1010C +#define TXGBE_SPI_STATUS_OPDONE ((0x1)) +#define TXGBE_SPI_STATUS_FLASH_BYPASS ((0x1) << 31) + +#define TXGBE_SPI_USR_CMD 0x10110 +#define TXGBE_SPI_CMDCFG0 0x10114 +#define TXGBE_SPI_CMDCFG1 0x10118 +#define TXGBE_SPI_ECC_CTL 0x10130 +#define TXGBE_SPI_ECC_INJ 0x10134 +#define TXGBE_SPI_ECC_ST 0x10138 +#define TXGBE_SPI_ILDR_SWPTR 0x10124 + +/************************* Port Registers ************************************/ /* I2C registers */ -#define TXGBE_I2C_BASE 0x14900 +#define TXGBE_I2C_CON 0x14900 /* I2C Control */ +#define TXGBE_I2C_CON_SLAVE_DISABLE ((1 << 6)) +#define TXGBE_I2C_CON_RESTART_EN ((1 << 5)) +#define TXGBE_I2C_CON_10BITADDR_MASTER ((1 << 4)) +#define TXGBE_I2C_CON_10BITADDR_SLAVE ((1 << 3)) +#define TXGBE_I2C_CON_SPEED(_v) (((_v) & 0x3) << 1) +#define TXGBE_I2C_CON_MASTER_MODE ((1 << 0)) +#define TXGBE_I2C_TAR 0x14904 /* I2C Target Address */ +#define TXGBE_I2C_DATA_CMD 0x14910 /* I2C Rx/Tx Data Buf and Cmd */ +#define TXGBE_I2C_DATA_CMD_STOP ((1 << 9)) +#define TXGBE_I2C_DATA_CMD_READ ((1 << 8) | TXGBE_I2C_DATA_CMD_STOP) +#define TXGBE_I2C_DATA_CMD_WRITE ((0 << 8) | TXGBE_I2C_DATA_CMD_STOP) +#define TXGBE_I2C_SS_SCL_HCNT 0x14914 /* Standard speed I2C Clock SCL + * High Count */ +#define TXGBE_I2C_SS_SCL_LCNT 0x14918 /* Standard speed I2C Clock SCL + * Low Count */ +#define TXGBE_I2C_FS_SCL_HCNT 0x1491C /* Fast Mode and Fast Mode Plus + * I2C Clock SCL High Count */ +#define TXGBE_I2C_FS_SCL_LCNT 0x14920 /* Fast Mode and Fast Mode Plus + * I2C Clock SCL Low Count */ +#define TXGBE_I2C_HS_SCL_HCNT 0x14924 /* High speed I2C Clock SCL + * High Count */ +#define TXGBE_I2C_HS_SCL_LCNT 0x14928 /* High speed I2C Clock SCL Low + * Count */ +#define TXGBE_I2C_INTR_STAT 0x1492C /* I2C Interrupt Status 
*/ +#define TXGBE_I2C_RAW_INTR_STAT 0x14934 /* I2C Raw Interrupt Status */ +#define TXGBE_I2C_INTR_STAT_RX_FULL ((0x1) << 2) +#define TXGBE_I2C_INTR_STAT_TX_EMPTY ((0x1) << 4) +#define TXGBE_I2C_INTR_MASK 0x14930 /* I2C Interrupt Mask */ +#define TXGBE_I2C_RX_TL 0x14938 /* I2C Receive FIFO Threshold */ +#define TXGBE_I2C_TX_TL 0x1493C /* I2C TX FIFO Threshold */ +#define TXGBE_I2C_CLR_INTR 0x14940 /* Clear Combined and Individual + * Int */ +#define TXGBE_I2C_CLR_RX_UNDER 0x14944 /* Clear RX_UNDER Interrupt */ +#define TXGBE_I2C_CLR_RX_OVER 0x14948 /* Clear RX_OVER Interrupt */ +#define TXGBE_I2C_CLR_TX_OVER 0x1494C /* Clear TX_OVER Interrupt */ +#define TXGBE_I2C_CLR_RD_REQ 0x14950 /* Clear RD_REQ Interrupt */ +#define TXGBE_I2C_CLR_TX_ABRT 0x14954 /* Clear TX_ABRT Interrupt */ +#define TXGBE_I2C_CLR_RX_DONE 0x14958 /* Clear RX_DONE Interrupt */ +#define TXGBE_I2C_CLR_ACTIVITY 0x1495C /* Clear ACTIVITY Interrupt */ +#define TXGBE_I2C_CLR_STOP_DET 0x14960 /* Clear STOP_DET Interrupt */ +#define TXGBE_I2C_CLR_START_DET 0x14964 /* Clear START_DET Interrupt */ +#define TXGBE_I2C_CLR_GEN_CALL 0x14968 /* Clear GEN_CALL Interrupt */ +#define TXGBE_I2C_ENABLE 0x1496C /* I2C Enable */ +#define TXGBE_I2C_STATUS 0x14970 /* I2C Status register */ +#define TXGBE_I2C_STATUS_MST_ACTIVITY ((1U << 5)) +#define TXGBE_I2C_TXFLR 0x14974 /* Transmit FIFO Level Reg */ +#define TXGBE_I2C_RXFLR 0x14978 /* Receive FIFO Level Reg */ +#define TXGBE_I2C_SDA_HOLD 0x1497C /* SDA hold time length reg */ +#define TXGBE_I2C_SDA_RX_HOLD 0xff0000 /* SDA rx hold time length reg */ +#define TXGBE_I2C_SDA_TX_HOLD 0xffff /* SDA tx hold time length reg */ + +#define TXGBE_I2C_TX_ABRT_SOURCE 0x14980 /* I2C TX Abort Status Reg */ +#define TXGBE_I2C_SDA_SETUP 0x14994 /* I2C SDA Setup Register */ +#define TXGBE_I2C_ENABLE_STATUS 0x1499C /* I2C Enable Status Register */ +#define TXGBE_I2C_FS_SPKLEN 0x149A0 /* ISS and FS spike suppression + * limit */ +#define TXGBE_I2C_HS_SPKLEN 0x149A4 /* HS spike suppression limit */ +#define TXGBE_I2C_SCL_STUCK_TIMEOUT 0x149AC /* I2C SCL stuck at low timeout + * register */ +#define TXGBE_I2C_SDA_STUCK_TIMEOUT 0x149B0 /*I2C SDA Stuck at Low Timeout*/ +#define TXGBE_I2C_CLR_SCL_STUCK_DET 0x149B4 /* Clear SCL Stuck at Low Detect + * Interrupt */ +#define TXGBE_I2C_DEVICE_ID 0x149b8 /* I2C Device ID */ +#define TXGBE_I2C_COMP_PARAM_1 0x149f4 /* Component Parameter Reg */ +#define TXGBE_I2C_COMP_VERSION 0x149f8 /* Component Version ID */ +#define TXGBE_I2C_COMP_TYPE 0x149fc /* DesignWare Component Type + * Reg */ + +#define TXGBE_I2C_SLAVE_ADDR (0xA0 >> 1) +#define TXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 + + +/* port cfg Registers */ +#define TXGBE_CFG_PORT_CTL 0x14400 +#define TXGBE_CFG_PORT_ST 0x14404 +#define TXGBE_CFG_EX_VTYPE 0x14408 +#define TXGBE_CFG_LED_CTL 0x14424 +#define TXGBE_CFG_VXLAN 0x14410 +#define TXGBE_CFG_VXLAN_GPE 0x14414 +#define TXGBE_CFG_GENEVE 0x14418 +#define TXGBE_CFG_TEREDO 0x1441C +#define TXGBE_CFG_TCP_TIME 0x14420 +#define TXGBE_LINKUP_FILTER 0x14428 +#define TXGBE_LINKUP_FILTER_TIME 30 +#define TXGBE_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4)) + +/*AML LINK STATUS OVERWRITE*/ +#define TXGBE_AML_EPCS_MISC_CTL 0x13240 +#define TXGBE_AML_LINK_STATUS_OVRD_EN 0x00000020 +#define TXGBE_AML_LINK_STATUS_OVRD_VAL 0x00000010 + +/* port cfg bit */ +#define TXGBE_CFG_PORT_CTL_PFRSTD 0x00004000U /* Phy Function Reset Done */ +#define TXGBE_CFG_PORT_CTL_D_VLAN 0x00000001U /* double vlan*/ +#define TXGBE_CFG_PORT_CTL_ETAG_ETYPE_VLD 0x00000002U +#define TXGBE_CFG_PORT_CTL_QINQ 0x00000004U 
+#define TXGBE_CFG_PORT_CTL_DRV_LOAD 0x00000008U +#define TXGBE_CFG_PORT_CTL_FORCE_LKUP 0x00000010U /* force link up */ +#define TXGBE_CFG_PORT_CTL_DCB_EN 0x00000400U /* dcb enabled */ +#define TXGBE_CFG_PORT_CTL_NUM_TC_MASK 0x00000800U /* number of TCs */ +#define TXGBE_CFG_PORT_CTL_NUM_TC_4 0x00000000U +#define TXGBE_CFG_PORT_CTL_NUM_TC_8 0x00000800U +#define TXGBE_CFG_PORT_CTL_NUM_VT_MASK 0x00003000U /* number of TVs */ +#define TXGBE_CFG_PORT_CTL_NUM_VT_NONE 0x00000000U +#define TXGBE_CFG_PORT_CTL_NUM_VT_16 0x00001000U +#define TXGBE_CFG_PORT_CTL_NUM_VT_32 0x00002000U +#define TXGBE_CFG_PORT_CTL_NUM_VT_64 0x00003000U +/* Status Bit */ +#define TXGBE_CFG_PORT_ST_LINK_UP 0x00000001U +#define TXGBE_CFG_PORT_ST_LINK_10G 0x00000002U +#define TXGBE_CFG_PORT_ST_LINK_1G 0x00000004U +#define TXGBE_CFG_PORT_ST_LINK_100M 0x00000008U +#define TXGBE_CFG_PORT_ST_LAN_ID(_r) ((0x00000100U & (_r)) >> 8) +#define TXGBE_LINK_UP_TIME 90 + +/* amlite: diff from sapphire */ +#define TXGBE_CFG_PORT_ST_AML_LINK_10G 0x00000010U +#define TXGBE_CFG_PORT_ST_AML_LINK_25G 0x00000008U +#define TXGBE_CFG_PORT_ST_AML_LINK_40G 0x00000004U +#define TXGBE_CFG_PORT_ST_AML_LINK_50G 0x00000002U + +/* LED CTL Bit */ +#define TXGBE_CFG_LED_CTL_LINK_BSY_SEL 0x00000010U +#define TXGBE_CFG_LED_CTL_LINK_100M_SEL 0x00000008U +#define TXGBE_CFG_LED_CTL_LINK_1G_SEL 0x00000004U +#define TXGBE_CFG_LED_CTL_LINK_10G_SEL 0x00000002U +#define TXGBE_CFG_LED_CTL_LINK_UP_SEL 0x00000001U +#define TXGBE_CFG_LED_CTL_LINK_OD_SHIFT 16 + +#define TXGBE_AMLITE_CFG_LED_CTL_LINK_BSY_SEL 0x00000020U +#define TXGBE_AMLITE_CFG_LED_CTL_LINK_10G_SEL 0x00000010U +#define TXGBE_AMLITE_CFG_LED_CTL_LINK_25G_SEL 0x00000008U +#define TXGBE_AMLITE_CFG_LED_CTL_LINK_40G_SEL 0x00000004U +#define TXGBE_AMLITE_CFG_LED_CTL_LINK_50G_SEL 0x00000002U + +/* LED modes */ +#define TXGBE_LED_LINK_UP TXGBE_CFG_LED_CTL_LINK_UP_SEL +#define TXGBE_LED_LINK_10G TXGBE_CFG_LED_CTL_LINK_10G_SEL +#define TXGBE_LED_LINK_ACTIVE TXGBE_CFG_LED_CTL_LINK_BSY_SEL +#define TXGBE_LED_LINK_1G TXGBE_CFG_LED_CTL_LINK_1G_SEL +#define TXGBE_LED_LINK_100M TXGBE_CFG_LED_CTL_LINK_100M_SEL + +#define TXGBE_AMLITE_LED_LINK_ACTIVE TXGBE_AMLITE_CFG_LED_CTL_LINK_BSY_SEL +#define TXGBE_AMLITE_LED_LINK_10G TXGBE_AMLITE_CFG_LED_CTL_LINK_10G_SEL +#define TXGBE_AMLITE_LED_LINK_25G TXGBE_AMLITE_CFG_LED_CTL_LINK_25G_SEL +#define TXGBE_AMLITE_LED_LINK_40G TXGBE_AMLITE_CFG_LED_CTL_LINK_40G_SEL +#define TXGBE_AMLITE_LED_LINK_50G TXGBE_AMLITE_CFG_LED_CTL_LINK_50G_SEL + +/* GPIO Registers */ +#define TXGBE_GPIO_DR 0x14800 +#define TXGBE_GPIO_DDR 0x14804 +#define TXGBE_GPIO_CTL 0x14808 +#define TXGBE_GPIO_INTEN 0x14830 +#define TXGBE_GPIO_INTMASK 0x14834 +#define TXGBE_GPIO_INTTYPE_LEVEL 0x14838 +#define TXGBE_GPIO_INT_POLARITY 0x1483C +#define TXGBE_GPIO_INTSTATUS 0x14844 +#define TXGBE_GPIO_DEBOUNCE 0x14848 +#define TXGBE_GPIO_EOI 0x1484C +#define TXGBE_GPIO_EXT 0x14850 + +/*GPIO bit */ +#define TXGBE_GPIO_DR_0 0x00000001U /* SDP0 Data Value */ +#define TXGBE_GPIO_DR_1 0x00000002U /* SDP1 Data Value */ +#define TXGBE_GPIO_DR_2 0x00000004U /* SDP2 Data Value */ +#define TXGBE_GPIO_DR_3 0x00000008U /* SDP3 Data Value */ +#define TXGBE_GPIO_DR_4 0x00000010U /* SDP4 Data Value */ +#define TXGBE_GPIO_DR_5 0x00000020U /* SDP5 Data Value */ +#define TXGBE_GPIO_DR_6 0x00000040U /* SDP6 Data Value */ +#define TXGBE_GPIO_DR_7 0x00000080U /* SDP7 Data Value */ +#define TXGBE_GPIO_DDR_0 0x00000001U /* SDP0 IO direction */ +#define TXGBE_GPIO_DDR_1 0x00000002U /* SDP1 IO direction */ +#define TXGBE_GPIO_DDR_2 0x00000004U /* SDP1 
IO direction */ +#define TXGBE_GPIO_DDR_3 0x00000008U /* SDP3 IO direction */ +#define TXGBE_GPIO_DDR_4 0x00000010U /* SDP4 IO direction */ +#define TXGBE_GPIO_DDR_5 0x00000020U /* SDP5 IO direction */ +#define TXGBE_GPIO_DDR_6 0x00000040U /* SDP6 IO direction */ +#define TXGBE_GPIO_DDR_7 0x00000080U /* SDP7 IO direction */ +#define TXGBE_GPIO_CTL_SW_MODE 0x00000000U /* SDP software mode */ +#define TXGBE_GPIO_INTEN_1 0x00000002U /* SDP1 interrupt enable */ +#define TXGBE_GPIO_INTEN_2 0x00000004U /* SDP2 interrupt enable */ +#define TXGBE_GPIO_INTEN_3 0x00000008U /* SDP3 interrupt enable */ +#define TXGBE_GPIO_INTEN_4 0x00000010U /* SDP4 interrupt enable */ +#define TXGBE_GPIO_INTEN_5 0x00000020U /* SDP5 interrupt enable */ +#define TXGBE_GPIO_INTEN_6 0x00000040U /* SDP6 interrupt enable */ +#define TXGBE_GPIO_INTTYPE_LEVEL_2 0x00000004U /* SDP2 interrupt type level */ +#define TXGBE_GPIO_INTTYPE_LEVEL_3 0x00000008U /* SDP3 interrupt type level */ +#define TXGBE_GPIO_INTTYPE_LEVEL_4 0x00000010U /* SDP3 interrupt type level */ +#define TXGBE_GPIO_INTTYPE_LEVEL_5 0x00000020U /* SDP5 interrupt type level */ +#define TXGBE_GPIO_INTTYPE_LEVEL_6 0x00000040U /* SDP6 interrupt type level */ +#define TXGBE_GPIO_INT_POLARITY_3 0x00000008U +#define TXGBE_GPIO_INT_POLARITY_4 0x00000010U +#define TXGBE_GPIO_INT_DEBOUNCE_2 0x00000004U +#define TXGBE_GPIO_INT_DEBOUNCE_3 0x00000008U +#define TXGBE_GPIO_INTSTATUS_1 0x00000002U /* SDP1 interrupt status */ +#define TXGBE_GPIO_INTSTATUS_2 0x00000004U /* SDP2 interrupt status */ +#define TXGBE_GPIO_INTSTATUS_3 0x00000008U /* SDP3 interrupt status */ +#define TXGBE_GPIO_INTSTATUS_4 0x00000010U /* SDP4 interrupt status */ +#define TXGBE_GPIO_INTSTATUS_5 0x00000020U /* SDP5 interrupt status */ +#define TXGBE_GPIO_INTSTATUS_6 0x00000040U /* SDP6 interrupt status */ +#define TXGBE_GPIO_EOI_2 0x00000004U /* SDP2 interrupt clear */ +#define TXGBE_GPIO_EOI_3 0x00000008U /* SDP3 interrupt clear */ +#define TXGBE_GPIO_EOI_4 0x00000010U /* SDP3 interrupt clear */ +#define TXGBE_GPIO_EOI_5 0x00000020U /* SDP5 interrupt clear */ +#define TXGBE_GPIO_EOI_6 0x00000040U /* SDP6 interrupt clear */ +#define TXGBE_SFP1_MOD_ABS_LS 0x00000004U /* GPIO_EXT SFP ABSENT*/ +#define TXGBE_SFP1_RX_LOS_LS 0x00000008U /* GPIO_EXT RX LOSS */ + +#define TXGBE_SFP1_MOD_PRST_LS 0x00000010U /* GPIO_EXT SFP ABSENT*/ + +/* TPH registers */ +#define TXGBE_CFG_TPH_TDESC 0x14F00 /* TPH conf for Tx desc write back */ +#define TXGBE_CFG_TPH_RDESC 0x14F04 /* TPH conf for Rx desc write back */ +#define TXGBE_CFG_TPH_RHDR 0x14F08 /* TPH conf for writing Rx pkt header */ +#define TXGBE_CFG_TPH_RPL 0x14F0C /* TPH conf for payload write access */ +/* TPH bit */ +#define TXGBE_CFG_TPH_TDESC_EN 0x80000000U +#define TXGBE_CFG_TPH_TDESC_PH_SHIFT 29 +#define TXGBE_CFG_TPH_TDESC_ST_SHIFT 16 +#define TXGBE_CFG_TPH_RDESC_EN 0x80000000U +#define TXGBE_CFG_TPH_RDESC_PH_SHIFT 29 +#define TXGBE_CFG_TPH_RDESC_ST_SHIFT 16 +#define TXGBE_CFG_TPH_RHDR_EN 0x00008000U +#define TXGBE_CFG_TPH_RHDR_PH_SHIFT 13 +#define TXGBE_CFG_TPH_RHDR_ST_SHIFT 0 +#define TXGBE_CFG_TPH_RPL_EN 0x80000000U +#define TXGBE_CFG_TPH_RPL_PH_SHIFT 29 +#define TXGBE_CFG_TPH_RPL_ST_SHIFT 16 + +/*********************** Transmit DMA registers **************************/ +/* transmit global control */ +#define TXGBE_TDM_CTL 0x18000 +#define TXGBE_TDM_VF_TE(_i) (0x18004 + ((_i) * 4)) +#define TXGBE_TDM_VFTE_CLR(_i) (0x180A0 + ((_i) * 4)) +#define TXGBE_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4)) /* 8 of these 0 - 7 */ +#define TXGBE_TDM_LLQ(_i) (0x18040 + ((_i) 
* 4)) /* 4 of these (0-3) */ +#define TXGBE_TDM_ETYPE_LB_L 0x18050 +#define TXGBE_TDM_ETYPE_LB_H 0x18054 +#define TXGBE_TDM_ETYPE_AS_L 0x18058 +#define TXGBE_TDM_ETYPE_AS_H 0x1805C +#define TXGBE_TDM_MAC_AS_L 0x18060 +#define TXGBE_TDM_MAC_AS_H 0x18064 +#define TXGBE_TDM_VLAN_AS_L 0x18070 +#define TXGBE_TDM_VLAN_AS_H 0x18074 +#define TXGBE_TDM_TCP_FLG_L 0x18078 +#define TXGBE_TDM_TCP_FLG_H 0x1807C +#define TXGBE_TDM_VLAN_INS(_i) (0x18100 + ((_i) * 4)) /* 64 of these 0 - 63 */ +#define TXGBE_TDM_DESC_FATAL(i) (0x0180D0 + (i) * 4) /*0-3*/ + +/* TDM CTL BIT */ +#define TXGBE_TDM_CTL_TE 0x1 /* Transmit Enable */ +#define TXGBE_TDM_CTL_PADDING 0x2 /* Padding byte number for ipsec ESP */ +#define TXGBE_TDM_CTL_VT_SHIFT 16 /* VLAN EtherType */ +/* Per VF Port VLAN insertion rules */ +#define TXGBE_TDM_VLAN_INS_VLANA_DEFAULT 0x40000000U /*Always use default VLAN*/ +#define TXGBE_TDM_VLAN_INS_VLANA_NEVER 0x80000000U /* Never insert VLAN tag */ +#define TXGBE_TDM_VLAN_INS_TPID_SEL_SHIFT 24 /*Tag tpid sel*/ + +#define TXGBE_TDM_RP_CTL 0x18400 +#define TXGBE_TDM_RP_CTL_RST ((0x1) << 0) +#define TXGBE_TDM_RP_CTL_RPEN ((0x1) << 2) +#define TXGBE_TDM_RP_CTL_RLEN ((0x1) << 3) +#define TXGBE_TDM_RP_IDX 0x1820C +#define TXGBE_TDM_RP_RATE 0x18404 +#define TXGBE_TDM_RP_RATE_MIN(v) ((0x3FFF & (v))) +#define TXGBE_TDM_RP_RATE_MAX(v) ((0x3FFF & (v)) << 16) + +#define TXGBE_TDM_RL_QUEUE_IDX 0x18210 +#define TXGBE_TDM_RL_QUEUE_CFG 0x18214 + +#define TXGBE_TDM_RL_VM_IDX 0x18218 +#define TXGBE_TDM_RL_VM_CFG 0x1821C +#define TXGBE_TDM_RL_CFG 0x18400 +#define TXGBE_TDM_RL_EN 0x00000001U +#define TXGBE_TDM_FACTOR_INT 0x00000001U +#define TXGBE_TDM_FACTOR_FRA 0x00000001U +#define TXGBE_TDM_FACTOR_INT_SHIFT 16 +#define TXGBE_TDM_FACTOR_FRA_SHIFT 2 +#define TXGBE_TDM_FACTOR_INT_MASK 0xffff0000 +#define TXGBE_TDM_FACTOR_FRA_MASK 0xfffc + +#define TXGBE_TDM_RL_EN 0x00000001U + +/* qos */ +#define TXGBE_TDM_PBWARB_CTL 0x18200 +#define TXGBE_TDM_PBWARB_CFG(_i) (0x18220 + ((_i) * 4)) /* 8 of these (0-7) */ +#define TXGBE_TDM_MMW 0x18208 +#define TXGBE_TDM_VM_CREDIT(_i) (0x18500 + ((_i) * 4)) +#define TXGBE_TDM_VM_CREDIT_VAL(v) (0x3FF & (v)) +/* fcoe */ +#define TXGBE_TDM_FC_EOF 0x18384 +#define TXGBE_TDM_FC_SOF 0x18380 +/* etag */ +#define TXGBE_TDM_ETAG_INS(_i) (0x18700 + ((_i) * 4)) /* 64 of these 0 - 63 */ +/* statistic */ +#define TXGBE_TDM_DRP_CNT 0x18300 +#define TXGBE_TDM_SEC_DRP 0x18304 +#define TXGBE_TDM_PKT_CNT 0x18308 +#define TXGBE_TDM_OS2BMC_CNT 0x18314 + +/**************************** Receive DMA registers **************************/ +/* receive control */ +#define TXGBE_RDM_ARB_CTL 0x12000 +#define TXGBE_RDM_VF_RE(_i) (0x12004 + ((_i) * 4)) +#define TXGBE_RDM_VFRE_CLR(_i) (0x120A0 + ((_i) * 4)) +#define TXGBE_RDM_RSC_CTL 0x1200C +#define TXGBE_RDM_ARB_CFG(_i) (0x12040 + ((_i) * 4)) /* 8 of these (0-7) */ +#define TXGBE_RDM_PF_QDE(_i) (0x12080 + ((_i) * 4)) +#define TXGBE_RDM_PF_HIDE(_i) (0x12090 + ((_i) * 4)) +/* VFRE bitmask */ +#define TXGBE_RDM_VF_RE_ENABLE_ALL 0xFFFFFFFFU + +#define TXGBE_RDM_DCACHE_CTL 0x120A8 +#define TXGBE_RDM_DCACHE_CTL_EN 0x1 +#define TXGBE_RDM_RSC_CTL_FREE_CNT_DIS 0x100 + +/* amlite: rdm_rsc_ctl_free_ctl */ +#define TXGBE_RDM_RSC_CTL_FREE_CTL 0x00000080U + +/* FCoE DMA Context Registers */ +#define TXGBE_RDM_FCPTRL 0x12410 +#define TXGBE_RDM_FCPTRH 0x12414 +#define TXGBE_RDM_FCBUF 0x12418 +#define TXGBE_RDM_FCBUF_VALID ((0x1)) /* DMA Context Valid */ +#define TXGBE_RDM_FCBUF_SIZE(_v) (((_v) & 0x3) << 3) /* User Buffer Size */ +#define TXGBE_RDM_FCBUF_COUNT(_v) (((_v) & 0xFF) << 8) /* 
Num of User Buf */ +#define TXGBE_RDM_FCBUF_OFFSET(_v) (((_v) & 0xFFFF) << 16) /* User Buf Offset*/ +#define TXGBE_RDM_FCRW 0x12420 +#define TXGBE_RDM_FCRW_FCSEL(_v) (((_v) & 0x1FF)) /* FC X_ID: 11 bits */ +#define TXGBE_RDM_FCRW_WE ((0x1) << 14) /* Write enable */ +#define TXGBE_RDM_FCRW_RE ((0x1) << 15) /* Read enable */ +#define TXGBE_RDM_FCRW_LASTSIZE(_v) (((_v) & 0xFFFF) << 16) + +/* statistic */ +#define TXGBE_RDM_DRP_PKT 0x12500 +#define TXGBE_RDM_BMC2OS_CNT 0x12510 + +/***************************** RDB registers *********************************/ +/* Flow Control Registers */ +#define TXGBE_RDB_RFCV(_i) (0x19200 + ((_i) * 4)) /* 4 of these (0-3)*/ +#define TXGBE_RDB_RFCL(_i) (0x19220 + ((_i) * 4)) /* 8 of these (0-7)*/ +#define TXGBE_RDB_RFCH(_i) (0x19260 + ((_i) * 4)) /* 8 of these (0-7)*/ +#define TXGBE_RDB_RFCRT 0x192A0 +#define TXGBE_RDB_RFCC 0x192A4 +/* receive packet buffer */ +#define TXGBE_RDB_PB_WRAP 0x19004 +#define TXGBE_RDB_PB_SZ(_i) (0x19020 + ((_i) * 4)) +#define TXGBE_RDB_PB_CTL 0x19000 +#define TXGBE_RDB_UP2TC 0x19008 +#define TXGBE_RDB_PB_SZ_SHIFT 10 +#define TXGBE_RDB_PB_SZ_MASK 0x000FFC00U +/* lli interrupt */ +#define TXGBE_RDB_LLI_THRE 0x19080 +#define TXGBE_RDB_LLI_THRE_SZ(_v) ((0xFFF & (_v))) +#define TXGBE_RDB_LLI_THRE_UP(_v) ((0x7 & (_v)) << 16) +#define TXGBE_RDB_LLI_THRE_UP_SHIFT 16 + +/* ring assignment */ +#define TXGBE_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4)) +#define TXGBE_RDB_RSSTBL(_i) (0x19400 + ((_i) * 4)) +#define TXGBE_RDB_RSSRK(_i) (0x19480 + ((_i) * 4)) +#define TXGBE_RDB_RSS_TC 0x194F0 +#define TXGBE_RDB_RA_CTL 0x194F4 +#define TXGBE_RDB_5T_SA(_i) (0x19600 + ((_i) * 4)) /* Src Addr Q Filter */ +#define TXGBE_RDB_5T_DA(_i) (0x19800 + ((_i) * 4)) /* Dst Addr Q Filter */ +#define TXGBE_RDB_5T_SDP(_i) (0x19A00 + ((_i) * 4)) /*Src Dst Addr Q Filter*/ +#define TXGBE_RDB_5T_CTL0(_i) (0x19C00 + ((_i) * 4)) /* Five Tuple Q Filter */ +#define TXGBE_RDB_ETYPE_CLS(_i) (0x19100 + ((_i) * 4)) /* EType Q Select */ +#define TXGBE_RDB_SYN_CLS 0x19130 +#define TXGBE_RDB_5T_CTL1(_i) (0x19E00 + ((_i) * 4)) /*128 of these (0-127)*/ +/* Flow Director registers */ +#define TXGBE_RDB_FDIR_CTL 0x19500 +#define TXGBE_RDB_FDIR_HKEY 0x19568 +#define TXGBE_RDB_FDIR_SKEY 0x1956C +#define TXGBE_RDB_FDIR_DA4_MSK 0x1953C +#define TXGBE_RDB_FDIR_SA4_MSK 0x19540 +#define TXGBE_RDB_FDIR_TCP_MSK 0x19544 +#define TXGBE_RDB_FDIR_UDP_MSK 0x19548 +#define TXGBE_RDB_FDIR_SCTP_MSK 0x19560 +#define TXGBE_RDB_FDIR_IP6_MSK 0x19574 +#define TXGBE_RDB_FDIR_OTHER_MSK 0x19570 +#define TXGBE_RDB_FDIR_FLEX_CFG(_i) (0x19580 + ((_i) * 4)) +/* Flow Director Stats registers */ +#define TXGBE_RDB_FDIR_FREE 0x19538 +#define TXGBE_RDB_FDIR_LEN 0x1954C +#define TXGBE_RDB_FDIR_USE_ST 0x19550 +#define TXGBE_RDB_FDIR_FAIL_ST 0x19554 +#define TXGBE_RDB_FDIR_MATCH 0x19558 +#define TXGBE_RDB_FDIR_MISS 0x1955C +/* Flow Director Programming registers */ +#define TXGBE_RDB_FDIR_IP6(_i) (0x1950C + ((_i) * 4)) /* 3 of these (0-2)*/ +#define TXGBE_RDB_FDIR_SA 0x19518 +#define TXGBE_RDB_FDIR_DA 0x1951C +#define TXGBE_RDB_FDIR_PORT 0x19520 +#define TXGBE_RDB_FDIR_FLEX 0x19524 +#define TXGBE_RDB_FDIR_HASH 0x19528 +#define TXGBE_RDB_FDIR_CMD 0x1952C +/* VM RSS */ +#define TXGBE_RDB_VMRSSRK(_i, _p) (0x1A000 + ((_i) * 4) + ((_p) * 0x40)) +#define TXGBE_RDB_VMRSSTBL(_i, _p) (0x1B000 + ((_i) * 4) + ((_p) * 0x40)) +/* FCoE Redirection */ +#define TXGBE_RDB_FCRE_TBL_SIZE (8) /* Max entries in FCRETA */ +#define TXGBE_RDB_FCRE_CTL 0x19140 +#define TXGBE_RDB_FCRE_CTL_ENA ((0x1)) /* FCoE Redir Table Enable */ +#define 
TXGBE_RDB_FCRE_TBL(_i) (0x19160 + ((_i) * 4)) +#define TXGBE_RDB_FCRE_TBL_RING(_v) (((_v) & 0x7F)) /* output queue number */ +/* statistic */ +#define TXGBE_RDB_MPCNT(_i) (0x19040 + ((_i) * 4)) /* 8 of 3FA0-3FBC*/ +#define TXGBE_RDB_PKT_CNT 0x19060 +#define TXGBE_RDB_DRP_CNT 0x19068 +#define TXGBE_RDB_LXONTXC 0x1921C +#define TXGBE_RDB_LXOFFTXC 0x19218 +#define TXGBE_RDB_PXON2OFFCNT(_i) (0x19280 + ((_i) * 4)) /* 8 of these */ +#define TXGBE_RDB_PXONTXC(_i) (0x192E0 + ((_i) * 4)) /* 8 of 3F00-3F1C*/ +#define TXGBE_RDB_PXOFFTXC(_i) (0x192C0 + ((_i) * 4)) /* 8 of 3F20-3F3C*/ +#define TXGBE_RDB_PFCMACDAL 0x19210 +#define TXGBE_RDB_PFCMACDAH 0x19214 +#define TXGBE_RDB_TXSWERR 0x1906C +#define TXGBE_RDB_TXSWERR_TB_FREE 0x3FF +/* rdb_pl_cfg reg mask */ +#define TXGBE_RDB_PL_CFG_L4HDR 0x2 +#define TXGBE_RDB_PL_CFG_L3HDR 0x4 +#define TXGBE_RDB_PL_CFG_L2HDR 0x8 +#define TXGBE_RDB_PL_CFG_TUN_OUTER_L2HDR 0x20 +#define TXGBE_RDB_PL_CFG_TUN_TUNHDR 0x10 +#define TXGBE_RDB_PL_CFG_RSS_PL_MASK 0x7 +#define TXGBE_RDB_PL_CFG_RSS_PL_SHIFT 29 +#define TXGBE_RDB_PL_CFG_RSS_EN 0x1000000 +#define TXGBE_RDB_PL_CFG_RSS_MASK 0xFF0000 +/* RQTC Bit Masks and Shifts */ +#define TXGBE_RDB_RSS_TC_SHIFT_TC(_i) ((_i) * 4) +#define TXGBE_RDB_RSS_TC_TC0_MASK (0x7 << 0) +#define TXGBE_RDB_RSS_TC_TC1_MASK (0x7 << 4) +#define TXGBE_RDB_RSS_TC_TC2_MASK (0x7 << 8) +#define TXGBE_RDB_RSS_TC_TC3_MASK (0x7 << 12) +#define TXGBE_RDB_RSS_TC_TC4_MASK (0x7 << 16) +#define TXGBE_RDB_RSS_TC_TC5_MASK (0x7 << 20) +#define TXGBE_RDB_RSS_TC_TC6_MASK (0x7 << 24) +#define TXGBE_RDB_RSS_TC_TC7_MASK (0x7 << 28) +/* Packet Buffer Initialization */ +#define TXGBE_MAX_PACKET_BUFFERS 8 +#define TXGBE_RDB_PB_SZ_48KB 0x00000030U /* 48KB Packet Buffer */ +#define TXGBE_RDB_PB_SZ_64KB 0x00000040U /* 64KB Packet Buffer */ +#define TXGBE_RDB_PB_SZ_80KB 0x00000050U /* 80KB Packet Buffer */ +#define TXGBE_RDB_PB_SZ_128KB 0x00000080U /* 128KB Packet Buffer */ +#define TXGBE_RDB_PB_SZ_MAX 0x00000200U /* 512KB Packet Buffer */ + + +/* Packet buffer allocation strategies */ +enum { + PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */ +#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL + PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */ +#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED +}; + + +/* FCRTL Bit Masks */ +#define TXGBE_RDB_RFCL_XONE 0x80000000U /* XON enable */ +#define TXGBE_RDB_RFCH_XOFFE 0x80000000U /* Packet buffer fc enable */ +/* FCCFG Bit Masks */ +#define TXGBE_RDB_RFCC_RFCE_802_3X 0x00000008U /* Tx link FC enable */ +#define TXGBE_RDB_RFCC_RFCE_PRIORITY 0x00000010U /* Tx priority FC enable */ + +/* Immediate Interrupt Rx (A.K.A. 
Low Latency Interrupt) */ +#define TXGBE_RDB_5T_CTL1_SIZE_BP 0x00001000U /* Packet size bypass */ +#define TXGBE_RDB_5T_CTL1_LLI 0x00100000U /* Enables low latency Int */ +#define TXGBE_RDB_5T_CTL1_RING_MASK 0x0FE00000U /* Rx queue index mask */ +#define TXGBE_RDB_5T_CTL1_RING_SHIFT 21 +#define TXGBE_RDB_LLI_THRE_PRIORITY_MASK 0x00070000U /* VLAN priority mask */ +#define TXGBE_RDB_LLI_THRE_PRIORITY_EN 0x00080000U /* VLAN priority enable */ +#define TXGBE_RDB_LLI_THRE_CMN_EN 0x00100000U /* cmn packet received */ + +#define TXGBE_MAX_RDB_5T_CTL0_FILTERS 128 +#define TXGBE_RDB_5T_CTL0_PROTOCOL_MASK 0x00000003U +#define TXGBE_RDB_5T_CTL0_PROTOCOL_TCP 0x00000000U +#define TXGBE_RDB_5T_CTL0_PROTOCOL_UDP 0x00000001U +#define TXGBE_RDB_5T_CTL0_PROTOCOL_SCTP 2 +#define TXGBE_RDB_5T_CTL0_PRIORITY_MASK 0x00000007U +#define TXGBE_RDB_5T_CTL0_PRIORITY_SHIFT 2 +#define TXGBE_RDB_5T_CTL0_POOL_MASK 0x0000003FU +#define TXGBE_RDB_5T_CTL0_POOL_SHIFT 8 +#define TXGBE_RDB_5T_CTL0_5TUPLE_MASK_MASK 0x0000001FU +#define TXGBE_RDB_5T_CTL0_5TUPLE_MASK_SHIFT 25 +#define TXGBE_RDB_5T_CTL0_SOURCE_ADDR_MASK 0x1E +#define TXGBE_RDB_5T_CTL0_DEST_ADDR_MASK 0x1D +#define TXGBE_RDB_5T_CTL0_SOURCE_PORT_MASK 0x1B +#define TXGBE_RDB_5T_CTL0_DEST_PORT_MASK 0x17 +#define TXGBE_RDB_5T_CTL0_PROTOCOL_COMP_MASK 0x0F +#define TXGBE_RDB_5T_CTL0_POOL_MASK_EN 0x40000000U +#define TXGBE_RDB_5T_CTL0_QUEUE_ENABLE 0x80000000U + +#define TXGBE_RDB_ETYPE_CLS_RX_QUEUE 0x007F0000U /* bits 22:16 */ +#define TXGBE_RDB_ETYPE_CLS_RX_QUEUE_SHIFT 16 +#define TXGBE_RDB_ETYPE_CLS_LLI 0x20000000U /* bit 29 */ +#define TXGBE_RDB_ETYPE_CLS_QUEUE_EN 0x80000000U /* bit 31 */ + +/* Receive Config masks */ +#define TXGBE_RDB_PB_CTL_RXEN (0x80000000) /* Enable Receiver */ +#define TXGBE_RDB_PB_CTL_DISABLED 0x1 + +#define TXGBE_RDB_RA_CTL_RSS_EN 0x00000004U /* RSS Enable */ +#define TXGBE_RDB_RA_CTL_MULTI_RSS 0x00000001U /* VF RSS Hash Rule Enable */ +#define TXGBE_RDB_RA_CTL_RSS_MASK 0xFFFF0000U +#define TXGBE_RDB_RA_CTL_RSS_IPV4_TCP 0x00010000U +#define TXGBE_RDB_RA_CTL_RSS_IPV4 0x00020000U +#define TXGBE_RDB_RA_CTL_RSS_IPV6 0x00100000U +#define TXGBE_RDB_RA_CTL_RSS_IPV6_TCP 0x00200000U +#define TXGBE_RDB_RA_CTL_RSS_IPV4_UDP 0x00400000U +#define TXGBE_RDB_RA_CTL_RSS_IPV6_UDP 0x00800000U + +enum txgbe_fdir_pballoc_type { + TXGBE_FDIR_PBALLOC_NONE = 0, + TXGBE_FDIR_PBALLOC_64K = 1, + TXGBE_FDIR_PBALLOC_128K = 2, + TXGBE_FDIR_PBALLOC_256K = 3, +}; + +/* Flow Director register values */ +#define TXGBE_RDB_FDIR_CTL_PBALLOC_64K 0x00000001U +#define TXGBE_RDB_FDIR_CTL_PBALLOC_128K 0x00000002U +#define TXGBE_RDB_FDIR_CTL_PBALLOC_256K 0x00000003U +#define TXGBE_RDB_FDIR_CTL_INIT_DONE 0x00000008U +#define TXGBE_RDB_FDIR_CTL_PERFECT_MATCH 0x00000010U +#define TXGBE_RDB_FDIR_CTL_REPORT_STATUS 0x00000020U +#define TXGBE_RDB_FDIR_CTL_REPORT_STATUS_ALWAYS 0x00000080U +#define TXGBE_RDB_FDIR_CTL_DROP_Q_SHIFT 8 +#define TXGBE_RDB_FDIR_CTL_FILTERMODE_SHIFT 21 +#define TXGBE_RDB_FDIR_CTL_MAX_LENGTH_SHIFT 24 +#define TXGBE_RDB_FDIR_CTL_HASH_BITS_SHIFT 20 +#define TXGBE_RDB_FDIR_CTL_FULL_THRESH_MASK 0xF0000000U +#define TXGBE_RDB_FDIR_CTL_FULL_THRESH_SHIFT 28 + + +#define TXGBE_RDB_FDIR_TCP_MSK_DPORTM_SHIFT 16 +#define TXGBE_RDB_FDIR_UDP_MSK_DPORTM_SHIFT 16 +#define TXGBE_RDB_FDIR_IP6_MSK_DIPM_SHIFT 16 +#define TXGBE_RDB_FDIR_OTHER_MSK_POOL 0x00000004U +#define TXGBE_RDB_FDIR_OTHER_MSK_L4P 0x00000008U +#define TXGBE_RDB_FDIR_OTHER_MSK_L3P 0x00000010U +#define TXGBE_RDB_FDIR_OTHER_MSK_TUN_TYPE 0x00000020U +#define TXGBE_RDB_FDIR_OTHER_MSK_TUN_OUTIP 0x00000040U +#define 
TXGBE_RDB_FDIR_OTHER_MSK_TUN 0x00000080U + +#define TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC 0x00000000U +#define TXGBE_RDB_FDIR_FLEX_CFG_BASE_IP 0x00000001U +#define TXGBE_RDB_FDIR_FLEX_CFG_BASE_L4_HDR 0x00000002U +#define TXGBE_RDB_FDIR_FLEX_CFG_BASE_L4_PAYLOAD 0x00000003U +#define TXGBE_RDB_FDIR_FLEX_CFG_BASE_MSK 0x00000003U +#define TXGBE_RDB_FDIR_FLEX_CFG_MSK 0x00000004U +#define TXGBE_RDB_FDIR_FLEX_CFG_OFST 0x000000F8U +#define TXGBE_RDB_FDIR_FLEX_CFG_OFST_SHIFT 3 +#define TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT 8 + +#define TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT 16 +#define TXGBE_RDB_FDIR_FLEX_FLEX_SHIFT 16 +#define TXGBE_RDB_FDIR_HASH_BUCKET_VALID_SHIFT 15 +#define TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX_SHIFT 16 + +#define TXGBE_RDB_FDIR_CMD_CMD_MASK 0x00000003U +#define TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW 0x00000001U +#define TXGBE_RDB_FDIR_CMD_CMD_REMOVE_FLOW 0x00000002U +#define TXGBE_RDB_FDIR_CMD_CMD_QUERY_REM_FILT 0x00000003U +#define TXGBE_RDB_FDIR_CMD_FILTER_VALID 0x00000004U +#define TXGBE_RDB_FDIR_CMD_FILTER_UPDATE 0x00000008U +#define TXGBE_RDB_FDIR_CMD_IPv6DMATCH 0x00000010U +#define TXGBE_RDB_FDIR_CMD_L4TYPE_UDP 0x00000020U +#define TXGBE_RDB_FDIR_CMD_L4TYPE_TCP 0x00000040U +#define TXGBE_RDB_FDIR_CMD_L4TYPE_SCTP 0x00000060U +#define TXGBE_RDB_FDIR_CMD_IPV6 0x00000080U +#define TXGBE_RDB_FDIR_CMD_CLEARHT 0x00000100U +#define TXGBE_RDB_FDIR_CMD_DROP 0x00000200U +#define TXGBE_RDB_FDIR_CMD_INT 0x00000400U +#define TXGBE_RDB_FDIR_CMD_LAST 0x00000800U +#define TXGBE_RDB_FDIR_CMD_COLLISION 0x00001000U +#define TXGBE_RDB_FDIR_CMD_QUEUE_EN 0x00008000U +#define TXGBE_RDB_FDIR_CMD_FLOW_TYPE_SHIFT 5 +#define TXGBE_RDB_FDIR_CMD_RX_QUEUE_SHIFT 16 +#define TXGBE_RDB_FDIR_CMD_TUNNEL_FILTER_SHIFT 23 +#define TXGBE_RDB_FDIR_CMD_VT_POOL_SHIFT 24 +#define TXGBE_RDB_FDIR_INIT_DONE_POLL 10 +#define TXGBE_RDB_FDIR_CMD_CMD_POLL 10 +#define TXGBE_RDB_FDIR_CMD_TUNNEL_FILTER 0x00800000U +#define TXGBE_RDB_FDIR_DROP_QUEUE 127 +#define TXGBE_FDIR_INIT_DONE_POLL 10 + +/******************************* PSR Registers *******************************/ +/* psr control */ +#define TXGBE_PSR_CTL 0x15000 +#define TXGBE_PSR_VLAN_CTL 0x15088 +#define TXGBE_PSR_VM_CTL 0x151B0 +#define TXGBE_PSR_PKT_CNT 0x151B8 +#define TXGBE_PSR_DBG_DRP_CNT 0x151C0 +/* Header split receive */ +#define TXGBE_PSR_CTL_SW_EN 0x00040000U +#define TXGBE_PSR_CTL_RSC_DIS 0x00010000U +#define TXGBE_PSR_CTL_RSC_ACK 0x00020000U +#define TXGBE_PSR_CTL_PCSD 0x00002000U +#define TXGBE_PSR_CTL_IPPCSE 0x00001000U +#define TXGBE_PSR_CTL_BAM 0x00000400U +#define TXGBE_PSR_CTL_UPE 0x00000200U +#define TXGBE_PSR_CTL_MPE 0x00000100U +#define TXGBE_PSR_CTL_MFE 0x00000080U +#define TXGBE_PSR_CTL_MO 0x00000060U +#define TXGBE_PSR_CTL_TPE 0x00000010U +#define TXGBE_PSR_CTL_MO_SHIFT 5 +/* VT_CTL bitmasks */ +#define TXGBE_PSR_VM_CTL_DIS_DEFPL 0x20000000U /* disable default pool */ +#define TXGBE_PSR_VM_CTL_REPLEN 0x40000000U /* replication enabled */ +#define TXGBE_PSR_VM_CTL_POOL_SHIFT 7 +#define TXGBE_PSR_VM_CTL_POOL_MASK (0x3F << TXGBE_PSR_VM_CTL_POOL_SHIFT) +/* VLAN Control Bit Masks */ +#define TXGBE_PSR_VLAN_CTL_VET 0x0000FFFFU /* bits 0-15 */ +#define TXGBE_PSR_VLAN_CTL_CFI 0x10000000U /* bit 28 */ +#define TXGBE_PSR_VLAN_CTL_CFIEN 0x20000000U /* bit 29 */ +#define TXGBE_PSR_VLAN_CTL_VFE 0x40000000U /* bit 30 */ + +/* vm L2 control */ +#define TXGBE_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4)) +/* VMOLR bitmasks */ +#define TXGBE_PSR_VM_L2CTL_LBDIS 0x00000002U /* disable loopback */ +#define TXGBE_PSR_VM_L2CTL_LLB 0x00000004U /* local pool loopback */ +#define 
TXGBE_PSR_VM_L2CTL_UPE 0x00000010U /* unicast promiscuous */ +#define TXGBE_PSR_VM_L2CTL_TPE 0x00000020U /* ETAG promiscuous */ +#define TXGBE_PSR_VM_L2CTL_VACC 0x00000040U /* accept nomatched vlan */ +#define TXGBE_PSR_VM_L2CTL_VPE 0x00000080U /* vlan promiscuous mode */ +#define TXGBE_PSR_VM_L2CTL_AUPE 0x00000100U /* accept untagged packets */ +#define TXGBE_PSR_VM_L2CTL_ROMPE 0x00000200U /*accept packets in MTA tbl*/ +#define TXGBE_PSR_VM_L2CTL_ROPE 0x00000400U /* accept packets in UC tbl*/ +#define TXGBE_PSR_VM_L2CTL_BAM 0x00000800U /* accept broadcast packets*/ +#define TXGBE_PSR_VM_L2CTL_MPE 0x00001000U /* multicast promiscuous */ + +/* etype switcher 1st stage */ +#define TXGBE_PSR_ETYPE_SWC(_i) (0x15128 + ((_i) * 4)) /* EType Queue Filter */ +/* ETYPE Queue Filter/Select Bit Masks */ +#define TXGBE_MAX_PSR_ETYPE_SWC_FILTERS 2 /* now only support 2 custom filters */ +#define TXGBE_PSR_ETYPE_SWC_FCOE 0x08000000U /* bit 27 */ +#define TXGBE_PSR_ETYPE_SWC_TX_ANTISPOOF 0x20000000U /* bit 29 */ +#define TXGBE_PSR_ETYPE_SWC_1588 0x40000000U /* bit 30 */ +#define TXGBE_PSR_ETYPE_SWC_FILTER_EN 0x80000000U /* bit 31 */ +#define TXGBE_PSR_ETYPE_SWC_POOL_ENABLE (1 << 26) /* bit 26 */ +#define TXGBE_PSR_ETYPE_SWC_POOL_SHIFT 20 +/* + * ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters + * here!! + * + * Current filters: + * EAPOL 802.1x (0x888e): Filter 0 + * FCoE (0x8906): Filter 2 + * 1588 (0x88f7): Filter 3 + * FIP (0x8914): Filter 4 + * LLDP (0x88CC): Filter 5 + * LACP (0x8809): Filter 6 + * FC (0x8808): Filter 7 + */ +#define TXGBE_PSR_ETYPE_SWC_FILTER_EAPOL 0 +#define TXGBE_PSR_ETYPE_SWC_FILTER_FCOE 2 +#define TXGBE_PSR_ETYPE_SWC_FILTER_1588 3 +#define TXGBE_PSR_ETYPE_SWC_FILTER_FIP 4 +#define TXGBE_PSR_ETYPE_SWC_FILTER_LLDP 5 +#define TXGBE_PSR_ETYPE_SWC_FILTER_LACP 6 +#define TXGBE_PSR_ETYPE_SWC_FILTER_FC 7 + +/* mcast/ucast overflow tbl */ +#define TXGBE_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4)) +#define TXGBE_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4)) + +/* vlan tbl */ +#define TXGBE_PSR_VLAN_TBL(_i) (0x16000 + ((_i) * 4)) + +/* mac switcher */ +#define TXGBE_PSR_MAC_SWC_AD_L 0x16200 +#define TXGBE_PSR_MAC_SWC_AD_H 0x16204 +#define TXGBE_PSR_MAC_SWC_VM_L 0x16208 +#define TXGBE_PSR_MAC_SWC_VM_H 0x1620C +#define TXGBE_PSR_MAC_SWC_IDX 0x16210 +/* RAH */ +#define TXGBE_PSR_MAC_SWC_AD_H_AD(v) (((v) & 0xFFFF)) +#define TXGBE_PSR_MAC_SWC_AD_H_ADTYPE(v) (((v) & 0x1) << 30) +#define TXGBE_PSR_MAC_SWC_AD_H_AV 0x80000000U +#define TXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFFU + +/* vlan switch */ +#define TXGBE_PSR_VLAN_SWC 0x16220 +#define TXGBE_PSR_VLAN_SWC_VM_L 0x16224 +#define TXGBE_PSR_VLAN_SWC_VM_H 0x16228 +#define TXGBE_PSR_VLAN_SWC_IDX 0x16230 /* 64 vlan entries */ +/* VLAN pool filtering masks */ +#define TXGBE_PSR_VLAN_SWC_VIEN 0x80000000U /* filter is valid */ +#define TXGBE_PSR_VLAN_SWC_ENTRIES 64 +#define TXGBE_PSR_VLAN_SWC_VLANID_MASK 0x00000FFFU +#define TXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ + +/* cloud switch */ +#define TXGBE_PSR_CL_SWC_DST0 0x16240 +#define TXGBE_PSR_CL_SWC_DST1 0x16244 +#define TXGBE_PSR_CL_SWC_DST2 0x16248 +#define TXGBE_PSR_CL_SWC_DST3 0x1624c +#define TXGBE_PSR_CL_SWC_KEY 0x16250 +#define TXGBE_PSR_CL_SWC_CTL 0x16254 +#define TXGBE_PSR_CL_SWC_VM_L 0x16258 +#define TXGBE_PSR_CL_SWC_VM_H 0x1625c +#define TXGBE_PSR_CL_SWC_IDX 0x16260 + +#define TXGBE_PSR_CL_SWC_CTL_VLD 0x80000000U +#define TXGBE_PSR_CL_SWC_CTL_DST_MSK 0x00000002U +#define TXGBE_PSR_CL_SWC_CTL_KEY_MSK 0x00000001U + + +/* FCoE 
SOF/EOF */ +#define TXGBE_PSR_FC_EOF 0x15158 +#define TXGBE_PSR_FC_SOF 0x151F8 +/* FCoE Filter Context Registers */ +#define TXGBE_PSR_FC_FLT_CTXT 0x15108 +#define TXGBE_PSR_FC_FLT_CTXT_VALID ((0x1)) /* Filter Context Valid */ +#define TXGBE_PSR_FC_FLT_CTXT_FIRST ((0x1) << 1) /* Filter First */ +#define TXGBE_PSR_FC_FLT_CTXT_WR ((0x1) << 2) /* Write/Read Context */ +#define TXGBE_PSR_FC_FLT_CTXT_SEQID(_v) (((_v) & 0xFF) << 8) /* Sequence ID */ +#define TXGBE_PSR_FC_FLT_CTXT_SEQCNT(_v) (((_v) & 0xFFFF) << 16) /* Seq Count */ + +#define TXGBE_PSR_FC_FLT_RW 0x15110 +#define TXGBE_PSR_FC_FLT_RW_FCSEL(_v) (((_v) & 0x1FF)) /* FC OX_ID: 11 bits */ +#define TXGBE_PSR_FC_FLT_RW_RVALDT ((0x1) << 13) /* Fast Re-Validation */ +#define TXGBE_PSR_FC_FLT_RW_WE ((0x1) << 14) /* Write Enable */ +#define TXGBE_PSR_FC_FLT_RW_RE ((0x1) << 15) /* Read Enable */ + +#define TXGBE_PSR_FC_PARAM 0x151D8 + +/* FCoE Receive Control */ +#define TXGBE_PSR_FC_CTL 0x15100 +#define TXGBE_PSR_FC_CTL_FCOELLI ((0x1)) /* Low latency interrupt */ +#define TXGBE_PSR_FC_CTL_SAVBAD ((0x1) << 1) /* Save Bad Frames */ +#define TXGBE_PSR_FC_CTL_FRSTRDH ((0x1) << 2) /* EN 1st Read Header */ +#define TXGBE_PSR_FC_CTL_LASTSEQH ((0x1) << 3) /* EN Last Header in Seq */ +#define TXGBE_PSR_FC_CTL_ALLH ((0x1) << 4) /* EN All Headers */ +#define TXGBE_PSR_FC_CTL_FRSTSEQH ((0x1) << 5) /* EN 1st Seq. Header */ +#define TXGBE_PSR_FC_CTL_ICRC ((0x1) << 6) /* Ignore Bad FC CRC */ +#define TXGBE_PSR_FC_CTL_FCCRCBO ((0x1) << 7) /* FC CRC Byte Ordering */ +#define TXGBE_PSR_FC_CTL_FCOEVER(_v) (((_v) & 0xF) << 8) /* FCoE Version */ + +/* Management */ +#define TXGBE_PSR_MNG_FIT_CTL 0x15820 +/* Management Bit Fields and Masks */ +#define TXGBE_PSR_MNG_FIT_CTL_MPROXYE 0x40000000U /* Management Proxy Enable*/ +#define TXGBE_PSR_MNG_FIT_CTL_RCV_TCO_EN 0x00020000U /* Rcv TCO packet enable */ +#define TXGBE_PSR_MNG_FIT_CTL_EN_BMC2OS 0x10000000U /* Ena BMC2OS and OS2BMC + *traffic */ +#define TXGBE_PSR_MNG_FIT_CTL_EN_BMC2OS_SHIFT 28 + +#define TXGBE_PSR_MNG_FLEX_SEL 0x1582C +#define TXGBE_PSR_MNG_FLEX_DW_L(_i) (0x15A00 + ((_i) * 16)) +#define TXGBE_PSR_MNG_FLEX_DW_H(_i) (0x15A04 + ((_i) * 16)) +#define TXGBE_PSR_MNG_FLEX_MSK(_i) (0x15A08 + ((_i) * 16)) + +/* mirror */ +#define TXGBE_PSR_MR_CTL(_i) (0x15B00 + ((_i) * 4)) +#define TXGBE_PSR_MR_VLAN_L(_i) (0x15B10 + ((_i) * 8)) +#define TXGBE_PSR_MR_VLAN_H(_i) (0x15B14 + ((_i) * 8)) +#define TXGBE_PSR_MR_VM_L(_i) (0x15B30 + ((_i) * 8)) +#define TXGBE_PSR_MR_VM_H(_i) (0x15B34 + ((_i) * 8)) + +/* 1588 */ +#define TXGBE_PSR_1588_CTL 0x15188 /* Rx Time Sync Control register - RW */ +#define TXGBE_PSR_1588_STMPL 0x151E8 /* Rx timestamp Low - RO */ +#define TXGBE_PSR_1588_STMPH 0x151A4 /* Rx timestamp High - RO */ +#define TXGBE_PSR_1588_ATTRL 0x151A0 /* Rx timestamp attribute low - RO */ +#define TXGBE_PSR_1588_ATTRH 0x151A8 /* Rx timestamp attribute high - RO */ +#define TXGBE_PSR_1588_MSGTYPE 0x15120 /* RX message type register low - RW */ +/* 1588 CTL Bit */ +#define TXGBE_PSR_1588_CTL_VALID 0x00000001U /* Rx timestamp valid */ +#define TXGBE_PSR_1588_CTL_TYPE_MASK 0x0000000EU /* Rx type mask */ +#define TXGBE_PSR_1588_CTL_TYPE_L2_V2 0x00 +#define TXGBE_PSR_1588_CTL_TYPE_L4_V1 0x02 +#define TXGBE_PSR_1588_CTL_TYPE_L2_L4_V2 0x04 +#define TXGBE_PSR_1588_CTL_TYPE_EVENT_V2 0x0A +#define TXGBE_PSR_1588_CTL_ENABLED 0x00000010U /* Rx Timestamp enabled*/ +/* 1588 msg type bit */ +#define TXGBE_PSR_1588_MSGTYPE_V1_CTRLT_MASK 0x000000FFU +#define TXGBE_PSR_1588_MSGTYPE_V1_SYNC_MSG 0x00 +#define 
TXGBE_PSR_1588_MSGTYPE_V1_DELAY_REQ_MSG 0x01 +#define TXGBE_PSR_1588_MSGTYPE_V1_FOLLOWUP_MSG 0x02 +#define TXGBE_PSR_1588_MSGTYPE_V1_DELAY_RESP_MSG 0x03 +#define TXGBE_PSR_1588_MSGTYPE_V1_MGMT_MSG 0x04 +#define TXGBE_PSR_1588_MSGTYPE_V2_MSGID_MASK 0x0000FF00U +#define TXGBE_PSR_1588_MSGTYPE_V2_SYNC_MSG 0x0000 +#define TXGBE_PSR_1588_MSGTYPE_V2_DELAY_REQ_MSG 0x0100 +#define TXGBE_PSR_1588_MSGTYPE_V2_PDELAY_REQ_MSG 0x0200 +#define TXGBE_PSR_1588_MSGTYPE_V2_PDELAY_RESP_MSG 0x0300 +#define TXGBE_PSR_1588_MSGTYPE_V2_FOLLOWUP_MSG 0x0800 +#define TXGBE_PSR_1588_MSGTYPE_V2_DELAY_RESP_MSG 0x0900 +#define TXGBE_PSR_1588_MSGTYPE_V2_PDELAY_FOLLOWUP_MSG 0x0A00 +#define TXGBE_PSR_1588_MSGTYPE_V2_ANNOUNCE_MSG 0x0B00 +#define TXGBE_PSR_1588_MSGTYPE_V2_SIGNALLING_MSG 0x0C00 +#define TXGBE_PSR_1588_MSGTYPE_V2_MGMT_MSG 0x0D00 + +/* Wake up registers */ +#define TXGBE_PSR_WKUP_CTL 0x15B80 +#define TXGBE_PSR_WKUP_IPV 0x15B84 +#define TXGBE_PSR_LAN_FLEX_SEL 0x15B8C +#define TXGBE_PSR_WKUP_IP4TBL(_i) (0x15BC0 + ((_i) * 4)) +#define TXGBE_PSR_WKUP_IP6TBL(_i) (0x15BE0 + ((_i) * 4)) +#define TXGBE_PSR_LAN_FLEX_DW_L(_i) (0x15C00 + ((_i) * 16)) +#define TXGBE_PSR_LAN_FLEX_DW_H(_i) (0x15C04 + ((_i) * 16)) +#define TXGBE_PSR_LAN_FLEX_MSK(_i) (0x15C08 + ((_i) * 16)) +#define TXGBE_PSR_LAN_FLEX_CTL 0x15CFC +/* Wake Up Filter Control Bit */ +#define TXGBE_PSR_WKUP_CTL_LNKC 0x00000001U /* Link Status Change Wakeup Enable*/ +#define TXGBE_PSR_WKUP_CTL_MAG 0x00000002U /* Magic Packet Wakeup Enable */ +#define TXGBE_PSR_WKUP_CTL_EX 0x00000004U /* Directed Exact Wakeup Enable */ +#define TXGBE_PSR_WKUP_CTL_MC 0x00000008U /* Directed Multicast Wakeup Enable*/ +#define TXGBE_PSR_WKUP_CTL_BC 0x00000010U /* Broadcast Wakeup Enable */ +#define TXGBE_PSR_WKUP_CTL_ARP 0x00000020U /* ARP Request Packet Wakeup Enable*/ +#define TXGBE_PSR_WKUP_CTL_IPV4 0x00000040U /* Directed IPv4 Pkt Wakeup Enable */ +#define TXGBE_PSR_WKUP_CTL_IPV6 0x00000080U /* Directed IPv6 Pkt Wakeup Enable */ +#define TXGBE_PSR_WKUP_CTL_IGNORE_TCO 0x00008000U /* Ignore WakeOn TCO pkts */ +#define TXGBE_PSR_WKUP_CTL_FLX0 0x00010000U /* Flexible Filter 0 Ena */ +#define TXGBE_PSR_WKUP_CTL_FLX1 0x00020000U /* Flexible Filter 1 Ena */ +#define TXGBE_PSR_WKUP_CTL_FLX2 0x00040000U /* Flexible Filter 2 Ena */ +#define TXGBE_PSR_WKUP_CTL_FLX3 0x00080000U /* Flexible Filter 3 Ena */ +#define TXGBE_PSR_WKUP_CTL_FLX4 0x00100000U /* Flexible Filter 4 Ena */ +#define TXGBE_PSR_WKUP_CTL_FLX5 0x00200000U /* Flexible Filter 5 Ena */ +#define TXGBE_PSR_WKUP_CTL_FLX_FILTERS 0x000F0000U /* Mask for 4 flex filters */ +#define TXGBE_PSR_WKUP_CTL_FLX_FILTERS_6 0x003F0000U /* Mask for 6 flex filters*/ +#define TXGBE_PSR_WKUP_CTL_FLX_FILTERS_8 0x00FF0000U /* Mask for 8 flex filters*/ +#define TXGBE_PSR_WKUP_CTL_FW_RST_WK 0x80000000U /* Ena wake on FW reset + * assertion */ +/* Mask for Ext. 
flex filters */ +#define TXGBE_PSR_WKUP_CTL_EXT_FLX_FILTERS 0x00300000U +#define TXGBE_PSR_WKUP_CTL_ALL_FILTERS 0x000F00FFU /* Mask all 4 flex filters*/ +#define TXGBE_PSR_WKUP_CTL_ALL_FILTERS_6 0x003F00FFU /* Mask all 6 flex filters*/ +#define TXGBE_PSR_WKUP_CTL_ALL_FILTERS_8 0x00FF00FFU /* Mask all 8 flex filters*/ +#define TXGBE_PSR_WKUP_CTL_FLX_OFFSET 16 /* Offset to the Flex Filters bits*/ + +#define TXGBE_PSR_MAX_SZ 0x15020 + +/****************************** TDB ******************************************/ +#define TXGBE_TDB_TFCS 0x1CE00 +#define TXGBE_TDB_PB_SZ(_i) (0x1CC00 + ((_i) * 4)) /* 8 of these */ +#define TXGBE_TDB_MNG_TC 0x1CD10 +#define TXGBE_TDB_PRB_CTL 0x17010 +#define TXGBE_TDB_PBRARB_CTL 0x1CD00 +#define TXGBE_TDB_UP2TC 0x1C800 +#define TXGBE_TDB_PBRARB_CFG(_i) (0x1CD20 + ((_i) * 4)) /* 8 of (0-7) */ + +#define TXGBE_TDB_PB_SZ_20KB 0x00005000U /* 20KB Packet Buffer */ +#define TXGBE_TDB_PB_SZ_40KB 0x0000A000U /* 40KB Packet Buffer */ +#define TXGBE_TDB_PB_SZ_MAX 0x00028000U /* 160KB Packet Buffer */ +#define TXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ +#define TXGBE_MAX_PB 8 + +/* statistic */ +#define TXGBE_TDB_OUT_PKT_CNT 0x1CF00 + +/****************************** TSEC *****************************************/ +/* Security Control Registers */ +#define TXGBE_TSC_CTL 0x1D000 +#define TXGBE_TSC_ST 0x1D004 +#define TXGBE_TSC_BUF_AF 0x1D008 +#define TXGBE_TSC_BUF_AE 0x1D00C +#define TXGBE_TSC_PRB_CTL 0x1D010 +#define TXGBE_TSC_MIN_IFG 0x1D020 +/* Security Bit Fields and Masks */ +#define TXGBE_TSC_CTL_SECTX_DIS 0x00000001U +#define TXGBE_TSC_CTL_TX_DIS 0x00000002U +#define TXGBE_TSC_CTL_STORE_FORWARD 0x00000004U +#define TXGBE_TSC_CTL_IV_MSK_EN 0x00000008U +#define TXGBE_TSC_ST_SECTX_RDY 0x00000001U +#define TXGBE_TSC_ST_OFF_DIS 0x00000002U +#define TXGBE_TSC_ST_ECC_TXERR 0x00000004U +#define TXGBE_TSC_MACTX_AFIFO_RD_WTRMRK 0x000f0000U + +/* LinkSec (MacSec) Registers */ +#define TXGBE_TSC_LSEC_CAP 0x1D200 +#define TXGBE_TSC_LSEC_CTL 0x1D204 +#define TXGBE_TSC_LSEC_SCI_L 0x1D208 +#define TXGBE_TSC_LSEC_SCI_H 0x1D20C +#define TXGBE_TSC_LSEC_SA 0x1D210 +#define TXGBE_TSC_LSEC_PKTNUM0 0x1D214 +#define TXGBE_TSC_LSEC_PKTNUM1 0x1D218 +#define TXGBE_TSC_LSEC_KEY0(_n) 0x1D21C +#define TXGBE_TSC_LSEC_KEY1(_n) 0x1D22C +#define TXGBE_TSC_LSEC_UNTAG_PKT 0x1D23C +#define TXGBE_TSC_LSEC_ENC_PKT 0x1D240 +#define TXGBE_TSC_LSEC_PROT_PKT 0x1D244 +#define TXGBE_TSC_LSEC_ENC_OCTET 0x1D248 +#define TXGBE_TSC_LSEC_PROT_OCTET 0x1D24C + +/* IpSec Registers */ +#define TXGBE_TSC_IPS_IDX 0x1D100 +#define TXGBE_TSC_IPS_IDX_WT 0x80000000U +#define TXGBE_TSC_IPS_IDX_RD 0x40000000U +#define TXGBE_TSC_IPS_IDX_SD_IDX 0x0U /* */ +#define TXGBE_TSC_IPS_IDX_EN 0x00000001U +#define TXGBE_TSC_IPS_SALT 0x1D104 +#define TXGBE_TSC_IPS_KEY(i) (0x1D108 + ((i) * 4)) + +/* 1588 */ +#define TXGBE_TSC_1588_CTL 0x1D400 /* Tx Time Sync Control reg */ +#define TXGBE_TSC_1588_STMPL 0x1D404 /* Tx timestamp value Low */ +#define TXGBE_TSC_1588_STMPH 0x1D408 /* Tx timestamp value High */ +#define TXGBE_TSC_1588_SYSTIML 0x1D40C /* System time register Low */ +#define TXGBE_TSC_1588_SYSTIMH 0x1D410 /* System time register High */ +#define TXGBE_TSC_1588_INC 0x1D414 /* Increment attributes reg */ +#define TXGBE_TSC_1588_INC_IV(v) (((v) & 0xFFFFFF)) +#define TXGBE_TSC_1588_INC_IP(v) (((v) & 0xFF) << 24) +#define TXGBE_TSC_1588_INC_IVP(v, p) \ + (((v) & 0xFFFFFF) | TXGBE_TSC_1588_INC_IP(p)) + +#define TXGBE_TSC_1588_ADJL 0x1D418 /* Time Adjustment Offset reg Low */ +#define TXGBE_TSC_1588_ADJH 0x1D41C /* Time Adjustment 
Offset reg High*/ + +/* 1588 fields */ +#define TXGBE_TSC_1588_CTL_VALID 0x00000001U /* Tx timestamp valid */ +#define TXGBE_TSC_1588_CTL_ENABLED 0x00000010U /* Tx timestamping enabled */ + +#define TXGBE_TSEC_1588_AUX_CTL 0x1D428 +#define TXGBE_TSEC_1588_TRGT_L(i) (0x1D42C + ((i) * 8)) /* [0,1] */ +#define TXGBE_TSEC_1588_TRGT_H(i) (0x1D430 + ((i) * 8)) /* [0,1] */ +#define TXGBE_TSEC_1588_FREQ_CLK_L(i) (0x1D43C + ((i) * 8)) /* [0,1] */ +#define TXGBE_TSEC_1588_FREQ_CLK_H(i) (0x1D440 + ((i) * 8)) /* [0,1] */ +#define TXGBE_TSEC_1588_AUX_STMP_L(i) (0x1D44C + ((i) * 8)) /* [0,1] */ +#define TXGBE_TSEC_1588_AUX_STMP_H(i) (0x1D450 + ((i) * 8)) /* [0,1] */ +#define TXGBE_TSEC_1588_SDP(n) (0x1D45C + ((n) * 4)) /* [0,3] */ + +#define TXGBE_TSEC_1588_INT_ST 0x1D420 +#define TXGBE_TSEC_1588_INT_EN 0x1D424 + +#define TXGBE_TSEC_1588_INT_ST_TT0 0x10 +#define TXGBE_TSEC_1588_INT_ST_TT1 0x20 +#define TXGBE_TSEC_1588_INT_EN_TT0 0x10 +#define TXGBE_TSEC_1588_INT_EN_TT1 0x20 + +#define TXGBE_TSEC_1588_AUX_CTL_EN_TT0 0x1 +#define TXGBE_TSEC_1588_AUX_CTL_PLSG 0x2 +#define TXGBE_TSEC_1588_AUX_CTL_EN_TT1 0x4 +#define TXGBE_TSEC_1588_AUX_CTL_EN_TS0 0x100 +#define TXGBE_TSEC_1588_AUX_CTL_EN_TS1 0x400 + +#define TXGBE_TSEC_1588_SDP_FUN_SEL_TT0 0x1 +#define TXGBE_TSEC_1588_SDP_FUN_SEL_TT1 0x2 +#define TXGBE_TSEC_1588_SDP_FUN_SEL_CL0 0x3 +#define TXGBE_TSEC_1588_SDP_FUN_SEL_CL1 0x4 +#define TXGBE_TSEC_1588_SDP_FUN_SEL_TS0 0x5 +#define TXGBE_TSEC_1588_SDP_FUN_SEL_TS1 0x6 +#define TXGBE_TSEC_1588_SDP_FUN_SEL_MASK 0x7 +#define TXGBE_TSEC_1588_SDP_OUT_LEVEL_LOW 0x10 +#define TXGBE_TSEC_1588_SDP_OUT_LEVEL_HIGH 0x0 + + +/********************************* RSEC **************************************/ +/* general rsec */ +#define TXGBE_RSC_CTL 0x17000 +#define TXGBE_RSC_ST 0x17004 +/* general rsec fields */ +#define TXGBE_RSC_CTL_SECRX_DIS 0x00000001U +#define TXGBE_RSC_CTL_RX_DIS 0x00000002U +#define TXGBE_RSC_CTL_CRC_STRIP 0x00000004U +#define TXGBE_RSC_CTL_IV_MSK_EN 0x00000008U +#define TXGBE_RSC_CTL_SAVE_MAC_ERR 0x00000040U +#define TXGBE_RSC_ST_RSEC_RDY 0x00000001U +#define TXGBE_RSC_ST_RSEC_OFLD_DIS 0x00000002U +#define TXGBE_RSC_ST_ECC_RXERR 0x00000004U + +/* link sec */ +#define TXGBE_RSC_LSEC_CAP 0x17200 +#define TXGBE_RSC_LSEC_CTL 0x17204 +#define TXGBE_RSC_LSEC_SCI_L 0x17208 +#define TXGBE_RSC_LSEC_SCI_H 0x1720C +#define TXGBE_RSC_LSEC_SA0 0x17210 +#define TXGBE_RSC_LSEC_SA1 0x17214 +#define TXGBE_RSC_LSEC_PKNUM0 0x17218 +#define TXGBE_RSC_LSEC_PKNUM1 0x1721C +#define TXGBE_RSC_LSEC_KEY0(_n) 0x17220 +#define TXGBE_RSC_LSEC_KEY1(_n) 0x17230 +#define TXGBE_RSEC_LSEC_UNTAG_PKT 0x17240 +#define TXGBE_RSC_LSEC_DEC_OCTET 0x17244 +#define TXGBE_RSC_LSEC_VLD_OCTET 0x17248 +#define TXGBE_RSC_LSEC_BAD_PKT 0x1724C +#define TXGBE_RSC_LSEC_NOSCI_PKT 0x17250 +#define TXGBE_RSC_LSEC_UNSCI_PKT 0x17254 +#define TXGBE_RSC_LSEC_UNCHK_PKT 0x17258 +#define TXGBE_RSC_LSEC_DLY_PKT 0x1725C +#define TXGBE_RSC_LSEC_LATE_PKT 0x17260 +#define TXGBE_RSC_LSEC_OK_PKT(_n) 0x17264 +#define TXGBE_RSC_LSEC_INV_PKT(_n) 0x17274 +#define TXGBE_RSC_LSEC_BADSA_PKT 0x1727C +#define TXGBE_RSC_LSEC_INVSA_PKT 0x17280 + +/* ipsec */ +#define TXGBE_RSC_IPS_IDX 0x17100 +#define TXGBE_RSC_IPS_IDX_WT 0x80000000U +#define TXGBE_RSC_IPS_IDX_RD 0x40000000U +#define TXGBE_RSC_IPS_IDX_TB_IDX 0x0U /* */ +#define TXGBE_RSC_IPS_IDX_TB_IP 0x00000002U +#define TXGBE_RSC_IPS_IDX_TB_SPI 0x00000004U +#define TXGBE_RSC_IPS_IDX_TB_KEY 0x00000006U +#define TXGBE_RSC_IPS_IDX_EN 0x00000001U +#define TXGBE_RSC_IPS_IP(i) (0x17104 + ((i) * 4)) +#define TXGBE_RSC_IPS_SPI 0x17114 
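The TXGBE_TSC_1588_INC_IV/_IP/_IVP helpers defined a few lines above are pure bit-packing macros (increment value in bits 23:0, period in bits 31:24), so their effect can be checked outside the driver. The following is a minimal stand-alone sketch in plain C: the macro bodies are copied from this header, and the numeric inputs are made-up illustrative values, not values used by the driver.

#include <stdint.h>
#include <stdio.h>

#define TXGBE_TSC_1588_INC_IV(v)	(((v) & 0xFFFFFF))
#define TXGBE_TSC_1588_INC_IP(v)	(((v) & 0xFF) << 24)
#define TXGBE_TSC_1588_INC_IVP(v, p)	\
	(((v) & 0xFFFFFF) | TXGBE_TSC_1588_INC_IP(p))

int main(void)
{
	/* Example only: pack a 24-bit increment value with an 8-bit period. */
	uint32_t inc = TXGBE_TSC_1588_INC_IVP(0x123456, 0x0A);

	/* 0x123456 stays in bits 23:0, 0x0A lands in bits 31:24 -> 0x0A123456 */
	printf("TSC_1588_INC = 0x%08X\n", (unsigned int)inc);
	return 0;
}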
+#define TXGBE_RSC_IPS_IP_IDX 0x17118 +#define TXGBE_RSC_IPS_KEY(i) (0x1711C + ((i) * 4)) +#define TXGBE_RSC_IPS_SALT 0x1712C +#define TXGBE_RSC_IPS_MODE 0x17130 +#define TXGBE_RSC_IPS_MODE_IPV6 0x00000010 +#define TXGBE_RSC_IPS_MODE_DEC 0x00000008 +#define TXGBE_RSC_IPS_MODE_ESP 0x00000004 +#define TXGBE_RSC_IPS_MODE_AH 0x00000002 +#define TXGBE_RSC_IPS_MODE_VALID 0x00000001 /************************************** ETH PHY ******************************/ -#define TXGBE_XPCS_IDA_ADDR 0x13000 -#define TXGBE_XPCS_IDA_DATA 0x13004 +#define TXGBE_XPCS_IDA_ADDR 0x13000 +#define TXGBE_XPCS_IDA_DATA 0x13004 +#define TXGBE_ETHPHY_IDA_ADDR 0x13008 +#define TXGBE_ETHPHY_IDA_DATA 0x1300C + +/************************************** MNG ********************************/ +#define TXGBE_MNG_FW_SM 0x1E000 +#define TXGBE_MNG_SW_SM 0x1E004 +#define TXGBE_MNG_SWFW_SYNC 0x1E008 +#define TXGBE_MNG_MBOX 0x1E100 +#define TXGBE_MNG_MBOX_CTL 0x1E044 +#define TXGBE_MNG_OS2BMC_CNT 0x1E094 +#define TXGBE_MNG_BMC2OS_CNT 0x1E090 + +/* amlite: swfw mailbox changes */ +#define TXGBE_AML_MNG_MBOX_CTL_SW2FW 0x1E0A0 +#define TXGBE_AML_MNG_MBOX_SW2FW 0x1E200 +#define TXGBE_AML_MNG_MBOX_CTL_FW2SW 0x1E0A4 +#define TXGBE_AML_MNG_MBOX_FW2SW 0x1E300 + +#define TXGBE_AML_MNG_MBOX_NOTIFY 0x80000000U + +/* Firmware Semaphore Register */ +#define TXGBE_MNG_FW_SM_MODE_MASK 0xE +#define TXGBE_MNG_FW_SM_TS_ENABLED 0x1 +/* SW Semaphore Register bitmasks */ +#define TXGBE_MNG_SW_SM_SM 0x00000001U /* software Semaphore */ + +/* SW_FW_SYNC definitions */ +#define TXGBE_MNG_SWFW_SYNC_SW_PHY 0x0001 +#define TXGBE_MNG_SWFW_SYNC_SW_FLASH 0x0008 +#define TXGBE_MNG_SWFW_SYNC_SW_MB 0x0004 + +#define TXGBE_MNG_MBOX_CTL_SWRDY 0x1 +#define TXGBE_MNG_MBOX_CTL_SWACK 0x2 +#define TXGBE_MNG_MBOX_CTL_FWRDY 0x4 +#define TXGBE_MNG_MBOX_CTL_FWACK 0x8 + +/************************************* ETH MAC *****************************/ +#define TXGBE_MAC_TX_CFG 0x11000 +#define TXGBE_MAC_RX_CFG 0x11004 +#define TXGBE_MAC_PKT_FLT 0x11008 +#define TXGBE_MAC_PKT_FLT_PR (0x1) /* promiscuous mode */ +#define TXGBE_MAC_PKT_FLT_RA (0x80000000) /* receive all */ +#define TXGBE_MAC_WDG_TIMEOUT 0x1100C +#define TXGBE_MAC_RX_FLOW_CTRL 0x11090 +#define TXGBE_MAC_ADDRESS0_HIGH 0x11300 +#define TXGBE_MAC_ADDRESS0_LOW 0x11304 +#define TXGBE_MAC_MISC_CTL 0x11f00 + +#define TXGBE_MAC_TX_CFG_TE 0x00000001U +#define TXGBE_MAC_TX_CFG_SPEED_MASK 0x60000000U +#define TXGBE_MAC_TX_CFG_SPEED_10G 0x00000000U +#define TXGBE_MAC_TX_CFG_SPEED_1G 0x60000000U +#define TXGBE_MAC_RX_CFG_RE 0x00000001U +#define TXGBE_MAC_RX_CFG_JE 0x00000100U +#define TXGBE_MAC_RX_CFG_LM 0x00000400U +#define TXGBE_MAC_WDG_TIMEOUT_PWE 0x00000100U +#define TXGBE_MAC_WDG_TIMEOUT_WTO_MASK 0x0000000FU +#define TXGBE_MAC_WDG_TIMEOUT_WTO_DELTA 2 +#define TXGBE_MAC_MISC_LINK_STS_MOD 0x1 + +#define TXGBE_LINK_BOTH_PCS_MAC 0x1 + + +#define TXGBE_EPHY_STAT 0x13404 +#define TXGBE_EPHY_STAT_PPL_LOCK 0x3 + +/* amlite: new MAC_TX_CONFIG */ +/* +{SS_3, SS_2, SS} +SS_3 in bit27, SS_2 in bit30, SS in bits29~28 +■ 4'b0000 : 40-gigabit operation using XLGMII +■ 4'b0001 : 25-gigabit operation using XLGMII +■ 4'b0010 : 50-gigabit operation using XLGMII +■ 4'b0011 : 100-gigabit operation using XLGMII +■ 4'b0100 : 10-gigabit operation using XGMII +■ 4'b0101 : 5-gigabit operation using XGMII +■ 4'b0110 : 2.5-gigabit operation using GMII +■ 4'b0111 : 1-gigabit operation using GMII +■ 4'b1000 : 2.5-gigabit operation using XGMII +■ 4'b1001-4'b1111: Reserved +*/ +#define TXGBE_MAC_TX_CFG_AML_SPEED_MASK 0x78000000U +#define 
TXGBE_MAC_TX_CFG_AML_SPEED_50G 0x20000000U +#define TXGBE_MAC_TX_CFG_AML_SPEED_40G 0x00000000U +#define TXGBE_MAC_TX_CFG_AML_SPEED_25G 0x10000000U +#define TXGBE_MAC_TX_CFG_AML_SPEED_10G 0x40000000U +#define TXGBE_MAC_TX_CFG_AML_SPEED_1G 0x70000000U + +#define TXGBE_MAC_RX_FLOW_CTRL_RFE 0x00000001U /* receive fc enable */ +#define TXGBE_MAC_RX_FLOW_CTRL_PFCE 0x00000100U /* pfc enable */ + +#define TXGBE_MSCA 0x11200 +#define TXGBE_MSCA_RA(v) ((0xFFFF & (v))) +#define TXGBE_MSCA_PA(v) ((0x1F & (v)) << 16) +#define TXGBE_MSCA_DA(v) ((0x1F & (v)) << 21) +#define TXGBE_MSCC 0x11204 +#define TXGBE_MSCC_DATA(v) ((0xFFFF & (v))) +#define TXGBE_MSCC_CMD(v) ((0x3 & (v)) << 16) +enum TXGBE_MSCA_CMD_value { + TXGBE_MSCA_CMD_RSV = 0, + TXGBE_MSCA_CMD_WRITE, + TXGBE_MSCA_CMD_POST_READ, + TXGBE_MSCA_CMD_READ, +}; +#define TXGBE_MSCC_SADDR ((0x1U) << 18) +#define TXGBE_MSCC_CR(v) ((0x8U & (v)) << 19) +#define TXGBE_MSCC_BUSY ((0x1U) << 22) + +#define TXGBE_MAC_MDIO_CLAUSE_22_PORT 0x11220 +#define TXGBE_MAC_MDIO_CLAUSE_ALL_PRTCL22 0xF + +/* EEE registers */ + +/* statistic */ +#define TXGBE_MAC_LXONRXC 0x11E0C +#define TXGBE_MAC_LXOFFRXC 0x11988 +#define TXGBE_MAC_PXONRXC(_i) (0x11E30 + ((_i) * 4)) /* 8 of these */ +#define TXGBE_MAC_PXOFFRXC 0x119DC +#define TXGBE_RX_BC_FRAMES_GOOD_LOW 0x11918 +#define TXGBE_RX_CRC_ERROR_FRAMES_LOW 0x11928 +#define TXGBE_RX_LEN_ERROR_FRAMES_LOW 0x11978 +#define TXGBE_RX_UNDERSIZE_FRAMES_GOOD 0x11938 +#define TXGBE_RX_OVERSIZE_FRAMES_GOOD 0x1193C +#define TXGBE_RX_FRAME_CNT_GOOD_BAD_LOW 0x11900 +#define TXGBE_TX_FRAME_CNT_GOOD_BAD_LOW 0x1181C +#define TXGBE_TX_MC_FRAMES_GOOD_LOW 0x1182C +#define TXGBE_TX_BC_FRAMES_GOOD_LOW 0x11824 +#define TXGBE_MMC_CONTROL 0x11800 +#define TXGBE_MMC_CONTROL_RSTONRD 0x4 /* reset on read */ +#define TXGBE_MMC_CONTROL_UP 0x700 + + +/********************************* BAR registers ***************************/ +/* Interrupt Registers */ +#define TXGBE_BME_CTL 0x12020 +#define TXGBE_PX_MISC_IC 0x100 +#define TXGBE_PX_MISC_ICS 0x104 +#define TXGBE_PX_MISC_IEN 0x108 +#define TXGBE_PX_MISC_IVAR 0x4FC +#define TXGBE_PX_GPIE 0x118 +#define TXGBE_PX_ISB_ADDR_L 0x160 +#define TXGBE_PX_ISB_ADDR_H 0x164 +#define TXGBE_PX_TCP_TIMER 0x170 +#define TXGBE_PX_ITRSEL 0x180 +#define TXGBE_PX_IC(_i) (0x120 + (_i) * 4) +#define TXGBE_PX_ICS(_i) (0x130 + (_i) * 4) +#define TXGBE_PX_IMS(_i) (0x140 + (_i) * 4) +#define TXGBE_PX_IMC(_i) (0x150 + (_i) * 4) +#define TXGBE_PX_IVAR(_i) (0x500 + (_i) * 4) +#define TXGBE_PX_ITR(_i) (0x200 + (_i) * 4) +#define TXGBE_PX_TRANSACTION_PENDING 0x168 +#define TXGBE_PX_INTA 0x110 + +/* Interrupt register bitmasks */ +/* Extended Interrupt Cause Read */ +#define TXGBE_PX_MISC_IC_ETH_LKDN 0x00000100U /* eth link down */ +#define TXGBE_PX_MISC_IC_DEV_RST 0x00000400U /* device reset event */ +#define TXGBE_PX_MISC_IC_TIMESYNC 0x00000800U /* time sync */ +#define TXGBE_PX_MISC_IC_STALL 0x00001000U /* trans or recv path is + * stalled */ +#define TXGBE_PX_MISC_IC_LINKSEC 0x00002000U /* Tx LinkSec require key + * exchange */ +#define TXGBE_PX_MISC_IC_RX_MISS 0x00004000U /* Packet Buffer Overrun */ +#define TXGBE_PX_MISC_IC_FLOW_DIR 0x00008000U /* FDir Exception */ +#define TXGBE_PX_MISC_IC_I2C 0x00010000U /* I2C interrupt */ +#define TXGBE_PX_MISC_IC_ETH_EVENT 0x00020000U /* err reported by MAC except + * eth link down */ +#define TXGBE_PX_MISC_IC_ETH_LK 0x00040000U /* link up */ +#define TXGBE_PX_MISC_IC_ETH_AN 0x00080000U /* link auto-nego done */ +#define TXGBE_PX_MISC_IC_INT_ERR 0x00100000U /* integrity error */ +#define 
TXGBE_PX_MISC_IC_SPI 0x00200000U /* SPI interface */ +#define TXGBE_PX_MISC_IC_TXDESC 0x00400000U /* tx desc error */ +#define TXGBE_PX_MISC_IC_VF_MBOX 0x00800000U /* VF-PF message box */ +#define TXGBE_PX_MISC_IC_GPIO 0x04000000U /* GPIO interrupt */ +#define TXGBE_PX_MISC_IC_PCIE_REQ_ERR 0x08000000U /* pcie request error int */ +#define TXGBE_PX_MISC_IC_OVER_HEAT 0x10000000U /* overheat detection */ +#define TXGBE_PX_MISC_IC_PROBE_MATCH 0x20000000U /* probe match */ +//#define TXGBE_PX_MISC_IC_MNG_HOST_MBOX 0x40000000U /* mng mailbox */ +#define TXGBE_PX_MISC_IC_TIMER 0x80000000U /* tcp timer */ + +#define TXGBE_PX_MISC_AML_ETH_LK_CHANGE 0x00000100U /* link change */ +#define TXGBE_PX_MISC_AML_ETH_PHY_EVENT 0x00040000U /* Eth phy event */ + + +/* Extended Interrupt Cause Set */ +#define TXGBE_PX_MISC_ICS_ETH_LKDN 0x00000100U +#define TXGBE_PX_MISC_ICS_DEV_RST 0x00000400U +#define TXGBE_PX_MISC_ICS_TIMESYNC 0x00000800U +#define TXGBE_PX_MISC_ICS_STALL 0x00001000U +#define TXGBE_PX_MISC_ICS_LINKSEC 0x00002000U +#define TXGBE_PX_MISC_ICS_RX_MISS 0x00004000U +#define TXGBE_PX_MISC_ICS_FLOW_DIR 0x00008000U +#define TXGBE_PX_MISC_ICS_I2C 0x00010000U +#define TXGBE_PX_MISC_ICS_ETH_EVENT 0x00020000U +#define TXGBE_PX_MISC_ICS_ETH_LK 0x00040000U +#define TXGBE_PX_MISC_ICS_ETH_AN 0x00080000U +#define TXGBE_PX_MISC_ICS_INT_ERR 0x00100000U +#define TXGBE_PX_MISC_ICS_SPI 0x00200000U +#define TXGBE_PX_MISC_ICS_VF_MBOX 0x00800000U +#define TXGBE_PX_MISC_ICS_GPIO 0x04000000U +#define TXGBE_PX_MISC_ICS_PCIE_REQ_ERR 0x08000000U +#define TXGBE_PX_MISC_ICS_OVER_HEAT 0x10000000U +#define TXGBE_PX_MISC_ICS_PROBE_MATCH 0x20000000U +//#define TXGBE_PX_MISC_ICS_MNG_HOST_MBOX 0x40000000U +#define TXGBE_PX_MISC_ICS_TIMER 0x80000000U + +/* Extended Interrupt Enable Set */ +#define TXGBE_PX_MISC_IEN_ETH_LKDN 0x00000100U +#define TXGBE_PX_MISC_IEN_DEV_RST 0x00000400U +#define TXGBE_PX_MISC_IEN_TIMESYNC 0x00000800U +#define TXGBE_PX_MISC_IEN_STALL 0x00001000U +#define TXGBE_PX_MISC_IEN_LINKSEC 0x00002000U +#define TXGBE_PX_MISC_IEN_RX_MISS 0x00004000U +#define TXGBE_PX_MISC_IEN_FLOW_DIR 0x00008000U +#define TXGBE_PX_MISC_IEN_I2C 0x00010000U +#define TXGBE_PX_MISC_IEN_ETH_EVENT 0x00020000U +#define TXGBE_PX_MISC_IEN_ETH_LK 0x00040000U +#define TXGBE_PX_MISC_IEN_ETH_AN 0x00080000U +#define TXGBE_PX_MISC_IEN_INT_ERR 0x00100000U +#define TXGBE_PX_MISC_IEN_SPI 0x00200000U +#define TXGBE_PX_MISC_IEN_TXDESC 0x00400000U +#define TXGBE_PX_MISC_IEN_VF_MBOX 0x00800000U +#define TXGBE_PX_MISC_IEN_GPIO 0x04000000U +#define TXGBE_PX_MISC_IEN_PCIE_REQ_ERR 0x08000000U +#define TXGBE_PX_MISC_IEN_OVER_HEAT 0x10000000U +#define TXGBE_PX_MISC_IEN_PROBE_MATCH 0x20000000U +//#define TXGBE_PX_MISC_IEN_MNG_HOST_MBOX 0x40000000U +#define TXGBE_PX_MISC_IEN_TIMER 0x80000000U + +#define TXGBE_PX_MISC_IEN_MASK ( \ + TXGBE_PX_MISC_IEN_ETH_LKDN| \ + TXGBE_PX_MISC_IEN_DEV_RST | \ + TXGBE_PX_MISC_IEN_ETH_EVENT | \ + TXGBE_PX_MISC_IEN_ETH_LK | \ + TXGBE_PX_MISC_IEN_ETH_AN | \ + TXGBE_PX_MISC_IEN_INT_ERR | \ + TXGBE_PX_MISC_IEN_VF_MBOX | \ + TXGBE_PX_MISC_IEN_GPIO | \ + TXGBE_PX_MISC_IEN_STALL | \ + TXGBE_PX_MISC_IEN_PCIE_REQ_ERR | \ + TXGBE_PX_MISC_IEN_TIMER) + +/* General purpose Interrupt Enable */ +#define TXGBE_PX_GPIE_MODEL 0x00000001U +#define TXGBE_PX_GPIE_IMEN 0x00000002U +#define TXGBE_PX_GPIE_LL_INTERVAL 0x000000F0U +#define TXGBE_PX_GPIE_RSC_DELAY 0x00000700U + +/* Interrupt Vector Allocation Registers */ +#define TXGBE_PX_IVAR_REG_NUM 64 +#define TXGBE_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ + +#define TXGBE_MAX_INT_RATE 
500000 +#define TXGBE_MIN_INT_RATE 980 +#define TXGBE_MAX_EITR 0x00000FF8U +#define TXGBE_AMLITE_MAX_EITR 0x00000FFFU +#define TXGBE_MIN_EITR 8 +#define TXGBE_PX_ITR_ITR_INT_MASK 0x00000FF8U +#define TXGBE_PX_ITR_LLI_CREDIT 0x001f0000U +#define TXGBE_PX_ITR_LLI_MOD 0x00008000U +#define TXGBE_PX_ITR_CNT_WDIS 0x80000000U +#define TXGBE_PX_ITR_ITR_CNT 0x0FE00000U + +/* transmit DMA Registers */ +#define TXGBE_PX_TR_BAL(_i) (0x03000 + ((_i) * 0x40)) +#define TXGBE_PX_TR_BAH(_i) (0x03004 + ((_i) * 0x40)) +#define TXGBE_PX_TR_WP(_i) (0x03008 + ((_i) * 0x40)) +#define TXGBE_PX_TR_RP(_i) (0x0300C + ((_i) * 0x40)) +#define TXGBE_PX_TR_CFG(_i) (0x03010 + ((_i) * 0x40)) + +/* amlite: tx head wb */ +#define TXGBE_PX_TR_HEAD_ADDRL(_i) (0x03028 + ((_i) * 0x40)) +#define TXGBE_PX_TR_HEAD_ADDRH(_i) (0x0302C + ((_i) * 0x40)) + +/* Transmit Config masks */ +#define TXGBE_PX_TR_CFG_ENABLE (1) /* Ena specific Tx Queue */ +#define TXGBE_PX_TR_CFG_TR_SIZE_SHIFT 1 /* tx desc number per ring */ +#define TXGBE_PX_TR_CFG_SWFLSH (1 << 26) /* Tx Desc. wr-bk flushing */ +#define TXGBE_PX_TR_CFG_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ +#define TXGBE_PX_TR_CFG_THRE_SHIFT 8 +#define TXGBE_PX_TR_CFG_HEAD_WB (1 << 27) /* amlite head wb */ +#define TXGBE_PX_TR_CFG_HEAD_WB_64BYTE (1 << 28) /* amlite head wb 64byte */ + + +#define TXGBE_PX_TR_RPn(q_per_pool, vf_number, vf_q_index) \ + (TXGBE_PX_TR_RP((q_per_pool)*(vf_number) + (vf_q_index))) +#define TXGBE_PX_TR_WPn(q_per_pool, vf_number, vf_q_index) \ + (TXGBE_PX_TR_WP((q_per_pool)*(vf_number) + (vf_q_index))) + +/* Receive DMA Registers */ +#define TXGBE_PX_RR_BAL(_i) (0x01000 + ((_i) * 0x40)) +#define TXGBE_PX_RR_BAH(_i) (0x01004 + ((_i) * 0x40)) +#define TXGBE_PX_RR_WP(_i) (0x01008 + ((_i) * 0x40)) +#define TXGBE_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40)) +#define TXGBE_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40)) + +#define TXGBE_TDM_DESC_CHK(i) 0x0180B0 + (i) * 4 /*0-3*/ +#define TXGBE_TDM_DESC_NONFATAL(i) (0x0180C0 + (i) * 4) /*0-3*/ +#define TXGBE_TDM_DESC_FATAL(i) (0x0180D0 + (i) * 4) /*0-3*/ +/* PX_RR_CFG bit definitions */ +#define TXGBE_PX_RR_CFG_RR_SIZE_SHIFT 1 +#define TXGBE_PX_RR_CFG_BSIZEPKT_SHIFT 2 /* so many KBs */ +#define TXGBE_PX_RR_CFG_BSIZEHDRSIZE_SHIFT 6 /* 64byte resolution (>> 6) + * + at bit 8 offset (<< 12) + * = (<< 6) + */ +#define TXGBE_PX_RR_CFG_DROP_EN 0x40000000U +#define TXGBE_PX_RR_CFG_VLAN 0x80000000U +#define TXGBE_PX_RR_CFG_RSC 0x20000000U +#define TXGBE_PX_RR_CFG_CNTAG 0x10000000U +#define TXGBE_PX_RR_CFG_RSC_CNT_MD 0x08000000U +#define TXGBE_PX_RR_CFG_SPLIT_MODE 0x04000000U +#define TXGBE_PX_RR_CFG_STALL 0x02000000U +#define TXGBE_PX_RR_CFG_MAX_RSCBUF_1 0x00000000U +#define TXGBE_PX_RR_CFG_MAX_RSCBUF_4 0x00800000U +#define TXGBE_PX_RR_CFG_MAX_RSCBUF_8 0x01000000U +#define TXGBE_PX_RR_CFG_MAX_RSCBUF_16 0x01800000U +#define TXGBE_PX_RR_CFG_RR_THER 0x00070000U +#define TXGBE_PX_RR_CFG_RR_THER_SHIFT 16 + +#define TXGBE_PX_RR_CFG_RR_HDR_SZ 0x0000F000U +#define TXGBE_PX_RR_CFG_RR_BUF_SZ 0x00000F00U +#define TXGBE_PX_RR_CFG_RR_SZ 0x0000007EU +#define TXGBE_PX_RR_CFG_RR_EN 0x00000001U + +/* amlite: desc merge */ +#define TXGBE_PX_RR_CFG_DESC_MERGE 0x00080000U + +/* statistic */ +#define TXGBE_PX_MPRC(_i) (0x1020 + ((_i) * 64)) +#define TXGBE_VX_GPRC(_i) (0x01014 + (0x40 * (_i))) +#define TXGBE_VX_GPTC(_i) (0x03014 + (0x40 * (_i))) +#define TXGBE_VX_GORC_LSB(_i) (0x01018 + (0x40 * (_i))) +#define TXGBE_VX_GORC_MSB(_i) (0x0101C + (0x40 * (_i))) +#define TXGBE_VX_GOTC_LSB(_i) (0x03018 + (0x40 * (_i))) +#define TXGBE_VX_GOTC_MSB(_i) (0x0301C + (0x40 * 
(_i))) +#define TXGBE_VX_MPRC(_i) (0x01020 + (0x40 * (_i))) + +#define TXGBE_PX_GPRC 0x12504 +#define TXGBE_PX_GPTC 0x18308 + +#define TXGBE_PX_GORC_LSB 0x12508 +#define TXGBE_PX_GORC_MSB 0x1250C + +#define TXGBE_PX_GOTC_LSB 0x1830C +#define TXGBE_PX_GOTC_MSB 0x18310 + +/************************************* Stats registers ************************/ + + + +#define TXGBE_FCCRC 0x15160 /* Num of Good Eth CRC w/ Bad FC CRC */ +#define TXGBE_FCOERPDC 0x12514 /* FCoE Rx Packets Dropped Count */ +#define TXGBE_FCLAST 0x12518 /* FCoE Last Error Count */ +#define TXGBE_FCOEPRC 0x15164 /* Number of FCoE Packets Received */ +#define TXGBE_FCOEDWRC 0x15168 /* Number of FCoE DWords Received */ +#define TXGBE_FCOEPTC 0x18318 /* Number of FCoE Packets Transmitted */ +#define TXGBE_FCOEDWTC 0x1831C /* Number of FCoE DWords Transmitted */ + + + + + +/*************************** Flash region definition *************************/ +/* EEC Register */ +#define TXGBE_EEC_SK 0x00000001U /* EEPROM Clock */ +#define TXGBE_EEC_CS 0x00000002U /* EEPROM Chip Select */ +#define TXGBE_EEC_DI 0x00000004U /* EEPROM Data In */ +#define TXGBE_EEC_DO 0x00000008U /* EEPROM Data Out */ +#define TXGBE_EEC_FWE_MASK 0x00000030U /* FLASH Write Enable */ +#define TXGBE_EEC_FWE_DIS 0x00000010U /* Disable FLASH writes */ +#define TXGBE_EEC_FWE_EN 0x00000020U /* Enable FLASH writes */ +#define TXGBE_EEC_FWE_SHIFT 4 +#define TXGBE_EEC_REQ 0x00000040U /* EEPROM Access Request */ +#define TXGBE_EEC_GNT 0x00000080U /* EEPROM Access Grant */ +#define TXGBE_EEC_PRES 0x00000100U /* EEPROM Present */ +#define TXGBE_EEC_ARD 0x00000200U /* EEPROM Auto Read Done */ +#define TXGBE_EEC_FLUP 0x00800000U /* Flash update command */ +#define TXGBE_EEC_SEC1VAL 0x02000000U /* Sector 1 Valid */ +#define TXGBE_EEC_FLUDONE 0x04000000U /* Flash update done */ +/* EEPROM Addressing bits based on type (0-small, 1-large) */ +#define TXGBE_EEC_ADDR_SIZE 0x00000400U +#define TXGBE_EEC_SIZE 0x00007800U /* EEPROM Size */ +#define TXGBE_EERD_MAX_ADDR 0x00003FFFU /* EERD allows 14 bits for addr. 
*/ + +#define TXGBE_EEC_SIZE_SHIFT 11 +#define TXGBE_EEPROM_WORD_SIZE_SHIFT 6 +#define TXGBE_EEPROM_OPCODE_BITS 8 + +/* FLA Register */ +#define TXGBE_FLA_LOCKED 0x00000040U /* Part Number String Length */ -#define TXGBE_PBANUM_LENGTH 32 +#define TXGBE_PBANUM_LENGTH 32 /* Checksum and EEPROM pointers */ -#define TXGBE_EEPROM_LAST_WORD 0x800 -#define TXGBE_EEPROM_CHECKSUM 0x2F -#define TXGBE_EEPROM_SUM 0xBABA -#define TXGBE_EEPROM_VERSION_L 0x1D -#define TXGBE_EEPROM_VERSION_H 0x1E -#define TXGBE_ISCSI_BOOT_CONFIG 0x07 -#define TXGBE_PBANUM0_PTR 0x05 -#define TXGBE_PBANUM1_PTR 0x06 -#define TXGBE_PBANUM_PTR_GUARD 0xFAFA - -#define TXGBE_MAX_MSIX_VECTORS 64 -#define TXGBE_MAX_FDIR_INDICES 63 - -#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) -#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) - -#define TXGBE_SP_MAX_TX_QUEUES 128 -#define TXGBE_SP_MAX_RX_QUEUES 128 -#define TXGBE_SP_RAR_ENTRIES 128 -#define TXGBE_SP_MC_TBL_SIZE 128 -#define TXGBE_SP_VFT_TBL_SIZE 128 -#define TXGBE_SP_RX_PB_SIZE 512 -#define TXGBE_SP_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */ - -/* TX/RX descriptor defines */ -#define TXGBE_DEFAULT_TXD 512 -#define TXGBE_DEFAULT_TX_WORK 256 - -#if (PAGE_SIZE < 8192) -#define TXGBE_DEFAULT_RXD 512 -#define TXGBE_DEFAULT_RX_WORK 256 -#else -#define TXGBE_DEFAULT_RXD 256 -#define TXGBE_DEFAULT_RX_WORK 128 +#define TXGBE_PBANUM_PTR_GUARD 0xFAFA +#define TXGBE_EEPROM_CHECKSUM 0x2F +#define TXGBE_EEPROM_SUM 0xBABA +#define TXGBE_ATLAS0_CONFIG_PTR 0x04 +#define TXGBE_PHY_PTR 0x04 +#define TXGBE_ATLAS1_CONFIG_PTR 0x05 +#define TXGBE_OPTION_ROM_PTR 0x05 +#define TXGBE_PCIE_GENERAL_PTR 0x06 +#define TXGBE_PCIE_CONFIG0_PTR 0x07 +#define TXGBE_PCIE_CONFIG1_PTR 0x08 +#define TXGBE_CORE0_PTR 0x09 +#define TXGBE_CORE1_PTR 0x0A +#define TXGBE_MAC0_PTR 0x0B +#define TXGBE_MAC1_PTR 0x0C +#define TXGBE_CSR0_CONFIG_PTR 0x0D +#define TXGBE_CSR1_CONFIG_PTR 0x0E +#define TXGBE_PCIE_ANALOG_PTR 0x02 +#define TXGBE_SHADOW_RAM_SIZE 0x4000 +#define TXGBE_TXGBE_PCIE_GENERAL_SIZE 0x24 +#define TXGBE_PCIE_CONFIG_SIZE 0x08 +#define TXGBE_EEPROM_LAST_WORD 0x800 +#define TXGBE_FW_PTR 0x0F +#define TXGBE_PBANUM0_PTR 0x05 +#define TXGBE_PBANUM1_PTR 0x06 +#define TXGBE_ALT_MAC_ADDR_PTR 0x37 +#define TXGBE_FREE_SPACE_PTR 0x3E +#define TXGBE_SW_REGION_PTR 0x1C +#define TXGBE_SHOWROM_I2C_PTR 0xB00 +#define TXGBE_SHOWROM_I2C_END 0xF00 + + +#define TXGBE_SAN_MAC_ADDR_PTR 0x18 +#define TXGBE_DEVICE_CAPS 0x1C +#define TXGBE_EEPROM_VERSION_L 0x1D +#define TXGBE_EEPROM_VERSION_H 0x1E +#define TXGBE_ISCSI_BOOT_CONFIG 0x07 + +#define TXGBE_SERIAL_NUMBER_MAC_ADDR 0x11 +#define TXGBE_MAX_MSIX_VECTORS_SAPPHIRE 0x40 + +/* MSI-X capability fields masks */ +#define TXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF + +/* Legacy EEPROM word offsets */ +#define TXGBE_ISCSI_BOOT_CAPS 0x0033 +#define TXGBE_ISCSI_SETUP_PORT_0 0x0030 +#define TXGBE_ISCSI_SETUP_PORT_1 0x0034 + +/* EEPROM Commands - SPI */ +#define TXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ +#define TXGBE_EEPROM_STATUS_RDY_SPI 0x01 +#define TXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define TXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define TXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ +#define TXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ +/* EEPROM reset Write Enable latch */ +#define TXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 +#define TXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ +#define TXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ 
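The SPI opcode values above follow the small-EEPROM convention spelled out next to TXGBE_EEPROM_A8_OPCODE_SPI: on parts that take an 8-bit address, address bit 8 is carried in bit 3 of the opcode. Below is a stand-alone sketch of that convention in plain C, using opcode values copied from this header; it is illustrative only and is not the driver's access routine.

#include <stdint.h>
#include <stdio.h>

#define TXGBE_EEPROM_READ_OPCODE_SPI	0x03	/* EEPROM read opcode */
#define TXGBE_EEPROM_A8_OPCODE_SPI	0x08	/* opcode bit-3 = addr bit-8 */

/* Pick the read opcode for a byte address on an 8-bit-address SPI EEPROM. */
static uint8_t spi_read_opcode(uint16_t byte_addr)
{
	uint8_t opcode = TXGBE_EEPROM_READ_OPCODE_SPI;

	if (byte_addr & 0x100)		/* address bit 8 -> opcode bit 3 */
		opcode |= TXGBE_EEPROM_A8_OPCODE_SPI;

	return opcode;
}

int main(void)
{
	/* 0x0F0 keeps the plain read opcode 0x03; 0x1F0 folds in bit 3 -> 0x0B. */
	printf("read 0x0F0 -> opcode 0x%02X\n", (unsigned int)spi_read_opcode(0x0F0));
	printf("read 0x1F0 -> opcode 0x%02X\n", (unsigned int)spi_read_opcode(0x1F0));
	return 0;
}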
+#define TXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define TXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define TXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Read Register */ +#define TXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */ +#define TXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */ +#define TXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */ +#define TXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define TXGBE_NVM_POLL_WRITE 1 /* Flag for polling for wr complete */ +#define TXGBE_NVM_POLL_READ 0 /* Flag for polling for rd complete */ + +#define NVM_INIT_CTRL_3 0x38 +#define NVM_INIT_CTRL_3_LPLU 0x8 +#define NVM_INIT_CTRL_3_D10GMP_PORT0 0x40 +#define NVM_INIT_CTRL_3_D10GMP_PORT1 0x100 + +#define TXGBE_ETH_LENGTH_OF_ADDRESS 6 + +#define TXGBE_EEPROM_PAGE_SIZE_MAX 128 +#define TXGBE_EEPROM_RD_BUFFER_MAX_COUNT 256 /* words rd in burst */ +#define TXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* words wr in burst */ +#define TXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */ +#define TXGBE_EEPROM_CCD_BIT 2 + +#ifndef TXGBE_EEPROM_GRANT_ATTEMPTS +#define TXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM attempts to gain grant */ #endif -#define TXGBE_INTR_MISC(A) BIT((A)->num_q_vectors) -#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1) +#ifndef TXGBE_EERD_EEWR_ATTEMPTS +/* Number of 5 microseconds we wait for EERD read and + * EERW write to complete */ +#define TXGBE_EERD_EEWR_ATTEMPTS 100000 +#endif + +#ifndef TXGBE_FLUDONE_ATTEMPTS +/* # attempts we wait for flush update to complete */ +#define TXGBE_FLUDONE_ATTEMPTS 20000 +#endif + +#define TXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */ +#define TXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */ +#define TXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */ +#define TXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */ + +#define TXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 +#define TXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 +#define TXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 +#define TXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 +#define TXGBE_FW_LESM_PARAMETERS_PTR 0x2 +#define TXGBE_FW_LESM_STATE_1 0x1 +#define TXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ +#define TXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 +#define TXGBE_FW_PATCH_VERSION_4 0x7 +#define TXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ +#define TXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ +#define TXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */ +#define TXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ +#define TXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ +#define TXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x17 /* Alt. 
SAN MAC block */ +#define TXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt SAN MAC capability */ +#define TXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt SAN MAC 0 offset */ +#define TXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt SAN MAC 1 offset */ +#define TXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt WWNN prefix offset */ +#define TXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt WWPN prefix offset */ +#define TXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt SAN MAC exists */ +#define TXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt WWN base exists */ +#define TXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */ +#define TXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */ +#define TXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */ + +/******************************** PCI Bus Info *******************************/ +#define TXGBE_PCI_DEVICE_STATUS 0xAA +#define TXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 +#define TXGBE_PCI_LINK_STATUS 0xB2 +#define TXGBE_PCI_DEVICE_CONTROL2 0xC8 +#define TXGBE_PCI_LINK_WIDTH 0x3F0 +#define TXGBE_PCI_LINK_WIDTH_1 0x10 +#define TXGBE_PCI_LINK_WIDTH_2 0x20 +#define TXGBE_PCI_LINK_WIDTH_4 0x40 +#define TXGBE_PCI_LINK_WIDTH_8 0x80 +#define TXGBE_PCI_LINK_SPEED 0xF +#define TXGBE_PCI_LINK_SPEED_2500 0x1 +#define TXGBE_PCI_LINK_SPEED_5000 0x2 +#define TXGBE_PCI_LINK_SPEED_8000 0x3 +#define TXGBE_PCI_HEADER_TYPE_REGISTER 0x0E +#define TXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define TXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 + +#define TXGBE_PCIDEVCTRL2_RELAX_ORDER_OFFSET 4 +#define TXGBE_PCIDEVCTRL2_RELAX_ORDER_MASK \ + (0x0001 << TXGBE_PCIDEVCTRL2_RELAX_ORDER_OFFSET) +#define TXGBE_PCIDEVCTRL2_RELAX_ORDER_ENABLE \ + (0x01 << TXGBE_PCIDEVCTRL2_RELAX_ORDER_OFFSET) + +#define TXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf +#define TXGBE_PCIDEVCTRL2_16_32ms_def 0x0 +#define TXGBE_PCIDEVCTRL2_50_100us 0x1 +#define TXGBE_PCIDEVCTRL2_1_2ms 0x2 +#define TXGBE_PCIDEVCTRL2_16_32ms 0x5 +#define TXGBE_PCIDEVCTRL2_65_130ms 0x6 +#define TXGBE_PCIDEVCTRL2_260_520ms 0x9 +#define TXGBE_PCIDEVCTRL2_1_2s 0xa +#define TXGBE_PCIDEVCTRL2_4_8s 0xd +#define TXGBE_PCIDEVCTRL2_17_34s 0xe + + +/******************* Receive Descriptor bit definitions **********************/ +#define TXGBE_RXD_IPSEC_STATUS_SECP 0x00020000U +#define TXGBE_RXD_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000U +#define TXGBE_RXD_IPSEC_ERROR_INVALID_LENGTH 0x10000000U +#define TXGBE_RXD_IPSEC_ERROR_AUTH_FAILED 0x18000000U +#define TXGBE_RXD_IPSEC_ERROR_BIT_MASK 0x18000000U + +#define TXGBE_RXD_NEXTP_MASK 0x000FFFF0U /* Next Descriptor Index */ +#define TXGBE_RXD_NEXTP_SHIFT 0x00000004U +#define TXGBE_RXD_STAT_MASK 0x000fffffU /* Stat/NEXTP: bit 0-19 */ +#define TXGBE_RXD_STAT_DD 0x00000001U /* Done */ +#define TXGBE_RXD_STAT_EOP 0x00000002U /* End of Packet */ +#define TXGBE_RXD_STAT_CLASS_ID_MASK 0x0000001CU +#define TXGBE_RXD_STAT_CLASS_ID_TC_RSS 0x00000000U +#define TXGBE_RXD_STAT_CLASS_ID_FLM 0x00000004U /* FDir Match */ +#define TXGBE_RXD_STAT_CLASS_ID_SYN 0x00000008U +#define TXGBE_RXD_STAT_CLASS_ID_5_TUPLE 0x0000000CU +#define TXGBE_RXD_STAT_CLASS_ID_L2_ETYPE 0x00000010U +#define TXGBE_RXD_STAT_VP 0x00000020U /* IEEE VLAN Pkt */ +#define TXGBE_RXD_STAT_UDPCS 0x00000040U /* UDP xsum calculated */ +#define TXGBE_RXD_STAT_L4CS 0x00000080U /* L4 xsum calculated */ +#define TXGBE_RXD_STAT_IPCS 0x00000100U /* IP xsum calculated */ +#define TXGBE_RXD_STAT_PIF 0x00000200U /* passed in-exact filter */ +#define TXGBE_RXD_STAT_OUTERIPCS 0x00000400U /* Cloud IP xsum calculated*/ +#define TXGBE_RXD_STAT_VEXT 
0x00000800U /* 1st VLAN found */ +#define TXGBE_RXD_STAT_LLINT 0x00002000U /* Pkt caused Low Latency + * Int */ +#define TXGBE_RXD_STAT_TS 0x00004000U /* IEEE1588 Time Stamp */ +#define TXGBE_RXD_STAT_SECP 0x00008000U /* Security Processing */ +#define TXGBE_RXD_STAT_LB 0x00010000U /* Loopback Status */ +#define TXGBE_RXD_STAT_FCEOFS 0x00020000U /* FCoE EOF/SOF Stat */ +#define TXGBE_RXD_STAT_FCSTAT 0x000C0000U /* FCoE Pkt Stat */ +#define TXGBE_RXD_STAT_FCSTAT_NOMTCH 0x00000000U /* 00: No Ctxt Match */ +#define TXGBE_RXD_STAT_FCSTAT_NODDP 0x00040000U /* 01: Ctxt w/o DDP */ +#define TXGBE_RXD_STAT_FCSTAT_FCPRSP 0x00080000U /* 10: Recv. FCP_RSP */ +#define TXGBE_RXD_STAT_FCSTAT_DDP 0x000C0000U /* 11: Ctxt w/ DDP */ + +#define TXGBE_RXD_IPV6EX 0x00001000U /* IPv6EX */ +#define TXGBE_RXD_ERR_MASK 0xfff00000U /* RDESC.ERRORS mask */ +#define TXGBE_RXD_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define TXGBE_RXD_ERR_FCEOFE 0x80000000U /* FCEOFe/IPE */ +#define TXGBE_RXD_ERR_FCERR 0x00700000U /* FCERR/FDIRERR */ +#define TXGBE_RXD_ERR_FDIR_LEN 0x00100000U /* FDIR Length error */ +#define TXGBE_RXD_ERR_FDIR_DROP 0x00200000U /* FDIR Drop error */ +#define TXGBE_RXD_ERR_FDIR_COLL 0x00400000U /* FDIR Collision error */ +#define TXGBE_RXD_ERR_HBO 0x00800000U /*Header Buffer Overflow */ +#define TXGBE_RXD_ERR_OUTERIPER 0x04000000U /* CRC IP Header error */ +#define TXGBE_RXD_ERR_SECERR_MASK 0x18000000U +#define TXGBE_RXD_ERR_RXE 0x20000000U /* Any MAC Error */ +#define TXGBE_RXD_ERR_TCPE 0x40000000U /* TCP/UDP Checksum Error */ +#define TXGBE_RXD_ERR_IPE 0x80000000U /* IP Checksum Error */ + +#define TXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000U +#define TXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FFU -#define TXGBE_MAX_EITR GENMASK(11, 3) +#define TXGBE_RXD_RSSTYPE_MASK 0x0000000FU +#define TXGBE_RXD_TPID_MASK 0x000001C0U +#define TXGBE_RXD_TPID_SHIFT 6 +#define TXGBE_RXD_HDRBUFLEN_MASK 0x00007FE0U +#define TXGBE_RXD_RSCCNT_MASK 0x001E0000U +#define TXGBE_RXD_RSCCNT_SHIFT 17 +#define TXGBE_RXD_HDRBUFLEN_SHIFT 5 +#define TXGBE_RXD_SPLITHEADER_EN 0x00001000U +#define TXGBE_RXD_SPH 0x8000 -extern char txgbe_driver_name[]; +/* RSS Hash results */ +#define TXGBE_RXD_RSSTYPE_NONE 0x00000000U +#define TXGBE_RXD_RSSTYPE_IPV4_TCP 0x00000001U +#define TXGBE_RXD_RSSTYPE_IPV4 0x00000002U +#define TXGBE_RXD_RSSTYPE_IPV6_TCP 0x00000003U +#define TXGBE_RXD_RSSTYPE_IPV4_SCTP 0x00000004U +#define TXGBE_RXD_RSSTYPE_IPV6 0x00000005U +#define TXGBE_RXD_RSSTYPE_IPV6_SCTP 0x00000006U +#define TXGBE_RXD_RSSTYPE_IPV4_UDP 0x00000007U +#define TXGBE_RXD_RSSTYPE_IPV6_UDP 0x00000008U -static inline struct txgbe *netdev_to_txgbe(struct net_device *netdev) +/** + * receive packet type + * PTYPE:8 = TUN:2 + PKT:2 + TYP:4 + **/ +/* TUN */ +#define TXGBE_PTYPE_TUN_IPV4 (0x80) +#define TXGBE_PTYPE_TUN_IPV6 (0xC0) + +/* PKT for TUN */ +#define TXGBE_PTYPE_PKT_IPIP (0x00) /* IP+IP */ +#define TXGBE_PTYPE_PKT_IG (0x10) /* IP+GRE */ +#define TXGBE_PTYPE_PKT_IGM (0x20) /* IP+GRE+MAC */ +#define TXGBE_PTYPE_PKT_IGMV (0x30) /* IP+GRE+MAC+VLAN */ +/* PKT for !TUN */ +#define TXGBE_PTYPE_PKT_MAC (0x10) +#define TXGBE_PTYPE_PKT_IP (0x20) +#define TXGBE_PTYPE_PKT_FCOE (0x30) + +/* TYP for PKT=mac */ +#define TXGBE_PTYPE_TYP_MAC (0x01) +#define TXGBE_PTYPE_TYP_TS (0x02) /* time sync */ +#define TXGBE_PTYPE_TYP_FIP (0x03) +#define TXGBE_PTYPE_TYP_LLDP (0x04) +#define TXGBE_PTYPE_TYP_CNM (0x05) +#define TXGBE_PTYPE_TYP_EAPOL (0x06) +#define TXGBE_PTYPE_TYP_ARP (0x07) +/* TYP for PKT=ip */ +#define TXGBE_PTYPE_PKT_IPV6 (0x08) +#define TXGBE_PTYPE_TYP_IPFRAG (0x01) 
+#define TXGBE_PTYPE_TYP_IP (0x02) +#define TXGBE_PTYPE_TYP_UDP (0x03) +#define TXGBE_PTYPE_TYP_TCP (0x04) +#define TXGBE_PTYPE_TYP_SCTP (0x05) +/* TYP for PKT=fcoe */ +#define TXGBE_PTYPE_PKT_VFT (0x08) +#define TXGBE_PTYPE_TYP_FCOE (0x00) +#define TXGBE_PTYPE_TYP_FCDATA (0x01) +#define TXGBE_PTYPE_TYP_FCRDY (0x02) +#define TXGBE_PTYPE_TYP_FCRSP (0x03) +#define TXGBE_PTYPE_TYP_FCOTHER (0x04) + +/* Packet type non-ip values */ +enum txgbe_l2_ptypes { + TXGBE_PTYPE_L2_ABORTED = (TXGBE_PTYPE_PKT_MAC), + TXGBE_PTYPE_L2_MAC = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_MAC), + TXGBE_PTYPE_L2_TS = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_TS), + TXGBE_PTYPE_L2_FIP = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_FIP), + TXGBE_PTYPE_L2_LLDP = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_LLDP), + TXGBE_PTYPE_L2_CNM = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_CNM), + TXGBE_PTYPE_L2_EAPOL = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_EAPOL), + TXGBE_PTYPE_L2_ARP = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_ARP), + + TXGBE_PTYPE_L2_IPV4_FRAG = (TXGBE_PTYPE_PKT_IP | + TXGBE_PTYPE_TYP_IPFRAG), + TXGBE_PTYPE_L2_IPV4 = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_TYP_IP), + TXGBE_PTYPE_L2_IPV4_UDP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_TYP_UDP), + TXGBE_PTYPE_L2_IPV4_TCP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_TYP_TCP), + TXGBE_PTYPE_L2_IPV4_SCTP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_TYP_SCTP), + TXGBE_PTYPE_L2_IPV6_FRAG = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 | + TXGBE_PTYPE_TYP_IPFRAG), + TXGBE_PTYPE_L2_IPV6 = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 | + TXGBE_PTYPE_TYP_IP), + TXGBE_PTYPE_L2_IPV6_UDP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 | + TXGBE_PTYPE_TYP_UDP), + TXGBE_PTYPE_L2_IPV6_TCP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 | + TXGBE_PTYPE_TYP_TCP), + TXGBE_PTYPE_L2_IPV6_SCTP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 | + TXGBE_PTYPE_TYP_SCTP), + + TXGBE_PTYPE_L2_FCOE = (TXGBE_PTYPE_PKT_FCOE | TXGBE_PTYPE_TYP_FCOE), + TXGBE_PTYPE_L2_FCOE_FCDATA = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_TYP_FCDATA), + TXGBE_PTYPE_L2_FCOE_FCRDY = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_TYP_FCRDY), + TXGBE_PTYPE_L2_FCOE_FCRSP = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_TYP_FCRSP), + TXGBE_PTYPE_L2_FCOE_FCOTHER = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_TYP_FCOTHER), + TXGBE_PTYPE_L2_FCOE_VFT = (TXGBE_PTYPE_PKT_FCOE | TXGBE_PTYPE_PKT_VFT), + TXGBE_PTYPE_L2_FCOE_VFT_FCDATA = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_PKT_VFT | TXGBE_PTYPE_TYP_FCDATA), + TXGBE_PTYPE_L2_FCOE_VFT_FCRDY = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_PKT_VFT | TXGBE_PTYPE_TYP_FCRDY), + TXGBE_PTYPE_L2_FCOE_VFT_FCRSP = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_PKT_VFT | TXGBE_PTYPE_TYP_FCRSP), + TXGBE_PTYPE_L2_FCOE_VFT_FCOTHER = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_PKT_VFT | TXGBE_PTYPE_TYP_FCOTHER), + + TXGBE_PTYPE_L2_TUN4_MAC = (TXGBE_PTYPE_TUN_IPV4 | TXGBE_PTYPE_PKT_IGM), + TXGBE_PTYPE_L2_TUN6_MAC = (TXGBE_PTYPE_TUN_IPV6 | TXGBE_PTYPE_PKT_IGM), +}; + +#define TXGBE_RXD_PKTTYPE(_rxd) \ + ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF) +#define TXGBE_PTYPE_TUN(_pt) ((_pt) & 0xC0) +#define TXGBE_PTYPE_PKT(_pt) ((_pt) & 0x30) +#define TXGBE_PTYPE_TYP(_pt) ((_pt) & 0x0F) +#define TXGBE_PTYPE_TYPL4(_pt) ((_pt) & 0x07) + +/* Security Processing bit Indication */ +#define TXGBE_RXD_LNKSEC_STATUS_SECP 0x00020000U +#define TXGBE_RXD_LNKSEC_ERROR_NO_SA_MATCH 0x08000000U +#define TXGBE_RXD_LNKSEC_ERROR_REPLAY_ERROR 0x10000000U +#define TXGBE_RXD_LNKSEC_ERROR_BIT_MASK 0x18000000U +#define TXGBE_RXD_LNKSEC_ERROR_BAD_SIG 0x18000000U + +/* Masks to determine if packets should be dropped due to frame 
errors */ +#define TXGBE_RXD_ERR_FRAME_ERR_MASK TXGBE_RXD_ERR_RXE + +/*********************** Adv Transmit Descriptor Config Masks ****************/ +#define TXGBE_TXD_DTALEN_MASK 0x0000FFFFU /* Data buf length(bytes) */ +#define TXGBE_TXD_MAC_LINKSEC 0x00040000U /* Insert LinkSec */ +#define TXGBE_TXD_MAC_TSTAMP 0x00080000U /* IEEE1588 time stamp */ +#define TXGBE_TXD_IPSEC_SA_INDEX_MASK 0x000003FFU /* IPSec SA index */ +#define TXGBE_TXD_IPSEC_ESP_LEN_MASK 0x000001FFU /* IPSec ESP length */ +#define TXGBE_TXD_DTYP_MASK 0x00F00000U /* DTYP mask */ +#define TXGBE_TXD_DTYP_CTXT 0x00100000U /* Adv Context Desc */ +#define TXGBE_TXD_DTYP_DATA 0x00000000U /* Adv Data Descriptor */ +#define TXGBE_TXD_EOP 0x01000000U /* End of Packet */ +#define TXGBE_TXD_IFCS 0x02000000U /* Insert FCS */ +#define TXGBE_TXD_LINKSEC 0x04000000U /* enable linksec */ +#define TXGBE_TXD_RS 0x08000000U /* Report Status */ +#define TXGBE_TXD_ECU 0x10000000U /* DDP hdr type or iSCSI */ +#define TXGBE_TXD_QCN 0x20000000U /* cntag insertion enable */ +#define TXGBE_TXD_VLE 0x40000000U /* VLAN pkt enable */ +#define TXGBE_TXD_TSE 0x80000000U /* TCP Seg enable */ +#define TXGBE_TXD_STAT_DD 0x00000001U /* Descriptor Done */ +#define TXGBE_TXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define TXGBE_TXD_CC 0x00000080U /* Check Context */ +#define TXGBE_TXD_IPSEC 0x00000100U /* enable ipsec esp */ +#define TXGBE_TXD_IIPCS 0x00000400U +#define TXGBE_TXD_EIPCS 0x00000800U +#define TXGBE_TXD_L4CS 0x00000200U +#define TXGBE_TXD_PAYLEN_SHIFT 13 /* Adv desc PAYLEN shift */ +#define TXGBE_TXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define TXGBE_TXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define TXGBE_TXD_TAG_TPID_SEL_SHIFT 11 +#define TXGBE_TXD_IPSEC_TYPE_SHIFT 14 +#define TXGBE_TXD_ENC_SHIFT 15 + +#define TXGBE_TXD_TUCMD_IPSEC_TYPE_ESP 0x00004000U /* IPSec Type ESP */ +#define TXGBE_TXD_TUCMD_IPSEC_ENCRYPT_EN 0x00008000/* ESP Encrypt Enable */ +#define TXGBE_TXD_TUCMD_FCOE 0x00010000U /* FCoE Frame Type */ +#define TXGBE_TXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ +#define TXGBE_TXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ +#define TXGBE_TXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ +#define TXGBE_TXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation End */ +#define TXGBE_TXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation Start */ +#define TXGBE_TXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ +#define TXGBE_TXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ +#define TXGBE_TXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ +#define TXGBE_TXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ +#define TXGBE_TXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define TXGBE_TXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +#define TXGBE_TXD_OUTER_IPLEN_SHIFT 12 /* Adv ctxt OUTERIPLEN shift */ +#define TXGBE_TXD_TUNNEL_LEN_SHIFT 21 /* Adv ctxt TUNNELLEN shift */ +#define TXGBE_TXD_TUNNEL_TYPE_SHIFT 11 /* Adv Tx Desc Tunnel Type shift */ +#define TXGBE_TXD_TUNNEL_DECTTL_SHIFT 27 /* Adv ctxt DECTTL shift */ +#define TXGBE_TXD_TUNNEL_UDP (0x0ULL << TXGBE_TXD_TUNNEL_TYPE_SHIFT) +#define TXGBE_TXD_TUNNEL_GRE (0x1ULL << TXGBE_TXD_TUNNEL_TYPE_SHIFT) + + +/************ txgbe_type.h ************/ +/* Number of Transmit and Receive Descriptors must be a multiple of 128 */ +#define TXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 128 +#define TXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 128 +#define TXGBE_REQ_TX_BUFFER_GRANULARITY 1024 + +/* Vlan-specific macros */ +#define TXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ +#define TXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* 
Priority in upper 3 bits */ +#define TXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ +#define TXGBE_TX_DESC_SPECIAL_PRI_SHIFT TXGBE_RX_DESC_SPECIAL_PRI_SHIFT + +/* Transmit Descriptor */ +union txgbe_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Receive Descriptor */ +union txgbe_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen */ + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Context descriptors */ +struct txgbe_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/************************* Flow Directory HASH *******************************/ +/* Software ATR hash keys */ +#define TXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 +#define TXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 + +/* Software ATR input stream values and masks */ +#define TXGBE_ATR_HASH_MASK 0x7fff +#define TXGBE_ATR_L4TYPE_MASK 0x3 +#define TXGBE_ATR_L4TYPE_UDP 0x1 +#define TXGBE_ATR_L4TYPE_TCP 0x2 +#define TXGBE_ATR_L4TYPE_SCTP 0x3 +#define TXGBE_ATR_L4TYPE_IPV6_MASK 0x4 +#define TXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10 +enum txgbe_atr_flow_type { + TXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, + TXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, + TXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, + TXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, + TXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, + TXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, + TXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, + TXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, + TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10, + TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11, + TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12, + TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13, + TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14, + TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15, + TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16, + TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17, +}; + +/* Flow Director ATR input struct. */ +union txgbe_atr_input { + /* + * Byte layout in order, all values with MSB first: + * + * vm_pool - 1 byte + * flow_type - 1 byte + * vlan_id - 2 bytes + * src_ip - 16 bytes + * inner_mac - 6 bytes + * cloud_mode - 2 bytes + * tni_vni - 4 bytes + * dst_ip - 16 bytes + * src_port - 2 bytes + * dst_port - 2 bytes + * flex_bytes - 2 bytes + * bkt_hash - 2 bytes + */ + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + __be32 dst_ip[4]; + __be32 src_ip[4]; + __be16 src_port; + __be16 dst_port; + __be16 flex_bytes; + __be16 bkt_hash; + } formatted; + __be32 dword_stream[11]; +}; + +/* Flow Director compressed ATR hash input struct */ +union txgbe_atr_hash_dword { + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + } formatted; + __be32 ip; + struct { + __be16 src; + __be16 dst; + } port; + __be16 flex_bytes; + __be32 dword; +}; + +struct txgbe_ethertype_filter { + u16 rule_idx; + u64 action; + u16 ethertype; + u32 etqf; + u32 etqs; +}; + +/* Structure to store ethertype filters' info. 
*/ +struct txgbe_etype_filter_info { + int count; + u8 ethertype_mask; /* Bit mask for every used ethertype filter */ + /* store used ethertype filters */ + struct txgbe_ethertype_filter etype_filters[TXGBE_MAX_PSR_ETYPE_SWC_FILTERS]; +}; + +/* Structure to store 5-tuple filters' info. */ +struct txgbe_5tuple_filter_info { + u32 fivetuple_mask[4]; /* Bit mask for max 128 filters */ +}; + +/****************** Manageablility Host Interface defines ********************/ +#define TXGBE_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */ +#define TXGBE_HI_MAX_BLOCK_DWORD_LENGTH 64 /* Num of dwords in range */ +#define TXGBE_HI_COMMAND_TIMEOUT 5000 /* Process HI command limit */ +#define TXGBE_HI_FLASH_ERASE_TIMEOUT 5000 /* Process Erase command limit */ +#define TXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */ +#define TXGBE_HI_FLASH_VERIFY_TIMEOUT 60000 /* Process Apply command limit */ +#define TXGBE_HI_PHY_MGMT_REQ_TIMEOUT 2000 /* Wait up to 2 seconds */ + +/* CEM Support */ +#define FW_CEM_HDR_LEN 0x4 +#define FW_CEM_CMD_DRIVER_INFO 0xDD +#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 +#define FW_CEM_CMD_RESERVED 0X0 +#define FW_CEM_UNUSED_VER 0x0 +#define FW_CEM_MAX_RETRIES 3 +#define FW_CEM_RESP_STATUS_SUCCESS 0x1 +#define FW_READ_SHADOW_RAM_CMD 0x31 +#define FW_READ_SHADOW_RAM_LEN 0x6 +#define FW_WRITE_SHADOW_RAM_CMD 0x33 +#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */ +#define FW_SHADOW_RAM_DUMP_CMD 0x36 +#define FW_SHADOW_RAM_DUMP_LEN 0 +#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */ +#define FW_NVM_DATA_OFFSET 3 +#define FW_MAX_READ_BUFFER_SIZE 244 +#define FW_DISABLE_RXEN_CMD 0xDE +#define FW_DISABLE_RXEN_LEN 0x1 +#define FW_PHY_MGMT_REQ_CMD 0x20 +#define FW_RESET_CMD 0xDF +#define FW_RESET_LEN 0x2 +#define FW_SETUP_MAC_LINK_CMD 0xE0 +#define FW_SETUP_MAC_LINK_LEN 0x2 +#define FW_FLASH_UPGRADE_START_CMD 0xE3 +#define FW_FLASH_UPGRADE_START_LEN 0x1 +#define FW_FLASH_UPGRADE_WRITE_CMD 0xE4 +#define FW_FLASH_UPGRADE_VERIFY_CMD 0xE5 +#define FW_FLASH_UPGRADE_VERIFY_LEN 0x4 +#define FW_DW_OPEN_NOTIFY 0xE9 +#define FW_DW_CLOSE_NOTIFY 0xEA +#define FW_PPS_SET_CMD 0xF6 +#define FW_PPS_SET_LEN 0x14 +#define FW_AN_STA_CMD 0xF3 +#define FW_AN_STA_LEN 0x1 + +#define TXGBE_CHECKSUM_CAP_ST_PASS 0x80658383 +#define TXGBE_CHECKSUM_CAP_ST_FAIL 0x70657376 + +#define TXGBE_HIC_HDR_INDEX_MAX 255 + +/* Host Interface Command Structures */ +struct txgbe_hic_hdr { + u8 cmd; + u8 buf_len; + union { + u8 cmd_resv; + u8 ret_status; + } cmd_or_resp; + union { + u8 checksum; + u8 index; + } cksum_or_index; +}; + +struct txgbe_hic_hdr2_req { + u8 cmd; + u8 buf_lenh; + u8 buf_lenl; + union { + u8 checksum; + u8 index; + } cksum_or_index; +}; + +struct txgbe_hic_hdr2_rsp { + u8 cmd; + u8 buf_lenl; + u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ + union { + u8 checksum; + u8 index; + } cksum_or_index; +}; + +union txgbe_hic_hdr2 { + struct txgbe_hic_hdr2_req req; + struct txgbe_hic_hdr2_rsp rsp; +}; + +struct txgbe_hic_drv_info { + struct txgbe_hic_hdr hdr; + u8 port_num; + u8 ver_sub; + u8 ver_build; + u8 ver_min; + u8 ver_maj; + u8 pad; /* end spacing to ensure length is mult. of dword */ + u16 pad2; /* end spacing to ensure length is mult. 
of dword2 */ +}; + +/* These need to be dword aligned */ +struct txgbe_hic_read_shadow_ram { + union txgbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct txgbe_hic_write_shadow_ram { + union txgbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct txgbe_hic_disable_rxen { + struct txgbe_hic_hdr hdr; + u8 port_number; + u8 pad2; + u16 pad3; +}; + +struct txgbe_hic_reset { + struct txgbe_hic_hdr hdr; + u16 lan_id; + u16 reset_type; +}; + +struct txgbe_hic_phy_cfg { + struct txgbe_hic_hdr hdr; + u8 lan_id; + u8 phy_mode; + u16 phy_speed; +}; + +enum txgbe_module_id { + TXGBE_MODULE_EEPROM = 0, + TXGBE_MODULE_FIRMWARE, + TXGBE_MODULE_HARDWARE, + TXGBE_MODULE_PCIE +}; + +struct txgbe_hic_upg_start { + struct txgbe_hic_hdr hdr; + u8 module_id; + u8 pad2; + u16 pad3; +}; + +struct txgbe_hic_upg_write { + struct txgbe_hic_hdr hdr; + u8 data_len; + u8 eof_flag; + u16 check_sum; + u32 data[62]; +}; + +enum txgbe_upg_flag { + TXGBE_RESET_NONE = 0, + TXGBE_RESET_FIRMWARE, + TXGBE_RELOAD_EEPROM, + TXGBE_RESET_LAN +}; + +struct txgbe_hic_upg_verify { + struct txgbe_hic_hdr hdr; + u32 action_flag; +}; + +struct txgbe_hic_write_lldp{ + struct txgbe_hic_hdr hdr; + u8 func; + u8 pad2; + u16 pad3; +}; + +struct txgbe_hic_set_pps { + struct txgbe_hic_hdr hdr; + u8 lan_id; + u8 enable; + u16 pad2; + u64 nsec; + u64 cycles; +}; + +struct txgbe_hic_write_autoneg { + struct txgbe_hic_hdr hdr; + u8 lan_id; + bool autoneg; + u16 pad; +}; + +struct txgbe_led_active_set { + struct txgbe_hic_hdr hdr; + u32 active_flag; +}; + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define TXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 + +/* Check whether address is multicast. This is little-endian specific check.*/ +#define TXGBE_IS_MULTICAST(Address) \ + (bool)(((u8 *)(Address))[0] & ((u8)0x01)) + +/* Check whether an address is broadcast. 
*/ +#define TXGBE_IS_BROADCAST(Address) \ + ((((u8 *)(Address))[0] == ((u8)0xff)) && \ + (((u8 *)(Address))[1] == ((u8)0xff))) + +/* DCB registers */ +#define TXGBE_DCB_MAX_TRAFFIC_CLASS 8 + +/* Power Management */ +/* DMA Coalescing configuration */ +struct txgbe_dmac_config { + u16 watchdog_timer; /* usec units */ + bool fcoe_en; + u32 link_speed; + u8 fcoe_tc; + u8 num_tcs; +}; + +/* Autonegotiation advertised speeds */ +typedef u32 txgbe_autoneg_advertised; +/* Link speed */ +#define TXGBE_LINK_SPEED_UNKNOWN 0 +#define TXGBE_LINK_SPEED_100_FULL 1 +#define TXGBE_LINK_SPEED_1GB_FULL 2 +#define TXGBE_LINK_SPEED_10GB_FULL 4 +#define TXGBE_LINK_SPEED_10_FULL 8 +#define TXGBE_LINK_SPEED_AUTONEG (TXGBE_LINK_SPEED_100_FULL | \ + TXGBE_LINK_SPEED_1GB_FULL | \ + TXGBE_LINK_SPEED_10GB_FULL | \ + TXGBE_LINK_SPEED_10_FULL) + +#define TXGBE_LINK_SPEED_25GB_FULL 0x10 +#define TXGBE_LINK_SPEED_40GB_FULL 0x20 +#define TXGBE_LINK_SPEED_50GB_FULL 0x40 + +#define TXGBE_LINK_SPEED_AMLITE_AUTONEG (TXGBE_LINK_SPEED_10GB_FULL | \ + TXGBE_LINK_SPEED_25GB_FULL) +/* Amlite eth mode */ +enum amlite_eth_mode { + ETH_RATE_10G = 0, + ETH_RATE_25G +}; + +/* Physical layer type */ +typedef u32 txgbe_physical_layer; +#define TXGBE_PHYSICAL_LAYER_UNKNOWN 0 +#define TXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001 +#define TXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002 +#define TXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004 +#define TXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008 +#define TXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010 +#define TXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020 +#define TXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040 +#define TXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080 +#define TXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200 +#define TXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400 +#define TXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800 +#define TXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000 +#define TXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000 +#define TXGBE_PHYSICAL_LAYER_1000BASE_SX 0x4000 + + +/* Special PHY Init Routine */ +#define TXGBE_PHY_INIT_OFFSET_NL 0x002B +#define TXGBE_PHY_INIT_END_NL 0xFFFF +#define TXGBE_CONTROL_MASK_NL 0xF000 +#define TXGBE_DATA_MASK_NL 0x0FFF +#define TXGBE_CONTROL_SHIFT_NL 12 +#define TXGBE_DELAY_NL 0 +#define TXGBE_DATA_NL 1 +#define TXGBE_CONTROL_NL 0x000F +#define TXGBE_CONTROL_EOL_NL 0x0FFF +#define TXGBE_CONTROL_SOL_NL 0x0000 + + + +/* Flow Control Data Sheet defined values + * Calculation and defines taken from 802.1bb Annex O + */ + +/* BitTimes (BT) conversion */ +#define TXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024)) +#define TXGBE_B2BT(BT) (BT * 8) + +/* Calculate Delay to respond to PFC */ +#define TXGBE_PFC_D 672 + +/* Calculate Cable Delay */ +#define TXGBE_CABLE_DC 5556 /* Delay Copper */ +#define TXGBE_CABLE_DO 5000 /* Delay Optical */ + +/* Calculate Interface Delay X540 */ +#define TXGBE_PHY_DC 25600 /* Delay 10G BASET */ +#define TXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */ +#define TXGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */ + +#define TXGBE_ID_X540 (TXGBE_MAC_DC + TXGBE_XAUI_DC + TXGBE_PHY_DC) + +/* Calculate Interface Delay */ +#define TXGBE_PHY_D 12800 +#define TXGBE_MAC_D 4096 +#define TXGBE_XAUI_D (2 * 1024) + +#define TXGBE_ID (TXGBE_MAC_D + TXGBE_XAUI_D + TXGBE_PHY_D) + +/* Calculate Delay incurred from higher layer */ +#define TXGBE_HD 6144 + +/* Calculate PCI Bus delay for low thresholds */ +#define TXGBE_PCI_DELAY 10000 + +/* Calculate X540 delay value in bit times */ +#define TXGBE_DV_X540(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (TXGBE_B2BT(_max_frame_link) + \ + TXGBE_PFC_D + \ + (2 * 
TXGBE_CABLE_DC) + \ + (2 * TXGBE_ID_X540) + \ + TXGBE_HD) / 25 + 1) + \ + 2 * TXGBE_B2BT(_max_frame_tc)) + + +/* Calculate delay value in bit times */ +#define TXGBE_DV(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (TXGBE_B2BT(_max_frame_link) + \ + TXGBE_PFC_D + \ + (2 * TXGBE_CABLE_DC) + \ + (2 * TXGBE_ID) + \ + TXGBE_HD) / 25 + 1) + \ + 2 * TXGBE_B2BT(_max_frame_tc)) + +/* Calculate low threshold delay values */ +#define TXGBE_LOW_DV_X540(_max_frame_tc) \ + (2 * TXGBE_B2BT(_max_frame_tc) + \ + (36 * TXGBE_PCI_DELAY / 25) + 1) + +#define TXGBE_LOW_DV(_max_frame_tc) \ + (2 * TXGBE_LOW_DV_X540(_max_frame_tc)) + + +/* + * Unavailable: The FCoE Boot Option ROM is not present in the flash. + * Disabled: Present; boot order is not set for any targets on the port. + * Enabled: Present; boot order is set for at least one target on the port. + */ +enum txgbe_fcoe_boot_status { + txgbe_fcoe_bootstatus_disabled = 0, + txgbe_fcoe_bootstatus_enabled = 1, + txgbe_fcoe_bootstatus_unavailable = 0xFFFF +}; + +enum txgbe_eeprom_type { + txgbe_eeprom_uninitialized = 0, + txgbe_eeprom_spi, + txgbe_flash, + txgbe_eeprom_none /* No NVM support */ +}; + +enum txgbe_phy_type { + txgbe_phy_unknown = 0, + txgbe_phy_none, + txgbe_phy_tn, + txgbe_phy_aq, + txgbe_phy_cu_unknown, + txgbe_phy_qt, + txgbe_phy_xaui, + txgbe_phy_nl, + txgbe_phy_sfp_passive_tyco, + txgbe_phy_sfp_passive_unknown, + txgbe_phy_sfp_active_unknown, + txgbe_phy_sfp_avago, + txgbe_phy_sfp_ftl, + txgbe_phy_sfp_ftl_active, + txgbe_phy_sfp_unknown, + txgbe_phy_sfp_intel, + txgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/ + txgbe_phy_generic +}; + +/* + * SFP+ module type IDs: + * + * ID Module Type + * ============= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CU_CORE0 + * 4 SFP_DA_CU_CORE1 + * 5 SFP_SR/LR_CORE0 + * 6 SFP_SR/LR_CORE1 + */ +enum txgbe_sfp_type { + txgbe_sfp_type_da_cu = 0, + txgbe_sfp_type_sr = 1, + txgbe_sfp_type_lr = 2, + txgbe_sfp_type_da_cu_core0 = 3, + txgbe_sfp_type_da_cu_core1 = 4, + txgbe_sfp_type_srlr_core0 = 5, + txgbe_sfp_type_srlr_core1 = 6, + txgbe_sfp_type_da_act_lmt_core0 = 7, + txgbe_sfp_type_da_act_lmt_core1 = 8, + txgbe_sfp_type_1g_cu_core0 = 9, + txgbe_sfp_type_1g_cu_core1 = 10, + txgbe_sfp_type_1g_sx_core0 = 11, + txgbe_sfp_type_1g_sx_core1 = 12, + txgbe_sfp_type_1g_lx_core0 = 13, + txgbe_sfp_type_1g_lx_core1 = 14, + txgbe_sfp_type_10g_cu_core0 = 15, /* add for qi'an'xin 10G fiber2copper sfp */ + txgbe_sfp_type_10g_cu_core1 = 16, + txgbe_sfp_type_25g_sr_core0 = 17, + txgbe_sfp_type_25g_sr_core1 = 18, + txgbe_sfp_type_25g_lr_core0 = 19, + txgbe_sfp_type_25g_lr_core1 = 20, + txgbe_sfp_type_25g_aoc_core0 = 21, + txgbe_sfp_type_25g_aoc_core1 = 22, + txgbe_qsfp_type_40g_cu_core0 = 23, + txgbe_qsfp_type_40g_cu_core1 = 24, + txgbe_qsfp_type_40g_sr_core0 = 25, + txgbe_qsfp_type_40g_sr_core1 = 26, + txgbe_qsfp_type_40g_lr_core0 = 27, + txgbe_qsfp_type_40g_lr_core1 = 28, + txgbe_qsfp_type_40g_active_core0 = 29, + txgbe_qsfp_type_40g_active_core1 = 30, + txgbe_sfp_type_not_present = 0xFFFE, + txgbe_sfp_type_unknown = 0xFFFF +}; + +enum txgbe_media_type { + txgbe_media_type_unknown = 0, + txgbe_media_type_fiber, + txgbe_media_type_fiber_qsfp, + txgbe_media_type_copper, + txgbe_media_type_backplane, + txgbe_media_type_virtual, +}; + +/* Flow Control Settings */ +enum txgbe_fc_mode { + txgbe_fc_none = 0, + txgbe_fc_rx_pause, + txgbe_fc_tx_pause, + txgbe_fc_full, + txgbe_fc_default +}; + +/* Smart Speed Settings */ +#define TXGBE_SMARTSPEED_MAX_RETRIES 3 +enum txgbe_smart_speed { + 
txgbe_smart_speed_auto = 0, + txgbe_smart_speed_on, + txgbe_smart_speed_off +}; + +/* PCI bus types */ +enum txgbe_bus_type { + txgbe_bus_type_unknown = 0, + txgbe_bus_type_pci, + txgbe_bus_type_pcix, + txgbe_bus_type_pci_express, + txgbe_bus_type_internal, + txgbe_bus_type_reserved +}; + +/* PCI bus speeds */ +enum txgbe_bus_speed { + txgbe_bus_speed_unknown = 0, + txgbe_bus_speed_33 = 33, + txgbe_bus_speed_66 = 66, + txgbe_bus_speed_100 = 100, + txgbe_bus_speed_120 = 120, + txgbe_bus_speed_133 = 133, + txgbe_bus_speed_2500 = 2500, + txgbe_bus_speed_5000 = 5000, + txgbe_bus_speed_8000 = 8000, + txgbe_bus_speed_reserved +}; + +/* PCI bus widths */ +enum txgbe_bus_width { + txgbe_bus_width_unknown = 0, + txgbe_bus_width_pcie_x1 = 1, + txgbe_bus_width_pcie_x2 = 2, + txgbe_bus_width_pcie_x4 = 4, + txgbe_bus_width_pcie_x8 = 8, + txgbe_bus_width_32 = 32, + txgbe_bus_width_64 = 64, + txgbe_bus_width_reserved +}; + +struct txgbe_addr_filter_info { + u32 num_mc_addrs; + u32 rar_used_count; + u32 mta_in_use; + u32 overflow_promisc; + bool user_set_promisc; +}; + +/* Bus parameters */ +struct txgbe_bus_info { + enum pci_bus_speed speed; + enum pcie_link_width width; + enum txgbe_bus_type type; + + u16 func; + u16 lan_id; +}; + +/* Flow control parameters */ +struct txgbe_fc_info { + u32 high_water[TXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl High-water */ + u32 low_water[TXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl Low-water */ + u16 pause_time; /* Flow Control Pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + bool disable_fc_autoneg; /* Do not autonegotiate FC */ + bool fc_was_autonegged; /* Is current_mode the result of autonegging? */ + enum txgbe_fc_mode current_mode; /* FC mode in effect */ + enum txgbe_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +/* Statistics counters collected by the MAC */ +struct txgbe_hw_stats { + u64 crcerrs; + u64 illerrc; + u64 errbc; + u64 mspdc; + u64 mpctotal; + u64 mpc[8]; + u64 mlfc; + u64 mrfc; + u64 rlec; + u64 lxontxc; + u64 lxonrxc; + u64 lxofftxc; + u64 lxoffrxc; + u64 pxontxc[8]; + u64 pxonrxc[8]; + u64 pxofftxc[8]; + u64 pxoffrxc[8]; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 rdpc; + u64 rddc; + u64 psrpc; + u64 psrdc; + u64 untag; + u64 tdmpc; + u64 tdmdc; + u64 tdbpc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc[8]; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mngprc; + u64 mngpdc; + u64 mngptc; + u64 tor; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 xec; + u64 qprc[16]; + u64 qptc[16]; + u64 qbrc[16]; + u64 qbtc[16]; + u64 qprdc[16]; + u64 pxon2offc[8]; + u64 fdirustat_add; + u64 fdirustat_remove; + u64 fdirfstat_fadd; + u64 fdirfstat_fremove; + u64 fdirmatch; + u64 fdirmiss; + u64 fccrc; + u64 fclast; + u64 fcoerpdc; + u64 fcoeprc; + u64 fcoeptc; + u64 fcoedwrc; + u64 fcoedwtc; + u64 fcoe_noddp; + u64 fcoe_noddp_ext_buff; + u64 ldpcec; + u64 pcrc8ec; + u64 b2ospc; + u64 b2ogprc; + u64 o2bgptc; + u64 o2bspc; +}; + +/* forward declaration */ +struct txgbe_hw; + +/* iterator type for walking multicast address lists */ +typedef u8* (*txgbe_mc_addr_itr) (struct txgbe_hw *hw, u8 **mc_addr_ptr, + u32 *vmdq); + +/* Function pointer table */ +struct txgbe_eeprom_operations { + s32 (*init_params)(struct txgbe_hw *); + s32 (*read)(struct txgbe_hw *, u16, u16 *); + s32 (*read_buffer)(struct txgbe_hw *, u16, u16, u16 
*); + s32 (*write)(struct txgbe_hw *, u16, u16); + s32 (*write_buffer)(struct txgbe_hw *, u16, u16, u16 *); + s32 (*validate_checksum)(struct txgbe_hw *, u16 *); + s32 (*update_checksum)(struct txgbe_hw *); + s32 (*calc_checksum)(struct txgbe_hw *); +}; + +struct txgbe_flash_operations { + s32 (*init_params)(struct txgbe_hw *); + s32 (*read_buffer)(struct txgbe_hw *, u32, u32, u32 *); + s32 (*write_buffer)(struct txgbe_hw *, u32, u32, u32 *); +}; + +struct txgbe_mac_operations { + s32 (*init_hw)(struct txgbe_hw *); + s32 (*reset_hw)(struct txgbe_hw *); + s32 (*start_hw)(struct txgbe_hw *); + s32 (*clear_hw_cntrs)(struct txgbe_hw *); + enum txgbe_media_type (*get_media_type)(struct txgbe_hw *); + s32 (*get_mac_addr)(struct txgbe_hw *, u8 *); + s32 (*get_san_mac_addr)(struct txgbe_hw *, u8 *); + s32 (*set_san_mac_addr)(struct txgbe_hw *, u8 *); + s32 (*get_device_caps)(struct txgbe_hw *, u16 *); + s32 (*get_wwn_prefix)(struct txgbe_hw *, u16 *, u16 *); + s32 (*stop_adapter)(struct txgbe_hw *); + s32 (*get_bus_info)(struct txgbe_hw *); + void (*set_lan_id)(struct txgbe_hw *); + s32 (*setup_sfp)(struct txgbe_hw *); + s32 (*enable_rx_dma)(struct txgbe_hw *, u32); + s32 (*disable_sec_rx_path)(struct txgbe_hw *); + s32 (*enable_sec_rx_path)(struct txgbe_hw *); + s32 (*disable_sec_tx_path)(struct txgbe_hw *); + s32 (*enable_sec_tx_path)(struct txgbe_hw *); + s32 (*acquire_swfw_sync)(struct txgbe_hw *, u32); + void (*release_swfw_sync)(struct txgbe_hw *, u32); + + /* Link */ + void (*disable_tx_laser)(struct txgbe_hw *); + void (*enable_tx_laser)(struct txgbe_hw *); + void (*flap_tx_laser)(struct txgbe_hw *); + s32 (*setup_link)(struct txgbe_hw *, u32, bool); + s32 (*setup_mac_link)(struct txgbe_hw *, u32, bool); + s32 (*check_link)(struct txgbe_hw *, u32 *, bool *, bool); + s32 (*get_link_capabilities)(struct txgbe_hw *, u32 *, + bool *); + void (*set_rate_select_speed)(struct txgbe_hw *, u32); + + /* Packet Buffer manipulation */ + void (*setup_rxpba)(struct txgbe_hw *, int, u32, int); + + /* LED */ + s32 (*led_on)(struct txgbe_hw *, u32); + s32 (*led_off)(struct txgbe_hw *, u32); + + /* RAR, Multicast, VLAN */ + s32 (*set_rar)(struct txgbe_hw *, u32, u8 *, u64, u32); + s32 (*clear_rar)(struct txgbe_hw *, u32); + s32 (*insert_mac_addr)(struct txgbe_hw *, u8 *, u32); + s32 (*set_vmdq)(struct txgbe_hw *, u32, u32); + s32 (*set_vmdq_san_mac)(struct txgbe_hw *, u32); + s32 (*clear_vmdq)(struct txgbe_hw *, u32, u32); + s32 (*init_rx_addrs)(struct txgbe_hw *); + s32 (*update_uc_addr_list)(struct txgbe_hw *, u8 *, u32, + txgbe_mc_addr_itr); + s32 (*update_mc_addr_list)(struct txgbe_hw *, u8 *, u32, + txgbe_mc_addr_itr, bool clear); + s32 (*enable_mc)(struct txgbe_hw *); + s32 (*disable_mc)(struct txgbe_hw *); + s32 (*clear_vfta)(struct txgbe_hw *); + s32 (*set_vfta)(struct txgbe_hw *, u32, u32, bool); + s32 (*set_vlvf)(struct txgbe_hw *, u32, u32, bool, bool *); + s32 (*init_uta_tables)(struct txgbe_hw *); + void (*set_mac_anti_spoofing)(struct txgbe_hw *, bool, int); + void (*set_vlan_anti_spoofing)(struct txgbe_hw *, bool, int); + + /* Flow Control */ + s32 (*fc_enable)(struct txgbe_hw *); + s32 (*setup_fc)(struct txgbe_hw *); + + /* Manageability interface */ + s32 (*set_fw_drv_ver)(struct txgbe_hw *, u8, u8, u8, u8); + s32 (*get_thermal_sensor_data)(struct txgbe_hw *); + s32 (*init_thermal_sensor_thresh)(struct txgbe_hw *hw); + void (*get_rtrup2tc)(struct txgbe_hw *hw, u8 *map); + void (*disable_rx)(struct txgbe_hw *hw); + void (*enable_rx)(struct txgbe_hw *hw); + void 
(*set_source_address_pruning)(struct txgbe_hw *, bool, + unsigned int); + void (*set_ethertype_anti_spoofing)(struct txgbe_hw *, bool, int); + s32 (*dmac_config)(struct txgbe_hw *hw); + s32 (*setup_eee)(struct txgbe_hw *hw, bool enable_eee); +}; + +struct txgbe_phy_operations { + s32 (*identify)(struct txgbe_hw *); + s32 (*identify_sfp)(struct txgbe_hw *); + s32 (*setup_sfp)(struct txgbe_hw *); + s32 (*init)(struct txgbe_hw *); + s32 (*reset)(struct txgbe_hw *); + s32 (*read_reg)(struct txgbe_hw *, u32, u32, u16 *); + s32 (*write_reg)(struct txgbe_hw *, u32, u32, u16); + s32 (*read_reg_mdi)(struct txgbe_hw *, u32, u32, u16 *); + s32 (*write_reg_mdi)(struct txgbe_hw *, u32, u32, u16); + u32 (*setup_link)(struct txgbe_hw *, u32, bool); + s32 (*setup_internal_link)(struct txgbe_hw *); + u32 (*setup_link_speed)(struct txgbe_hw *, u32, bool); + s32 (*check_link)(struct txgbe_hw *, u32 *, bool *); + s32 (*get_firmware_version)(struct txgbe_hw *, u16 *); + s32 (*read_i2c_byte)(struct txgbe_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct txgbe_hw *, u8, u8, u8); + s32 (*read_i2c_sff8472)(struct txgbe_hw *, u8, u8 *); + s32 (*read_i2c_sff8636)(struct txgbe_hw *, u8, u8, u8 *); + s32 (*read_i2c_eeprom)(struct txgbe_hw *, u8, u8 *); + s32 (*read_i2c_sfp_phy)(struct txgbe_hw *, u16, u16 *); + s32 (*write_i2c_eeprom)(struct txgbe_hw *, u8, u8); + s32 (*check_overtemp)(struct txgbe_hw *); +}; + +struct txgbe_eeprom_info { + struct txgbe_eeprom_operations ops; + enum txgbe_eeprom_type type; + u32 semaphore_delay; + u16 word_size; + u16 address_bits; + u16 word_page_size; + u16 ctrl_word_3; + u16 sw_region_offset; +}; + +struct txgbe_flash_info { + struct txgbe_flash_operations ops; + u32 semaphore_delay; + u32 dword_size; + u16 address_bits; +}; + +enum txgbe_mac_type { + txgbe_mac_unknown = 0, + txgbe_mac_sp, + txgbe_mac_aml, + txgbe_mac_aml40 +}; + +#define TXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 +struct txgbe_mac_info { + enum txgbe_mac_type type; + + struct txgbe_mac_operations ops; + u8 addr[TXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 perm_addr[TXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 san_addr[TXGBE_ETH_LENGTH_OF_ADDRESS]; + /* prefix for World Wide Node Name (WWNN) */ + u16 wwnn_prefix; + /* prefix for World Wide Port Name (WWPN) */ + u16 wwpn_prefix; +#define TXGBE_MAX_MTA 128 +#define TXGBE_MAX_VFTA_ENTRIES 128 + u32 mta_shadow[TXGBE_MAX_MTA]; + s32 mc_filter_type; + u32 mcft_size; + u32 vft_shadow[TXGBE_MAX_VFTA_ENTRIES]; + u32 vft_size; + u32 num_rar_entries; + u32 rar_highwater; + u32 rx_pb_size; + u32 max_tx_queues; + u32 max_rx_queues; + u32 orig_sr_pcs_ctl2; + u32 orig_sr_pma_mmd_ctl1; + u32 orig_sr_an_mmd_ctl; + u32 orig_sr_an_mmd_adv_reg2; + u32 orig_vr_xs_or_pcs_mmd_digi_ctl1; + u8 san_mac_rar_index; + bool get_link_status; + u16 max_msix_vectors; + bool arc_subsystem_valid; + bool orig_link_settings_stored; + bool autotry_restart; + u8 flags; + struct txgbe_thermal_sensor_data thermal_sensor_data; + bool thermal_sensor_enabled; + struct txgbe_dmac_config dmac_config; + bool set_lben; + bool autoneg; +}; + +struct txgbe_phy_info { + struct txgbe_phy_operations ops; + enum txgbe_phy_type type; + u32 addr; + u32 id; + enum txgbe_sfp_type sfp_type; + u32 fiber_suppport_speed; + bool sfp_setup_needed; + u32 revision; + enum txgbe_media_type media_type; + u32 phy_semaphore_mask; + u8 lan_id; /* to be delete */ + txgbe_autoneg_advertised autoneg_advertised; + enum txgbe_smart_speed smart_speed; + bool smart_speed_active; + bool multispeed_fiber; + bool reset_if_overtemp; + txgbe_physical_layer link_mode; 
+}; + +#include "txgbe_mbx.h" + +struct txgbe_mbx_operations { + void (*init_params)(struct txgbe_hw *hw); + s32 (*read)(struct txgbe_hw *, u32 *, u16, u16); + s32 (*write)(struct txgbe_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct txgbe_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct txgbe_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct txgbe_hw *, u16); + s32 (*check_for_ack)(struct txgbe_hw *, u16); + s32 (*check_for_rst)(struct txgbe_hw *, u16); +}; + +struct phytxeq { + u32 main; //TX EQ main (bit[5:0]) + u32 pre1; //TX EQ pre1 (bit[5:0]) + u32 pre2; //TX EQ pre2 (bit[5:0]) + u32 post; //TX EQ post (bit[5:0]) +}; + +struct txgbe_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct txgbe_mbx_info { + struct txgbe_mbx_operations ops; + struct txgbe_mbx_stats stats; + u32 timeout; + u32 udelay; + u32 v2p_mailbox; + u16 size; +}; + +enum txgbe_reset_type { + TXGBE_LAN_RESET = 0, + TXGBE_SW_RESET, + TXGBE_GLOBAL_RESET +}; + +enum txgbe_link_status { + TXGBE_LINK_STATUS_NONE = 0, + TXGBE_LINK_STATUS_KX, + TXGBE_LINK_STATUS_KX4 +}; + +struct txgbe_hw { + u8 IOMEM *hw_addr; + void *back; + struct txgbe_mac_info mac; + struct txgbe_addr_filter_info addr_ctrl; + struct txgbe_fc_info fc; + struct txgbe_phy_info phy; + struct txgbe_eeprom_info eeprom; + struct txgbe_flash_info flash; + struct txgbe_bus_info bus; + struct txgbe_mbx_info mbx; + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + bool adapter_stopped; + int api_version; + enum txgbe_reset_type reset_type; + bool force_full_reset; + bool allow_unsupported_sfp; + bool wol_enabled; + bool Fdir_enabled; + MTD_DEV phy_dev; + enum txgbe_link_status link_status; + u16 tpid[8]; + u16 oem_ssid; + u16 oem_svid; + bool f2c_mod_status; /* fiber to copper modules internal phy link status */ + bool dac_sfp; /* force dac sfp to kr mode */ + bool bypassCtle; /* DAC cable length */ + u32 q_tx_regs[512]; +}; + +#define TCALL(hw, func, args...) (((hw)->func != NULL) \ + ? 
(hw)->func((hw), ##args) : TXGBE_NOT_IMPLEMENTED) + +/* Error Codes */ +#define TXGBE_ERR 100 +#define TXGBE_NOT_IMPLEMENTED 0x7FFFFFFF +/* (-TXGBE_ERR, TXGBE_ERR): reserved for non-txgbe defined error code */ +#define TXGBE_ERR_NOSUPP -(TXGBE_ERR+0) +#define TXGBE_ERR_EEPROM -(TXGBE_ERR+1) +#define TXGBE_ERR_EEPROM_CHECKSUM -(TXGBE_ERR+2) +#define TXGBE_ERR_PHY -(TXGBE_ERR+3) +#define TXGBE_ERR_CONFIG -(TXGBE_ERR+4) +#define TXGBE_ERR_PARAM -(TXGBE_ERR+5) +#define TXGBE_ERR_MAC_TYPE -(TXGBE_ERR+6) +#define TXGBE_ERR_UNKNOWN_PHY -(TXGBE_ERR+7) +#define TXGBE_ERR_LINK_SETUP -(TXGBE_ERR+8) +#define TXGBE_ERR_ADAPTER_STOPPED -(TXGBE_ERR+09) +#define TXGBE_ERR_INVALID_MAC_ADDR -(TXGBE_ERR+10) +#define TXGBE_ERR_DEVICE_NOT_SUPPORTED -(TXGBE_ERR+11) +#define TXGBE_ERR_MASTER_REQUESTS_PENDING -(TXGBE_ERR+12) +#define TXGBE_ERR_INVALID_LINK_SETTINGS -(TXGBE_ERR+13) +#define TXGBE_ERR_AUTONEG_NOT_COMPLETE -(TXGBE_ERR+14) +#define TXGBE_ERR_RESET_FAILED -(TXGBE_ERR+15) +#define TXGBE_ERR_SWFW_SYNC -(TXGBE_ERR+16) +#define TXGBE_ERR_PHY_ADDR_INVALID -(TXGBE_ERR+17) +#define TXGBE_ERR_I2C -(TXGBE_ERR+18) +#define TXGBE_ERR_SFP_NOT_SUPPORTED -(TXGBE_ERR+19) +#define TXGBE_ERR_SFP_NOT_PRESENT -(TXGBE_ERR+20) +#define TXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -(TXGBE_ERR+21) +#define TXGBE_ERR_NO_SAN_ADDR_PTR -(TXGBE_ERR+22) +#define TXGBE_ERR_FDIR_REINIT_FAILED -(TXGBE_ERR+23) +#define TXGBE_ERR_EEPROM_VERSION -(TXGBE_ERR+24) +#define TXGBE_ERR_NO_SPACE -(TXGBE_ERR+25) +#define TXGBE_ERR_OVERTEMP -(TXGBE_ERR+26) +#define TXGBE_ERR_UNDERTEMP -(TXGBE_ERR+27) +#define TXGBE_ERR_FC_NOT_NEGOTIATED -(TXGBE_ERR+28) +#define TXGBE_ERR_FC_NOT_SUPPORTED -(TXGBE_ERR+29) +#define TXGBE_ERR_SFP_SETUP_NOT_COMPLETE -(TXGBE_ERR+30) +#define TXGBE_ERR_PBA_SECTION -(TXGBE_ERR+31) +#define TXGBE_ERR_INVALID_ARGUMENT -(TXGBE_ERR+32) +#define TXGBE_ERR_HOST_INTERFACE_COMMAND -(TXGBE_ERR+33) +#define TXGBE_ERR_OUT_OF_MEM -(TXGBE_ERR+34) +#define TXGBE_ERR_FEATURE_NOT_SUPPORTED -(TXGBE_ERR+36) +#define TXGBE_ERR_EEPROM_PROTECTED_REGION -(TXGBE_ERR+37) +#define TXGBE_ERR_FDIR_CMD_INCOMPLETE -(TXGBE_ERR+38) +#define TXGBE_ERR_FLASH_LOADING_FAILED -(TXGBE_ERR+39) +#define TXGBE_ERR_XPCS_POWER_UP_FAILED -(TXGBE_ERR+40) +#define TXGBE_ERR_FW_RESP_INVALID -(TXGBE_ERR+41) +#define TXGBE_ERR_PHY_INIT_NOT_DONE -(TXGBE_ERR+42) +#define TXGBE_ERR_TIMEOUT -(TXGBE_ERR+43) +#define TXGBE_ERR_TOKEN_RETRY -(TXGBE_ERR+44) +#define TXGBE_ERR_REGISTER -(TXGBE_ERR+45) +#define TXGBE_ERR_MBX -(TXGBE_ERR+46) +#define TXGBE_ERR_MNG_ACCESS_FAILED -(TXGBE_ERR+47) + +/** + * register operations + **/ +/* read register */ +#define TXGBE_DEAD_READ_RETRIES 10 +#define TXGBE_DEAD_READ_REG 0xdeadbeefU +#define TXGBE_DEAD_READ_REG64 0xdeadbeefdeadbeefULL +#define TXGBE_FAILED_READ_REG 0xffffffffU +#define TXGBE_FAILED_READ_REG64 0xffffffffffffffffULL + +#define TXGBE_LLDP_REG 0xf1000 +#define TXGBE_LLDP_ON 0x0000000f + +static inline bool TXGBE_REMOVED(void __iomem *addr) +{ + return unlikely(!addr); +} + +static inline u32 +txgbe_rd32(u8 __iomem *base) +{ + return readl(base); +} + +static inline u32 +rd32(struct txgbe_hw *hw, u32 reg) { - struct wx *wx = netdev_priv(netdev); + u8 __iomem *base = READ_ONCE(hw->hw_addr); + u32 val = TXGBE_FAILED_READ_REG; - return wx->priv; + if (unlikely(!base)) + return val; + + val = txgbe_rd32(base + reg); + + return val; } +#define rd32a(a, reg, offset) ( \ + rd32((a), (reg) + ((offset) << 2))) -#define NODE_PROP(_NAME, _PROP) \ - (const struct software_node) { \ - .name = _NAME, \ - .properties = _PROP, \ - } - -enum 
txgbe_swnodes { - SWNODE_GPIO = 0, - SWNODE_I2C, - SWNODE_SFP, - SWNODE_PHYLINK, - SWNODE_MAX -}; - -struct txgbe_nodes { - char gpio_name[32]; - char i2c_name[32]; - char sfp_name[32]; - char phylink_name[32]; - struct property_entry gpio_props[1]; - struct property_entry i2c_props[3]; - struct property_entry sfp_props[8]; - struct property_entry phylink_props[2]; - struct software_node_ref_args i2c_ref[1]; - struct software_node_ref_args gpio0_ref[1]; - struct software_node_ref_args gpio1_ref[1]; - struct software_node_ref_args gpio2_ref[1]; - struct software_node_ref_args gpio3_ref[1]; - struct software_node_ref_args gpio4_ref[1]; - struct software_node_ref_args gpio5_ref[1]; - struct software_node_ref_args sfp_ref[1]; - struct software_node swnodes[SWNODE_MAX]; - const struct software_node *group[SWNODE_MAX + 1]; -}; - -struct txgbe { - struct wx *wx; - struct txgbe_nodes nodes; - struct dw_xpcs *xpcs; - struct phylink *phylink; - struct platform_device *sfp_dev; - struct platform_device *i2c_dev; - struct clk_lookup *clock; - struct clk *clk; - struct gpio_chip *gpio; -}; +static inline u32 +rd32m(struct txgbe_hw *hw, u32 reg, u32 mask) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + u32 val = TXGBE_FAILED_READ_REG; + + if (unlikely(!base)) + return val; + + val = txgbe_rd32(base + reg); + if (unlikely(val == TXGBE_FAILED_READ_REG)) + return val; + + return val & mask; +} + +/* write register */ +static inline void +txgbe_wr32(u8 __iomem *base, u32 val) +{ + writel(val, base); +} + +static inline void +wr32(struct txgbe_hw *hw, u32 reg, u32 val) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + + if (unlikely(!base)) + return; + + txgbe_wr32(base + reg, val); +} +#define wr32a(a, reg, off, val) \ + wr32((a), (reg) + ((off) << 2), (val)) + +static inline void +wr32m(struct txgbe_hw *hw, u32 reg, u32 mask, u32 field) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + u32 val; + + if (unlikely(!base)) + return; + + val = txgbe_rd32(base + reg); + if (unlikely(val == TXGBE_FAILED_READ_REG)) + return; + + val = ((val & ~mask) | (field & mask)); + txgbe_wr32(base + reg, val); +} + +/* poll register */ +#define TXGBE_MDIO_TIMEOUT 1000 +#define TXGBE_I2C_TIMEOUT 1000 +#define TXGBE_SPI_TIMEOUT 1000 +static inline s32 +po32m(struct txgbe_hw *hw, u32 reg, + u32 mask, u32 field, int usecs, int count) +{ + int loop; + + loop = (count ? count : (usecs + 9) / 10); + usecs = (loop ? (usecs + loop - 1) / loop : 0); + + count = loop; + do { + u32 value = rd32(hw, reg); + if ((value & mask) == (field & mask)) { + break; + } + + if (loop-- <= 0) + break; + usec_delay(usecs); + } while (true); + + return (count - loop <= count ? 0 : TXGBE_ERR_TIMEOUT); +} + +#define TXGBE_WRITE_FLUSH(H) rd32(H, TXGBE_MIS_PWR) #endif /* _TXGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_xsk.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_xsk.c new file mode 100644 index 0000000000000000000000000000000000000000..b33ed67c6460b5d52084fc23c67635535adb3534 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_xsk.c @@ -0,0 +1,1214 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2022 Intel Corporation. 
*/ + +#include "txgbe.h" + +#ifdef HAVE_XDP_SUPPORT +#include +#endif +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL +#include +#else +#include +#endif +#endif +#ifdef HAVE_XDP_BUFF_RXQ +#include +#endif + +#include "txgbe_xsk.h" +#ifdef HAVE_AF_XDP_ZC_SUPPORT +static void txgbe_disable_txr_hw(struct txgbe_adapter *adapter, + struct txgbe_ring *tx_ring) +{ + struct txgbe_hw *hw = &adapter->hw; + u8 reg_idx = tx_ring->reg_idx; + u32 txdctl; + + wr32(hw, TXGBE_PX_TR_CFG(reg_idx), TXGBE_PX_TR_CFG_SWFLSH);; + + /* delay mechanism from txgbe_disable_tx */ + msleep(10); + + txdctl = rd32(hw, TXGBE_PX_TR_CFG(reg_idx)); + + if (!(txdctl & TXGBE_PX_TR_CFG_ENABLE)) + return; + + e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n"); +} + + +static void txgbe_disable_rxr_hw(struct txgbe_adapter *adapter, + struct txgbe_ring *rx_ring) +{ + struct txgbe_hw *hw = &adapter->hw; + u8 reg_idx = rx_ring->reg_idx; + u32 rxdctl; + + rxdctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx)); + rxdctl &= ~TXGBE_PX_RR_CFG_RR_EN; + + /* write value back with RXDCTL.ENABLE bit cleared */ + wr32(hw, TXGBE_PX_RR_CFG(reg_idx), rxdctl); + TXGBE_WRITE_FLUSH(hw); + + msleep(10); + rxdctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx)); + + if (!(rxdctl & TXGBE_PX_RR_CFG_RR_EN)) + return; + + e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n"); +} + +static void txgbe_disable_txr(struct txgbe_adapter *adapter, + struct txgbe_ring *tx_ring) +{ + set_bit(__TXGBE_TX_DISABLED, &tx_ring->state); + txgbe_disable_txr_hw(adapter, tx_ring); +} + + +static void txgbe_reset_txr_stats(struct txgbe_ring *tx_ring) +{ + memset(&tx_ring->stats, 0, sizeof(tx_ring->stats)); + memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats)); +} + +static void txgbe_reset_rxr_stats(struct txgbe_ring *rx_ring) +{ + memset(&rx_ring->stats, 0, sizeof(rx_ring->stats)); + memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats)); +} + +/** + * txgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings + * @adapter: adapter structure + * @ring: ring index + * + * This function disables a certain Rx/Tx/XDP Tx ring. The function + * assumes that the netdev is running. + **/ +void txgbe_txrx_ring_disable(struct txgbe_adapter *adapter, int ring) +{ + struct txgbe_ring *rx_ring, *tx_ring, *xdp_ring; + + rx_ring = adapter->rx_ring[ring]; + tx_ring = adapter->tx_ring[ring]; + xdp_ring = adapter->xdp_ring[ring]; + + txgbe_disable_txr(adapter, tx_ring); + if (xdp_ring) + txgbe_disable_txr(adapter, xdp_ring); + txgbe_disable_rxr_hw(adapter, rx_ring); + + if (xdp_ring) + synchronize_rcu(); + + /* Rx/Tx/XDP Tx share the same napi context. */ + napi_disable(&rx_ring->q_vector->napi); + + txgbe_clean_tx_ring(tx_ring); + if (xdp_ring) + txgbe_clean_tx_ring(xdp_ring); + txgbe_clean_rx_ring(rx_ring); + + txgbe_reset_txr_stats(tx_ring); + if (xdp_ring) + txgbe_reset_txr_stats(xdp_ring); + txgbe_reset_rxr_stats(rx_ring); +} + +/** + * txgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings + * @adapter: adapter structure + * @ring: ring index + * + * This function enables a certain Rx/Tx/XDP Tx ring. The function + * assumes that the netdev is running. + **/ +void txgbe_txrx_ring_enable(struct txgbe_adapter *adapter, int ring) +{ + struct txgbe_ring *rx_ring, *tx_ring, *xdp_ring; + + rx_ring = adapter->rx_ring[ring]; + tx_ring = adapter->tx_ring[ring]; + xdp_ring = adapter->xdp_ring[ring]; + + /* Rx/Tx/XDP Tx share the same napi context. 
*/ + napi_enable(&rx_ring->q_vector->napi); + + txgbe_configure_tx_ring(adapter, tx_ring); + if (xdp_ring) + txgbe_configure_tx_ring(adapter, xdp_ring); + txgbe_configure_rx_ring(adapter, rx_ring); + + clear_bit(__TXGBE_TX_DISABLED, &tx_ring->state); + if (xdp_ring) + clear_bit(__TXGBE_TX_DISABLED, &xdp_ring->state); +} + +#ifndef HAVE_NETDEV_BPF_XSK_POOL +struct xdp_umem *txgbe_xsk_umem(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +#else +struct xsk_buff_pool *txgbe_xsk_umem(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +#endif /* HAVE_NETDEV_BPF_XSK_POOL */ +{ + bool xdp_on = READ_ONCE(adapter->xdp_prog); + int qid = ring->queue_index; + + if (!adapter->xsk_pools || !adapter->xsk_pools[qid] || + qid >= adapter->num_xsk_pools || !xdp_on || + !test_bit(qid, adapter->af_xdp_zc_qps)) + return NULL; + return adapter->xsk_pools[qid]; +} + +static int txgbe_alloc_xsk_umems(struct txgbe_adapter *adapter) +{ + if (adapter->xsk_pools) + return 0; + + adapter->num_xsk_pools_used = 0; + adapter->num_xsk_pools = adapter->num_rx_queues; + adapter->xsk_pools = kcalloc(adapter->num_xsk_pools, + sizeof(*adapter->xsk_pools), + GFP_KERNEL); + if (!adapter->xsk_pools) { + adapter->num_xsk_pools = 0; + return -ENOMEM; + } + + return 0; +} + +/** + * txgbe_xsk_any_rx_ring_enabled - Checks if Rx rings have AF_XDP UMEM attached + * @adapter: adapter + * + * Returns true if any of the Rx rings has an AF_XDP UMEM attached + **/ +bool txgbe_xsk_any_rx_ring_enabled(struct txgbe_adapter *adapter) +{ + int i; + + if (!adapter->xsk_pools) + return false; + + for (i = 0; i < adapter->num_xsk_pools; i++) { + if (adapter->xsk_pools[i]) + return true; + } + + return false; +} + +#ifndef HAVE_NETDEV_BPF_XSK_POOL +static int txgbe_add_xsk_umem(struct txgbe_adapter *adapter, + struct xdp_umem *pool, + u16 qid) +#else +static int txgbe_add_xsk_umem(struct txgbe_adapter *adapter, + struct xsk_buff_pool *pool, + u16 qid) +#endif +{ + int err; + + err = txgbe_alloc_xsk_umems(adapter); + if (err) + return err; + + adapter->xsk_pools[qid] = pool; + adapter->num_xsk_pools_used++; + + return 0; +} + +static void txgbe_remove_xsk_umem(struct txgbe_adapter *adapter, u16 qid) +{ + adapter->xsk_pools[qid] = NULL; + adapter->num_xsk_pools_used--; + + if (adapter->num_xsk_pools == 0) { + kfree(adapter->xsk_pools); + adapter->xsk_pools = NULL; + adapter->num_xsk_pools = 0; + } +} + +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL +static int txgbe_xsk_umem_dma_map(struct txgbe_adapter *adapter, + struct xdp_umem *pool) +{ + struct device *dev = &adapter->pdev->dev; + unsigned int i, j; + dma_addr_t dma; + + for (i = 0; i < pool->npgs; i++) { + dma = dma_map_page_attrs(dev, pool->pgs[i], 0, PAGE_SIZE, + DMA_BIDIRECTIONAL, TXGBE_RX_DMA_ATTR); + if (dma_mapping_error(dev, dma)) + goto out_unmap; + + pool->pages[i].dma = dma; + } + + return 0; + +out_unmap: + for (j = 0; j < i; j++) { + dma_unmap_page_attrs(dev, pool->pages[i].dma, PAGE_SIZE, + DMA_BIDIRECTIONAL, TXGBE_RX_DMA_ATTR); + pool->pages[i].dma = 0; + } + + return -1; +} + +static void txgbe_xsk_umem_dma_unmap(struct txgbe_adapter *adapter, + struct xdp_umem *pool) +{ + struct device *dev = &adapter->pdev->dev; + unsigned int i; + + for (i = 0; i < pool->npgs; i++) { + dma_unmap_page_attrs(dev, pool->pages[i].dma, PAGE_SIZE, + DMA_BIDIRECTIONAL, TXGBE_RX_DMA_ATTR); + + pool->pages[i].dma = 0; + } +} +#endif /* HAVE_MEM_TYPE_XSK_BUFF_POOL */ + +#ifndef HAVE_NETDEV_BPF_XSK_POOL +static int txgbe_xsk_umem_enable(struct txgbe_adapter *adapter, + struct xdp_umem *pool, + u16 
qid) +#else +static int txgbe_xsk_umem_enable(struct txgbe_adapter *adapter, + struct xsk_buff_pool *pool, + u16 qid) +#endif +{ +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + struct xdp_umem_fq_reuse *reuseq; +#endif + bool if_running; + int err; + + if (qid >= adapter->num_rx_queues) + return -EINVAL; + + if (adapter->xsk_pools) { + if (qid >= adapter->num_xsk_pools) + return -EINVAL; + if (adapter->xsk_pools[qid]) + return -EBUSY; + } + +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count); + if (!reuseq) + return -ENOMEM; + + xsk_reuseq_free(xsk_reuseq_swap(pool, reuseq)); + + err = txgbe_xsk_umem_dma_map(adapter, pool); +#else + err = xsk_pool_dma_map(pool, &adapter->pdev->dev, TXGBE_RX_DMA_ATTR); +#endif /* HAVE_MEM_TYPE_XSK_BUFF_POOL */ + if (err) + return err; + + if_running = netif_running(adapter->netdev) && + READ_ONCE(adapter->xdp_prog); + + if (if_running) + txgbe_txrx_ring_disable(adapter, qid); + + /*to avoid xsk fd get issue in some kernel version*/ + msleep(400); + + set_bit(qid, adapter->af_xdp_zc_qps); + err = txgbe_add_xsk_umem(adapter, pool, qid); + if (err) + return err; + + if (if_running) { + txgbe_txrx_ring_enable(adapter, qid); + + /* Kick start the NAPI context so that receiving will start */ +#ifdef HAVE_NDO_XSK_WAKEUP + err = txgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX); +#else + err = txgbe_xsk_async_xmit(adapter->netdev, qid); +#endif + + if (err) + return err; + } + + return 0; +} + +static int txgbe_xsk_umem_disable(struct txgbe_adapter *adapter, u16 qid) +{ + bool if_running; + + if (!adapter->xsk_pools || qid >= adapter->num_xsk_pools || + !adapter->xsk_pools[qid]) + return -EINVAL; + + if_running = netif_running(adapter->netdev) && + READ_ONCE(adapter->xdp_prog); + + if (if_running) + txgbe_txrx_ring_disable(adapter, qid); + + clear_bit(qid, adapter->af_xdp_zc_qps); + +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + txgbe_xsk_umem_dma_unmap(adapter, adapter->xsk_pools[qid]); +#else + xsk_pool_dma_unmap(adapter->xsk_pools[qid], TXGBE_RX_DMA_ATTR); +#endif /* HAVE_MEM_TYPE_XSK_BUFF_POOL */ + txgbe_remove_xsk_umem(adapter, qid); + + if (if_running) + txgbe_txrx_ring_enable(adapter, qid); + + return 0; +} + +#ifndef HAVE_NETDEV_BPF_XSK_POOL +int txgbe_xsk_umem_setup(struct txgbe_adapter *adapter, struct xdp_umem *pool, + u16 qid) +#else +int txgbe_xsk_umem_setup(struct txgbe_adapter *adapter, struct xsk_buff_pool *pool, + u16 qid) +#endif +{ + return pool ? 
txgbe_xsk_umem_enable(adapter, pool, qid) : + txgbe_xsk_umem_disable(adapter, qid); +} + +static int txgbe_run_xdp_zc(struct txgbe_adapter *adapter, + struct txgbe_ring *rx_ring, + struct xdp_buff *xdp) +{ + int err, result = TXGBE_XDP_PASS; + struct bpf_prog *xdp_prog; + struct txgbe_ring *ring; + struct xdp_frame *xdpf; + u32 act; + + rcu_read_lock(); + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + act = bpf_prog_run_xdp(xdp_prog, xdp); +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + xdp->handle += xdp->data - xdp->data_hard_start; +#endif + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) { + result = TXGBE_XDP_CONSUMED; + break; + } + ring = adapter->xdp_ring[smp_processor_id() % MAX_XDP_QUEUES]; + if (static_branch_unlikely(&txgbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); +#ifdef HAVE_XDP_FRAME_STRUCT + result = txgbe_xmit_xdp_ring(ring, xdpf); +#else + result = txgbe_xmit_xdp_ring(ring, xdp); +#endif /* HAVE_XDP_FRAME_STRUCT */ + if (static_branch_unlikely(&txgbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); + break; + case XDP_REDIRECT: + err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); + result = !err ? TXGBE_XDP_REDIR : TXGBE_XDP_CONSUMED; + break; + default: + bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + /* fallthrough -- handle aborts by dropping packet */ + fallthrough; + case XDP_DROP: + result = TXGBE_XDP_CONSUMED; + break; + } + rcu_read_unlock(); + return result; +} + +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL +static struct +txgbe_rx_buffer *txgbe_get_rx_buffer_zc(struct txgbe_ring *rx_ring, + unsigned int size) +{ + struct txgbe_rx_buffer *bi; + + bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + bi->dma, 0, + size, + DMA_BIDIRECTIONAL); + + return bi; +} + +static void txgbe_reuse_rx_buffer_zc(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *obi) +{ + unsigned long mask = (unsigned long)rx_ring->xsk_pool->chunk_mask; + u64 hr = rx_ring->xsk_pool->headroom + XDP_PACKET_HEADROOM; + u16 nta = rx_ring->next_to_alloc; + struct txgbe_rx_buffer *nbi; + + nbi = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc]; + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + nbi->dma = obi->dma & mask; + nbi->dma += hr; + + nbi->addr = (void *)((unsigned long)obi->addr & mask); + nbi->addr += hr; + + nbi->handle = obi->handle & mask; + nbi->handle += rx_ring->xsk_pool->headroom; + + obi->addr = NULL; + obi->skb = NULL; +} + +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL +void txgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle) +{ + struct txgbe_rx_buffer *bi; + struct txgbe_ring *rx_ring; + u64 hr, mask; + u16 nta; + + rx_ring = container_of(alloc, struct txgbe_ring, zca); + hr = rx_ring->xsk_pool->headroom + XDP_PACKET_HEADROOM; + mask = rx_ring->xsk_pool->chunk_mask; + + nta = rx_ring->next_to_alloc; + bi = rx_ring->rx_buffer_info; + + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; + + handle &= mask; + + bi->dma = xdp_umem_get_dma(rx_ring->xsk_pool, handle); + bi->dma += hr; + + bi->addr = xdp_umem_get_data(rx_ring->xsk_pool, handle); + bi->addr += hr; + + bi->handle = (u64)handle + rx_ring->xsk_pool->headroom; +} +#endif + +static bool txgbe_alloc_buffer_zc(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *bi) +{ +#ifndef HAVE_NETDEV_BPF_XSK_POOL + struct xdp_umem *umem = rx_ring->xsk_pool; +#endif + void *addr = bi->addr; + u64 handle, hr; + + if (addr) + return true; + + if (!xsk_umem_peek_addr(umem, &handle)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + hr = umem->headroom + XDP_PACKET_HEADROOM; + + bi->dma = xdp_umem_get_dma(umem, handle); + bi->dma += hr; + + bi->addr = xdp_umem_get_data(umem, handle); + bi->addr += hr; + + bi->handle = handle + umem->headroom; + + xsk_umem_release_addr(umem); + return true; +} + +static bool txgbe_alloc_buffer_slow_zc(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *bi) +{ +#ifndef HAVE_NETDEV_BPF_XSK_POOL + struct xdp_umem *pool = rx_ring->xsk_pool; +#else + struct xsk_buff_pool *pool = rx_ring->xsk_pool; +#endif + u64 handle, hr; + + if (!xsk_umem_peek_addr_rq(pool, &handle)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + handle &= rx_ring->xsk_pool->chunk_mask; + + hr = pool->headroom + XDP_PACKET_HEADROOM; + + bi->dma = xdp_umem_get_dma(pool, handle); + bi->dma += hr; + + bi->addr = xdp_umem_get_data(pool, handle); + bi->addr += hr; + + bi->handle = handle + pool->headroom; + + xsk_umem_release_addr_rq(pool); + return true; +} + +static __always_inline bool +__txgbe_alloc_rx_buffers_zc(struct txgbe_ring *rx_ring, u16 count, + bool alloc(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *bi)) +#else +bool txgbe_alloc_rx_buffers_zc(struct txgbe_ring *rx_ring, u16 count) +#endif /* HAVE_MEM_TYPE_XSK_BUFF_POOL */ +{ + union txgbe_rx_desc *rx_desc; + struct txgbe_rx_buffer *bi; + u16 i = rx_ring->next_to_use; +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + dma_addr_t dma; +#endif + bool ok = true; + + /* nothing to do */ + if (!count) + return true; + + rx_desc = TXGBE_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + do { +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + if (!alloc(rx_ring, bi)) { +#else + bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool); + if (!bi->xdp) { +#endif + ok = false; + break; + } + +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, + rx_ring->rx_buf_len, + DMA_BIDIRECTIONAL); +#else + dma = xsk_buff_xdp_get_dma(bi->xdp); +#endif + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); +#else + rx_desc->read.pkt_addr = cpu_to_le64(dma); +#endif + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = TXGBE_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; + + count--; + } while (count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; +#endif + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. 
(Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } + + return ok; +} + +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL +void txgbe_alloc_rx_buffers_zc(struct txgbe_ring *rx_ring, u16 count) +{ + __txgbe_alloc_rx_buffers_zc(rx_ring, count, + txgbe_alloc_buffer_slow_zc); +} + +static bool txgbe_alloc_rx_buffers_fast_zc(struct txgbe_ring *rx_ring, + u16 count) +{ + return __txgbe_alloc_rx_buffers_zc(rx_ring, count, + txgbe_alloc_buffer_zc); +} +#endif /* HAVE_MEM_TYPE_XSK_BUFF_POOL */ + +static struct sk_buff *txgbe_construct_skb_zc(struct txgbe_ring *rx_ring, +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + struct txgbe_rx_buffer *bi, + struct xdp_buff *xdp) +#else + struct txgbe_rx_buffer *bi) +#endif +{ +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + struct xdp_buff *xdp_buffer = xdp; +#else + struct xdp_buff *xdp_buffer = bi->xdp; +#endif + unsigned int metasize = xdp_buffer->data - xdp_buffer->data_meta; + unsigned int datasize = xdp_buffer->data_end - xdp_buffer->data; + struct sk_buff *skb; + + /* allocate a skb to store the frags */ +#ifdef NEED_NAPI_ALLOC_SKB_NO_MASK + skb = napi_alloc_skb(&rx_ring->q_vector->napi, + xdp_buffer->data_end - xdp_buffer->data_hard_start); +#else + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, + xdp_buffer->data_end - xdp_buffer->data_hard_start, + GFP_ATOMIC | __GFP_NOWARN); +#endif + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, xdp_buffer->data - xdp_buffer->data_hard_start); + memcpy(__skb_put(skb, datasize), xdp_buffer->data, datasize); + if (metasize) + skb_metadata_set(skb, metasize); +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + txgbe_reuse_rx_buffer_zc(rx_ring, bi); +#else + xsk_buff_free(xdp_buffer); + bi->xdp = NULL; +#endif + return skb; +} + +static void txgbe_inc_ntc(struct txgbe_ring *rx_ring) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + prefetch(TXGBE_RX_DESC(rx_ring, ntc)); +} + +int txgbe_clean_rx_irq_zc(struct txgbe_q_vector *q_vector, + struct txgbe_ring *rx_ring, + const int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + struct txgbe_adapter *adapter = q_vector->adapter; + u16 cleaned_count = txgbe_desc_unused(rx_ring); + unsigned int xdp_res, xdp_xmit = 0; + bool failure = false; + struct sk_buff *skb; +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + struct xdp_buff xdp; + + xdp.rxq = &rx_ring->xdp_rxq; +#endif + + + while (likely(total_rx_packets < budget)) { + union txgbe_rx_desc *rx_desc; + struct txgbe_rx_buffer *bi; + unsigned int size; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= TXGBE_RX_BUFFER_WRITE) { + failure = failure || +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + !txgbe_alloc_rx_buffers_fast_zc(rx_ring, + cleaned_count); +#else + !txgbe_alloc_rx_buffers_zc(rx_ring, + cleaned_count); +#endif + cleaned_count = 0; + } + + rx_desc = TXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + bi = txgbe_get_rx_buffer_zc(rx_ring, size); +#else + bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; +#endif + + if (unlikely(!txgbe_test_staterr(rx_desc, + TXGBE_RXD_STAT_EOP))) { + struct txgbe_rx_buffer *next_bi; + +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + txgbe_reuse_rx_buffer_zc(rx_ring, bi); +#else + xsk_buff_free(bi->xdp); + bi->xdp = NULL; +#endif + txgbe_inc_ntc(rx_ring); + next_bi = + &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + next_bi->skb = ERR_PTR(-EINVAL); +#else + next_bi->discard = true; +#endif + continue; + } + +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + if (unlikely(bi->skb)) { + txgbe_reuse_rx_buffer_zc(rx_ring, bi); +#else + if (unlikely(bi->discard)) { + xsk_buff_free(bi->xdp); + bi->xdp = NULL; + bi->discard = false; +#endif + txgbe_inc_ntc(rx_ring); + continue; + } + +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + xdp.data = bi->addr; + xdp.data_meta = xdp.data; + xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM; + xdp.data_end = xdp.data + size; + xdp.handle = bi->handle; + + xdp_res = txgbe_run_xdp_zc(adapter, rx_ring, &xdp); +#else + bi->xdp->data_end = bi->xdp->data + size; + xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool); + xdp_res = txgbe_run_xdp_zc(adapter, rx_ring, bi->xdp); +#endif + + if (xdp_res) { + if (xdp_res & (TXGBE_XDP_TX | TXGBE_XDP_REDIR)) { + xdp_xmit |= xdp_res; +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + bi->addr = NULL; + bi->skb = NULL; +#endif + } else { +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + txgbe_reuse_rx_buffer_zc(rx_ring, bi); +#else + xsk_buff_free(bi->xdp); +#endif + } +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + bi->xdp = NULL; +#endif + total_rx_packets++; + total_rx_bytes += size; + + cleaned_count++; + txgbe_inc_ntc(rx_ring); + continue; + } + + /* XDP_PASS path */ +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + skb = txgbe_construct_skb_zc(rx_ring, bi, &xdp); +#else + skb = txgbe_construct_skb_zc(rx_ring, bi); +#endif + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + break; + } + + cleaned_count++; + txgbe_inc_ntc(rx_ring); + + if (eth_skb_pad(skb)) + continue; + + total_rx_bytes += skb->len; + total_rx_packets++; + + txgbe_process_skb_fields(rx_ring, rx_desc, 
skb); + txgbe_rx_skb(q_vector, rx_ring, rx_desc, skb); + } + + if (xdp_xmit & TXGBE_XDP_REDIR) + xdp_do_flush_map(); + + if (xdp_xmit & TXGBE_XDP_TX) { + struct txgbe_ring *ring = adapter->xdp_ring[smp_processor_id() % adapter->num_xdp_queues]; + if (static_branch_unlikely(&txgbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + wmb(); + writel(ring->next_to_use, ring->tail); + if (static_branch_unlikely(&txgbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); + } + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + return failure ? budget : (int)total_rx_packets; +} + +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL +void txgbe_xsk_clean_rx_ring(struct txgbe_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + struct txgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i]; + + while (i != rx_ring->next_to_alloc) { + xsk_umem_fq_reuse(rx_ring->xsk_pool, bi->handle); + i++; + bi++; + if (i == rx_ring->count) { + i = 0; + bi = rx_ring->rx_buffer_info; + } + } +} +#else +void txgbe_xsk_clean_rx_ring(struct txgbe_ring *rx_ring) +{ + struct txgbe_rx_buffer *bi; + u16 i; + + for (i = 0; i < rx_ring->count; i++) { + bi = &rx_ring->rx_buffer_info[i]; + + if (!bi->xdp) + continue; + + xsk_buff_free(bi->xdp); + bi->xdp = NULL; + } +} +#endif + +static bool txgbe_xmit_zc(struct txgbe_ring *xdp_ring, unsigned int budget) +{ + unsigned int sent_frames = 0, total_bytes = 0; + union txgbe_tx_desc *tx_desc = NULL; + u16 ntu = xdp_ring->next_to_use; + struct txgbe_tx_buffer *tx_bi; + bool work_done = true; +#ifdef XSK_UMEM_RETURNS_XDP_DESC + struct xdp_desc desc; +#endif + dma_addr_t dma; + u32 cmd_type; +#ifndef XSK_UMEM_RETURNS_XDP_DESC + u32 len; +#endif + while (budget-- > 0) { + if (unlikely(!txgbe_desc_unused(xdp_ring))) { + work_done = false; + break; + } + + if (!netif_carrier_ok(xdp_ring->netdev)) + break; + +#ifdef XSK_UMEM_RETURNS_XDP_DESC + if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc)) + break; +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + dma = xdp_umem_get_dma(xdp_ring->xsk_pool, desc.addr); + dma_sync_single_for_device(xdp_ring->dev, dma, desc.len, + DMA_BIDIRECTIONAL); +#else + dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr); + xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, + desc.len); +#endif + + tx_bi = &xdp_ring->tx_buffer_info[ntu]; + tx_bi->bytecount = desc.len; + tx_bi->gso_segs = 1; + tx_bi->xdpf = NULL; +#else + if (!xsk_umem_consume_tx(xdp_ring->xsk_pool, &dma, &len)) + break; + + dma_sync_single_for_device(xdp_ring->dev, dma, len, + DMA_BIDIRECTIONAL); + + tx_bi = &xdp_ring->tx_buffer_info[ntu]; + tx_bi->bytecount = len; + tx_bi->gso_segs = 1; + tx_bi->xdpf = NULL; + +#endif + tx_desc = TXGBE_TX_DESC(xdp_ring, ntu); + tx_desc->read.olinfo_status = 0; + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + /* put descriptor type bits */ + cmd_type = txgbe_tx_cmd_type(tx_bi->tx_flags); +#ifdef XSK_UMEM_RETURNS_XDP_DESC + cmd_type |= desc.len | TXGBE_TXD_CMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = + cpu_to_le32(desc.len << TXGBE_TXD_PAYLEN_SHIFT); +#else + cmd_type |= len | TXGBE_TXD_CMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = + cpu_to_le32(len << TXGBE_TXD_PAYLEN_SHIFT); +#endif + smp_wmb(); + tx_bi->next_to_watch = tx_desc; +#ifdef TXGBE_TXHEAD_WB + tx_bi->next_eop = ntu; 
+#endif + xdp_ring->next_rs_idx = ntu; + ntu++; + if (ntu == xdp_ring->count) + ntu = 0; + xdp_ring->next_to_use = ntu; + + sent_frames++; + total_bytes += tx_bi->bytecount; + } + if (tx_desc) { + cmd_type |= TXGBE_TXD_RS; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + wmb(); + writel(xdp_ring->next_to_use, xdp_ring->tail); + xsk_tx_release(xdp_ring->xsk_pool); + + u64_stats_update_begin(&xdp_ring->syncp); + xdp_ring->stats.bytes += total_bytes; + xdp_ring->stats.packets += sent_frames; + u64_stats_update_end(&xdp_ring->syncp); + xdp_ring->q_vector->tx.total_bytes += total_bytes; + xdp_ring->q_vector->tx.total_packets += sent_frames; + } + + return (budget > 0) && work_done; +} + +static void txgbe_clean_xdp_tx_buffer(struct txgbe_ring *tx_ring, + struct txgbe_tx_buffer *tx_bi) +{ + xdp_return_frame(tx_bi->xdpf); + tx_ring->xdp_tx_active--; + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_bi, dma), + dma_unmap_len(tx_bi, len), DMA_TO_DEVICE); + dma_unmap_len_set(tx_bi, len, 0); + tx_bi->va = NULL; +} + +bool txgbe_clean_xdp_tx_irq(struct txgbe_q_vector *q_vector, + struct txgbe_ring *tx_ring) +{ + u32 next_rs_idx = tx_ring->next_rs_idx; + union txgbe_tx_desc *next_rs_desc; + u32 ntc = tx_ring->next_to_clean; + struct txgbe_tx_buffer *tx_bi; + u16 frames_ready = 0; + u32 xsk_frames = 0; + u16 i; + +#ifdef TXGBE_TXHEAD_WB + /* assumption: the hw struct is embedded in the adapter reached via q_vector */ + struct txgbe_hw *hw = &q_vector->adapter->hw; + u32 head = 0; + u32 temp = tx_ring->next_to_clean; + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + head = *(tx_ring->headwb_mem); +#endif +#ifdef TXGBE_TXHEAD_WB + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + /* we have caught up to head, no work left to do */ + if (temp == head) { + goto out_xmit; + } else if (head > temp && !(next_rs_idx >= temp && (next_rs_idx < head))) { + goto out_xmit; + } else if (!(next_rs_idx >= temp || (next_rs_idx < head))) { + goto out_xmit; + } else { + if (next_rs_idx >= ntc) + frames_ready = next_rs_idx - ntc; + else + frames_ready = next_rs_idx + tx_ring->count - ntc; + } + } else { + next_rs_desc = TXGBE_TX_DESC(tx_ring, next_rs_idx); + if (next_rs_desc->wb.status & + cpu_to_le32(TXGBE_TXD_STAT_DD)) { + if (next_rs_idx >= ntc) + frames_ready = next_rs_idx - ntc; + else + frames_ready = next_rs_idx + tx_ring->count - ntc; + } + } +#else + next_rs_desc = TXGBE_TX_DESC(tx_ring, next_rs_idx); + if (next_rs_desc->wb.status & + cpu_to_le32(TXGBE_TXD_STAT_DD)) { + if (next_rs_idx >= ntc) + frames_ready = next_rs_idx - ntc; + else + frames_ready = next_rs_idx + tx_ring->count - ntc; + } +#endif + if (!frames_ready) + goto out_xmit; + + if (likely(!tx_ring->xdp_tx_active)) { + xsk_frames = frames_ready; + } else { + for (i = 0; i < frames_ready; i++) { + tx_bi = &tx_ring->tx_buffer_info[ntc]; + + if (tx_bi->xdpf) + txgbe_clean_xdp_tx_buffer(tx_ring, tx_bi); + else + xsk_frames++; + + tx_bi->xdpf = NULL; + + ++ntc; + if (ntc >= tx_ring->count) + ntc = 0; + } + } + + tx_ring->next_to_clean += frames_ready; + if (unlikely(tx_ring->next_to_clean >= tx_ring->count)) + tx_ring->next_to_clean -= tx_ring->count; + + if (xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + +out_xmit: + return txgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); +} + +#ifdef HAVE_NDO_XSK_WAKEUP +int txgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 __maybe_unused flags) +#else +int txgbe_xsk_async_xmit(struct net_device *dev, u32 qid) +#endif +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + struct txgbe_ring *ring; + + if (test_bit(__TXGBE_DOWN, &adapter->state)) + return -ENETDOWN; + if 
(!READ_ONCE(adapter->xdp_prog)) + return -ENXIO; + + if (qid >= adapter->num_xdp_queues) + return -ENXIO; + + if (!adapter->xsk_pools || !adapter->xsk_pools[qid]) + return -ENXIO; + + ring = adapter->xdp_ring[qid]; + if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) { + if (likely(napi_schedule_prep(&ring->q_vector->napi))) + __napi_schedule(&ring->q_vector->napi); + } + + return 0; +} + +void txgbe_xsk_clean_tx_ring(struct txgbe_ring *tx_ring) +{ + unsigned long size = sizeof(struct txgbe_tx_buffer) * tx_ring->count; + u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use; + struct txgbe_tx_buffer *tx_bi; + u32 xsk_frames = 0; + + while (ntc != ntu) { + tx_bi = &tx_ring->tx_buffer_info[ntc]; + + if (tx_bi->xdpf) + txgbe_clean_xdp_tx_buffer(tx_ring, tx_bi); + else + xsk_frames++; + + tx_bi->xdpf = NULL; + + ntc++; + if (ntc == tx_ring->count) + ntc = 0; + } + + if (xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + + memset(tx_ring->tx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); +} +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_xsk.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_xsk.h new file mode 100644 index 0000000000000000000000000000000000000000..b844de458b3bbf9b7029a890a66bf73a1dcc9823 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_xsk.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2022 Intel Corporation. */ + +#ifndef _TXGBE_TXRX_COMMON_H_ +#define _TXGBE_TXRX_COMMON_H_ + +#ifndef TXGBE_TXD_CMD +#define TXGBE_TXD_CMD (TXGBE_TXD_EOP | \ + TXGBE_TXD_RS) +#endif + +#define TXGBE_XDP_PASS 0 +#define TXGBE_XDP_CONSUMED BIT(0) +#define TXGBE_XDP_TX BIT(1) +#define TXGBE_XDP_REDIR BIT(2) + +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_XDP_FRAME_STRUCT +int txgbe_xmit_xdp_ring(struct txgbe_ring *ring, struct xdp_frame *xdpf); +#else +int txgbe_xmit_xdp_ring(struct txgbe_ring *ring, struct xdp_buff *xdp); +#endif +#ifdef HAVE_AF_XDP_ZC_SUPPORT +void txgbe_txrx_ring_disable(struct txgbe_adapter *adapter, int ring); +void txgbe_txrx_ring_enable(struct txgbe_adapter *adapter, int ring); +#ifndef HAVE_NETDEV_BPF_XSK_POOL +struct xdp_umem *txgbe_xsk_umem(struct txgbe_adapter *adapter, + struct txgbe_ring *ring); +int txgbe_xsk_umem_setup(struct txgbe_adapter *adapter, struct xdp_umem *umem, + u16 qid); +#else +struct xsk_buff_pool *txgbe_xsk_umem(struct txgbe_adapter *adapter, + struct txgbe_ring *ring); +int txgbe_xsk_umem_setup(struct txgbe_adapter *adapter, struct xsk_buff_pool *umem, + u16 qid); +#endif /* HAVE_NETDEV_BPF_XSK_POOL */ + +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL +void txgbe_alloc_rx_buffers_zc(struct txgbe_ring *rx_ring, u16 cleaned_count); +void txgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle); +#else +bool txgbe_alloc_rx_buffers_zc(struct txgbe_ring *rx_ring, u16 cleaned_count); +#endif +int txgbe_clean_rx_irq_zc(struct txgbe_q_vector *q_vector, + struct txgbe_ring *rx_ring, + const int budget); +void txgbe_xsk_clean_rx_ring(struct txgbe_ring *rx_ring); +bool txgbe_clean_xdp_tx_irq(struct txgbe_q_vector *q_vector, + struct txgbe_ring *tx_ring); +#ifdef HAVE_NDO_XSK_WAKEUP +int txgbe_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); +#else +int txgbe_xsk_async_xmit(struct net_device *dev, u32 queue_id); +#endif +void txgbe_xsk_clean_tx_ring(struct txgbe_ring *tx_ring); +bool txgbe_xsk_any_rx_ring_enabled(struct txgbe_adapter *adapter); +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ 
+#endif /* HAVE_XDP_SUPPORT */ + +bool txgbe_cleanup_headers(struct txgbe_ring __maybe_unused *rx_ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb); +void txgbe_process_skb_fields(struct txgbe_ring *rx_ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb); +void txgbe_rx_skb(struct txgbe_q_vector *q_vector, + struct txgbe_ring *rx_ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb); + +#endif /* _TXGBE_TXRX_COMMON_H_ */